source: trunk/src/dnmalloc.c@ 356

Last change on this file since 356 was 279, checked in by katerina, 15 years ago

Fix for tickets #200 to #206 (kernel check, login checks, bugfixes).

File size: 164.3 KB
[171]1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
[172]2 Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3 and Rainer Wichmann
4
[171]5 The authors can be contacted by:
6 Email: dnmalloc@fort-knox.org
7 Address:
8 Yves Younan
9 Celestijnenlaan 200A
10 B-3001 Heverlee
11 Belgium
12
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 2.1 of the License, or (at your option) any later version.
17
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26
27*/
28
[172]29/* Current version: dnmalloc 1.0 */
[171]30/* Includes arc4random from OpenBSD, which is under the BSD license */
31
32/* Versions:
33 0.1-0.5:
34 Proof of concept implementation by Hans Van den Eynden and Yves Younan
35 0.6-0.7:
36 Bug fixes by Yves Younan
37 0.8-1.0.beta4:
38 Reimplementation from scratch by Yves Younan
39 1.0.beta4:
40 Public release
41 1.0.beta5:
42 Prev_chunkinfo sped up; it was really slow because of the way we did lookups
43 A freechunkinfo region is now freed when it is completely empty and
44 not the current one
45
46 1.0 (Rainer Wichmann [support at la dash samhna dot org]):
47 ---------------------
48
49 Compiler warnings fixed
50 Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
51 (and what the standard says)
52 Removed unused code
53 Fix assert(aligned_OK(chunk(newp)));
54 -> assert(aligned_OK(chunk(oldp)));
55 Fix statistics in sYSMALLOc
56 Fix overwrite of av->top in sYSMALLOc
57 Provide own assert(), glibc assert() doesn't work (calls malloc)
58 Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
59 Remove cfree, independent_cmalloc, independent_comalloc (untested
60 public functions not covered by any standard)
61 Provide posix_memalign (that one is in the standard)
62 Move the malloc_state struct to mmapped memory protected by guard pages
63 Add arc4random function to initialize random canary on startup
64 Implement random canary at end of (re|m)alloced/memaligned buffer,
65 check at free/realloc
66 Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
67 Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
68
69 Profiling: Reorder branches in hashtable_add, next_chunkinfo,
70 prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
71 checked_request2size (gcc predicts if{} branch to be taken).
72 Use UNLIKELY macro (gcc __builtin_expect()) where branch
73 reordering would make the code awkward.
74
75 Portability: Hashtable always covers full 32bit address space to
76 avoid assumptions about memory layout.
77 Portability: Try hard to enforce mapping of mmapped memory into
78 32bit address space, even on 64bit systems.
79 Portability: Provide a dnmalloc_pthread_init() function, since
80 pthread locking on HP-UX only works if initialized
81 after the application has entered main().
82 Portability: On *BSD, pthread_mutex_lock is unusable since it
83 calls malloc, use spinlocks instead.
84 Portability: Dynamically detect whether the heap is within
85 32bit address range (e.g. on Linux x86_64, it isn't).
86 Don't use sbrk() if the heap is mapped to an address
87 outside the 32bit range, since this doesn't work with
88 the hashtable. New macro morecore32bit.
89
90 Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
91 FreeBSD/pthread, and Solaris 10 i386/pthread.
92 Fail on: OpenBSD/pthread (in _thread_machdep_save_float_state),
93 might be related to OpenBSD pthread internals (??).
[228]94 Non-threaded version (#undef USE_MALLOC_LOCK)
[171]95 works on OpenBSD.
96
[172]97 further to 1.0:
98 Valgrind client requests inserted (#define USE_VALGRIND)
[173]99 Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
[172]100 Portability: minsize = 32 bit on 64bit architecture
101 Minor cleanups
[173]102 Fix: eliminate prototypes for memset, memcpy (they're in string.h)
[228]103 Fix: one more malloc_consolidate segfault
[172]104
[171]105 There may be some bugs left in this version. Please use with caution.
106*/
107
108
109
110/* Please read the following papers for documentation:
111
112 Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
113 Countermeasures against Current and Future Code Injection Attacks,
114 Proceedings of the Third IEEE International Information Assurance
115 Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
116 IEEE, IEEE Press.
117 http://www.fort-knox.org/younany_countermeasures.pdf
118
119 Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
120 Eynden. Security of Memory Allocators for C and C++. Technical Report
121 CW419, Departement Computerwetenschappen, Katholieke Universiteit
122 Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
123
124 */
125
126/* Compile:
127 gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
128 "Link":
129 Dynamic:
130 gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
131 Static:
132 ar -rv libdnmalloc.a dnmalloc-portable.o
133
134*/
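/*
   Usage sketch (illustrative only, not shipped with this file): a small
   test program linked against the static library built as above, e.g.
   "gcc test.c libdnmalloc.a" (add -lpthread if the library was built
   with USE_MALLOC_LOCK). Since USE_DL_PREFIX is not defined by default,
   the library's malloc, realloc and free simply replace the libc ones:

     #include <stdlib.h>
     #include <string.h>

     extern int dnmalloc_pthread_init(void);

     int main(void)
     {
       char *p, *q;
       dnmalloc_pthread_init();
       p = malloc(100);
       if (p == NULL)
         return 1;
       strcpy(p, "hello dnmalloc");
       q = realloc(p, 200);
       if (q != NULL)
         p = q;
       free(p);
       return 0;
     }
*/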
135
136/*
137 dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
138 dlmalloc was released as public domain and contained the following license:
139
140 "This is a version (aka dlmalloc) of malloc/free/realloc written by
141 Doug Lea and released to the public domain. Use, modify, and
142 redistribute this code without permission or acknowledgement in any
143 way you wish. Send questions, comments, complaints, performance
144 data, etc to dl@cs.oswego.edu
145
146 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
147
148 Note: There may be an updated version of this malloc obtainable at
149 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
150 Check before installing!"
151
152*/
153
154/* The following preprocessor macros are tested,
155 * and hence should have #define directives:
156 *
157 * HAVE_CONFIG_H Define to #include "config.h" (autoconf-generated)
158 *
159 * HAVE_UNISTD_H Define to #include <unistd.h>
160 *
161 * HAVE_SYS_UIO_H Define to #include <sys/uio.h> (for writev)
162 * HAVE_WRITEV Define if the 'writev' function is available
163 *
164 * HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
165 *
166 * HAVE_MALLOC_H Define to #include <malloc.h> (for struct mallinfo)
167 *
168 * HAVE_FCNTL_H Define to #include <fcntl.h>
169 *
170 * HAVE_SYS_MMAN_H Define to #include <sys/mman.h>
171 * HAVE_MMAP Define if the 'mmap' function is available.
172 *
173 * HAVE_SCHED_H Define to #include <sched.h>
174 * HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
175 */
176
177
178/*
179 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
180 compiler, or a C compiler sufficiently close to ANSI to get away
181 with it.
182*/
183
184#ifdef HAVE_CONFIG_H
185#include "config.h"
186#endif
187
[172]188#ifdef USE_VALGRIND
189#include <valgrind/memcheck.h>
190#else
191#define VALGRIND_FREELIKE_BLOCK(a,b) ((void)0)
192#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
193#define VALGRIND_CREATE_MEMPOOL(a,b,c) ((void)0)
194#define VALGRIND_MEMPOOL_ALLOC(a,b,c) ((void)0)
195#define VALGRIND_MEMPOOL_FREE(a,b) ((void)0)
196#define VALGRIND_DESTROY_MEMPOOL(a) ((void)0)
197#define VALGRIND_MAKE_MEM_DEFINED(a,b) ((void)0)
198#define VALGRIND_MAKE_MEM_UNDEFINED(a,b) ((void)0)
199#define VALGRIND_MAKE_MEM_NOACCESS(a,b) ((void)0)
200#endif
201
[171]202#if defined (__GNUC__) && __GNUC__ > 2
203# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
204# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
205# define __attribute_malloc__ __attribute__ ((__malloc__))
206#else
207# define LIKELY(x) (x)
208# define UNLIKELY(x) (x)
209# define __attribute_malloc__ /* Ignore */
210#endif
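/*
   Example (sketch) of the profiling-driven use of these macros described
   in the version history above: error paths are wrapped in UNLIKELY so
   that gcc lays out the common case as the straight-line branch.

     if (UNLIKELY(victim == NULL)) {
       MALLOC_FAILURE_ACTION;
       return NULL;
     }

   ('victim' is a placeholder name; MALLOC_FAILURE_ACTION is defined
   further below.) On compilers without __builtin_expect the macros
   expand to plain expressions and cost nothing.
*/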
211
212/*
213 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
214 large blocks. This is currently only possible on Linux with
215 kernel versions newer than 1.3.77.
216*/
217
218#ifndef HAVE_MREMAP
219#ifdef linux
220#define HAVE_MREMAP 1
221#define _GNU_SOURCE
222#else
223#define HAVE_MREMAP 0
224#endif
225#endif /* HAVE_MREMAP */
226
227
228
229#ifndef __STD_C
230#if defined(__STDC__) || defined(__cplusplus)
231#define __STD_C 1
232#else
233#define __STD_C 0
234#endif
235#endif /*__STD_C*/
236
237
238/*
239 Void_t* is the pointer type that malloc should say it returns
240*/
241
242#ifndef Void_t
243#if (__STD_C || defined(WIN32))
244#define Void_t void
245#else
246#define Void_t char
247#endif
248#endif /*Void_t*/
249
250#if __STD_C
251#include <stddef.h> /* for size_t */
252#else
253#include <sys/types.h>
254#endif
255
256#if !defined(USE_SYSTEM_MALLOC)
257
258#ifdef __cplusplus
259extern "C" {
260#endif
261
262/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
263
264#ifdef HAVE_UNISTD_H
265#include <unistd.h>
266#endif
267
268#ifdef HAVE_SYS_UIO_H
269#include <sys/uio.h>
270#endif
271
272#include <stdio.h> /* needed for malloc_stats */
273#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
274
275#include <string.h>
276#include <stdlib.h>
277
278#include <sys/resource.h>
279
280extern int errno;
281
282 /* 0: lazy,
283 * 1: medium (assertions compiled in),
284 * 2: high (guard pages at end of hash table and ciregions)
285 * 3: paranoid (guards at end of each allocated chunk, check at free)
286 */
287#ifndef PARANOIA
288#define PARANOIA 9
289#endif
290
291 /* Using assert() with multithreading will cause the code
292 * to deadlock since glibc __assert_fail will call malloc().
293 * We need our very own assert().
294 */
295typedef void assert_handler_tp(const char * error, const char *file, int line);
296
297#if PARANOIA > 0
298
299#ifdef NDEBUG
300#undef NDEBUG
301#endif
302
303static void default_assert_handler(const char *error,
304 const char *file, int line)
305{
306#ifdef HAVE_WRITEV
307 struct iovec iov[5];
308 char * i1 = "assertion failed (";
309 char * i3 = "): ";
310 char * i5 = "\n";
[279]311 int res = 0;
[171]312
313 iov[0].iov_base = i1; iov[0].iov_len = strlen(i1);
314 iov[1].iov_base = (char*) file; iov[1].iov_len = strlen(file);
315 iov[2].iov_base = i3; iov[2].iov_len = strlen(i3);
316 iov[3].iov_base = (char*) error; iov[3].iov_len = strlen(error);
[279]317 iov[4].iov_base = i5; iov[4].iov_len = strlen(i5);
318 do {
319 res = writev(STDERR_FILENO, iov, 5);
320 } while (res < 0 && errno == EINTR);
[171]321#else
322 fputs("assertion failed (", stderr);
323 fputs(file, stderr);
324 fputs("): ", stderr);
325 fputs(error, stderr);
326 fputc('\n', stderr);
327#endif
[172]328 (void) line;
[171]329 abort();
330}
331static assert_handler_tp *assert_handler = default_assert_handler;
332
[172]333
[171]334#define assert(x) \
335 do { \
336 if (UNLIKELY(!(x))) { \
337 assert_handler(#x, __FILE__, __LINE__); \
338 } \
339 } while (0)
340
341#else
342
343static assert_handler_tp *assert_handler = NULL;
344#define NDEBUG
345#define assert(x) ((void)0)
346
347#endif
348
349assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *new)
350{
351 assert_handler_tp *old = assert_handler;
352 assert_handler = new;
353 return old;
354}
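/*
   Example (illustrative): an application may install its own handler and
   restore the previous one later. 'my_handler' is a hypothetical name;
   the handler should not call malloc()/free() itself, and should not
   return if heap corruption is suspected.

     static void my_handler(const char *error, const char *file, int line)
     {
       (void) error; (void) file; (void) line;
       write(STDERR_FILENO, "heap check failed\n", 18);
       _exit(EXIT_FAILURE);
     }

     assert_handler_tp *old = dnmalloc_set_handler(my_handler);
     ...
     dnmalloc_set_handler(old);
*/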
355
356
357#include <stdarg.h>
358
359 /* define for debugging */
360 /* #define DNMALLOC_DEBUG */
361
[172]362 /* Do some extra checks? If not, they are covered by assert()s */
[171]363 /* #define DNMALLOC_CHECKS */
364
365 /*
366 The unsigned integer type used for comparing any two chunk sizes.
367 This should be at least as wide as size_t, but should not be signed.
368 */
369
370#ifndef CHUNK_SIZE_T
371#define CHUNK_SIZE_T unsigned long
372#endif
373
374/*
375 The unsigned integer type used to hold addresses when they are
376 manipulated as integers. Except that it is not defined on all
377 systems, intptr_t would suffice.
378*/
379#ifndef PTR_UINT
380#define PTR_UINT unsigned long
381#endif
382
383
384/*
385 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
386 of chunk sizes.
387
388 The default version is the same as size_t.
389
390 While not strictly necessary, it is best to define this as an
391 unsigned type, even if size_t is a signed type. This may avoid some
392 artificial size limitations on some systems.
393
394 On a 64-bit machine, you may be able to reduce malloc overhead by
395 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
396 expense of not being able to handle more than 2^32 of malloced
397 space. If this limitation is acceptable, you are encouraged to set
398 this unless you are on a platform requiring 16byte alignments. In
399 this case the alignment requirements turn out to negate any
400 potential advantages of decreasing size_t word size.
401
402 Implementors: Beware of the possible combinations of:
403 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
404 and might be the same width as int or as long
405 - size_t might have different width and signedness than INTERNAL_SIZE_T
406 - int and long might be 32 or 64 bits, and might be the same width
407 To deal with this, most comparisons and difference computations
408 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
409 aware of the fact that casting an unsigned int to a wider long does
410 not sign-extend. (This also makes checking for negative numbers
411 awkward.) Some of these casts result in harmless compiler warnings
412 on some systems.
413*/
414
415#ifndef INTERNAL_SIZE_T
416#define INTERNAL_SIZE_T size_t
417#endif
418
419/* The corresponding word size */
420#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
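/*
   Example (sketch) of the casting convention described above; 'size' and
   'nb' stand for arbitrary INTERNAL_SIZE_T values (a chunk size and a
   normalized request size):

     if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(nb))
       ... chunk too small, look elsewhere ...

   The casts force an unsigned comparison at the full width of
   CHUNK_SIZE_T even if INTERNAL_SIZE_T is configured as a narrower
   unsigned int.
*/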
421
422
423
424/*
425 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
426 It must be a power of two at least 2 * SIZE_SZ, even on machines
427 for which smaller alignments would suffice. It may be defined as
428 larger than this though. Note however that code and data structures
429 are optimized for the case of 8-byte alignment.
430*/
431
432
433#ifndef MALLOC_ALIGNMENT
434#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
435#endif
436
437/* The corresponding bit mask value */
438#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
439
440
441
442/*
443 REALLOC_ZERO_BYTES_FREES should be set if a call to
444 realloc with zero bytes should be the same as a call to free.
445 Some people think it should. Otherwise, since this malloc
446 returns a unique pointer for malloc(0), so does realloc(p, 0).
447*/
448
449#define REALLOC_ZERO_BYTES_FREES
450
451/*
452 TRIM_FASTBINS controls whether free() of a very small chunk can
453 immediately lead to trimming. Setting to true (1) can reduce memory
454 footprint, but will almost always slow down programs that use a lot
455 of small chunks.
456
457 Define this only if you are willing to give up some speed to more
458 aggressively reduce system-level memory footprint when releasing
459 memory in programs that use many small chunks. You can get
460 essentially the same effect by setting MXFAST to 0, but this can
461 lead to even greater slowdowns in programs using many small chunks.
462 TRIM_FASTBINS is an in-between compile-time option, that disables
463 only those chunks bordering topmost memory from being placed in
464 fastbins.
465*/
466
467#ifndef TRIM_FASTBINS
468#define TRIM_FASTBINS 0
469#endif
470
471
472/*
473 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
474 This is necessary when you only want to use this malloc in one part
475 of a program, using your regular system malloc elsewhere.
476*/
477
478/* #define USE_DL_PREFIX */
479
480
481/*
482 USE_MALLOC_LOCK causes wrapper functions to surround each
483 callable routine with pthread mutex lock/unlock.
484
485 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
486*/
487
488/* #define USE_MALLOC_LOCK */
489
490
491/*
492 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
493 actually a wrapper function that first calls MALLOC_PREACTION, then
494 calls the internal routine, and follows it with
495 MALLOC_POSTACTION. This is needed for locking, but you can also use
496 this, without USE_MALLOC_LOCK, for purposes of interception,
497 instrumentation, etc. It is a sad fact that using wrappers often
498 noticeably degrades performance of malloc-intensive programs.
499*/
500
501
502#ifdef USE_MALLOC_LOCK
503#define USE_PUBLIC_MALLOC_WRAPPERS
504#else
505/* #define USE_PUBLIC_MALLOC_WRAPPERS */
506#endif
507
508
509/*
510 Two-phase name translation.
511 All of the actual routines are given mangled names.
512 When wrappers are used, they become the public callable versions.
513 When DL_PREFIX is used, the callable names are prefixed.
514*/
515
516#ifndef USE_PUBLIC_MALLOC_WRAPPERS
517#define cALLOc public_cALLOc
518#define fREe public_fREe
519#define mALLOc public_mALLOc
520#define mEMALIGn public_mEMALIGn
521#define posix_mEMALIGn public_posix_mEMALIGn
522#define rEALLOc public_rEALLOc
523#define vALLOc public_vALLOc
524#define pVALLOc public_pVALLOc
525#define mALLINFo public_mALLINFo
526#define mALLOPt public_mALLOPt
527#define mTRIm public_mTRIm
528#define mSTATs public_mSTATs
529#define mUSABLe public_mUSABLe
530#endif
531
532#ifdef USE_DL_PREFIX
533#define public_cALLOc dlcalloc
534#define public_fREe dlfree
535#define public_mALLOc dlmalloc
536#define public_mEMALIGn dlmemalign
537#define public_posix_mEMALIGn dlposix_memalign
538#define public_rEALLOc dlrealloc
539#define public_vALLOc dlvalloc
540#define public_pVALLOc dlpvalloc
541#define public_mALLINFo dlmallinfo
542#define public_mALLOPt dlmallopt
543#define public_mTRIm dlmalloc_trim
544#define public_mSTATs dlmalloc_stats
545#define public_mUSABLe dlmalloc_usable_size
546#else /* USE_DL_PREFIX */
547#define public_cALLOc calloc
548#define public_fREe free
549#define public_mALLOc malloc
550#define public_mEMALIGn memalign
551#define public_posix_mEMALIGn posix_memalign
552#define public_rEALLOc realloc
553#define public_vALLOc valloc
554#define public_pVALLOc pvalloc
555#define public_mALLINFo mallinfo
556#define public_mALLOPt mallopt
557#define public_mTRIm malloc_trim
558#define public_mSTATs malloc_stats
559#define public_mUSABLe malloc_usable_size
560#endif /* USE_DL_PREFIX */
561
562
563/*
564 HAVE_MEMCPY should be defined if you are not otherwise using
565 ANSI STD C, but still have memcpy and memset in your C library
566 and want to use them in calloc and realloc. Otherwise simple
567 macro versions are defined below.
568
569 USE_MEMCPY should be defined as 1 if you actually want to
570 have memset and memcpy called. People report that the macro
571 versions are faster than libc versions on some systems.
572
573 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
574 (of <= 36 bytes) are manually unrolled in realloc and calloc.
575*/
576
577#ifndef HAVE_MEMCPY
578#define HAVE_MEMCPY
579#endif
580
581#ifndef USE_MEMCPY
582#ifdef HAVE_MEMCPY
583#define USE_MEMCPY 1
584#else
585#define USE_MEMCPY 0
586#endif
587#endif
588
589
590#if (__STD_C || defined(HAVE_MEMCPY))
591
592#ifdef WIN32
[173]593 /* On Win32 memset and memcpy are already declared in windows.h */
[171]594#else
595#if __STD_C
[173]596 /* Defined in string.h */
[171]597#else
598Void_t* memset();
599Void_t* memcpy();
600#endif
601#endif
602#endif
603
604/*
605 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
606 malloc fails to be able to return memory, either because memory is
607 exhausted or because of illegal arguments.
608
609 By default, sets errno if running on STD_C platform, else does nothing.
610*/
611
612#ifndef MALLOC_FAILURE_ACTION
613#if __STD_C
614#define MALLOC_FAILURE_ACTION \
615 errno = ENOMEM;
616
617#else
618#define MALLOC_FAILURE_ACTION
619#endif
620#endif
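/*
   From the caller's point of view (illustrative):

     void *p = malloc(n);
     if (p == NULL && errno == ENOMEM)
       ... the allocation failed, either out of memory or n too large ...
*/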
621
622/*
623 MORECORE-related declarations. By default, rely on sbrk
624*/
625
626
627#if !defined(HAVE_UNISTD_H)
628#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
629#if __STD_C
630extern Void_t* sbrk(ptrdiff_t);
631#else
632extern Void_t* sbrk();
633#endif
634#endif
635#endif
636
637/*
638 MORECORE_FAILURE is the value returned upon failure of MORECORE
639 as well as mmap. Since it cannot be an otherwise valid memory address,
640 and must reflect values of standard sys calls, you probably ought not
641 try to redefine it.
642*/
643
644#ifndef MORECORE_FAILURE
645#define MORECORE_FAILURE ((void*)(-1UL))
646#endif
647
648/*
649 MORECORE is the name of the routine to call to obtain more memory
650 from the system. See below for general guidance on writing
651 alternative MORECORE functions, as well as a version for WIN32 and a
652 sample version for pre-OSX macos.
653*/
654
655#ifndef MORECORE
656#define MORECORE sbrk
657#endif
658
659
660/*
661 If MORECORE_CONTIGUOUS is true, take advantage of the fact that
662 consecutive calls to MORECORE with positive arguments always return
663 contiguous increasing addresses. This is true of unix sbrk. Even
664 if not defined, when regions happen to be contiguous, malloc will
665 permit allocations spanning regions obtained from different
666 calls. But defining this when applicable enables some stronger
667 consistency checks and space efficiencies.
668*/
669
670#ifndef MORECORE_CONTIGUOUS
671#define MORECORE_CONTIGUOUS 1
672#endif
673
674/*
675 Define MORECORE_CANNOT_TRIM if your version of MORECORE
676 cannot release space back to the system when given negative
677 arguments. This is generally necessary only if you are using
678 a hand-crafted MORECORE function that cannot handle negative arguments.
679*/
680
681/* #define MORECORE_CANNOT_TRIM */
682
683
684/*
685 This malloc requires mmap for heap management data. It is an error
686 if mmap is not available.
687
688 Additionally, mmap will be used to satisfy large requests.
689*/
690
691#ifndef HAVE_MMAP
[172]692# error HAVE_MMAP not defined, does your operating system have mmap?
[171]693#endif
694
695/*
696 Standard unix mmap using /dev/zero clears memory so calloc doesn't
697 need to.
698*/
699
700#ifndef MMAP_CLEARS
701#define MMAP_CLEARS 1
702#endif
703
704
705/*
706 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
707 sbrk fails, and mmap is used as a backup (which is done only if
708 HAVE_MMAP). The value must be a multiple of page size. This
709 backup strategy generally applies only when systems have "holes" in
710 address space, so sbrk cannot perform contiguous expansion, but
711 there is still space available on system. On systems for which
712 this is known to be useful (i.e. most linux kernels), this occurs
713 only when programs allocate huge amounts of memory. Between this,
714 and the fact that mmap regions tend to be limited, the size should
715 be large, to avoid too many mmap calls and thus avoid running out
716 of kernel resources.
717*/
718
719#ifndef MMAP_AS_MORECORE_SIZE
720#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
721#endif
722
723
724/*
725 The system page size. To the extent possible, this malloc manages
726 memory from the system in page-size units. Note that this value is
727 cached during initialization into a field of malloc_state. So even
728 if malloc_getpagesize is a function, it is only called once.
729
730 The following mechanics for getpagesize were adapted from bsd/gnu
731 getpagesize.h. If none of the system-probes here apply, a value of
732 4096 is used, which should be OK: If they don't apply, then using
733 the actual value probably doesn't impact performance.
734*/
735
736
737#ifndef malloc_getpagesize
738
739# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
740# ifndef _SC_PAGE_SIZE
741# define _SC_PAGE_SIZE _SC_PAGESIZE
742# endif
743# endif
744
745# ifdef _SC_PAGE_SIZE
746# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
747# else
748# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
749 extern size_t getpagesize();
750# define malloc_getpagesize getpagesize()
751# else
752# ifdef WIN32 /* use supplied emulation of getpagesize */
753# define malloc_getpagesize getpagesize()
754# else
755# if defined(HAVE_SYS_PARAM_H)
756# include <sys/param.h>
757# endif
758# ifdef EXEC_PAGESIZE
759# define malloc_getpagesize EXEC_PAGESIZE
760# else
761# ifdef NBPG
762# ifndef CLSIZE
763# define malloc_getpagesize NBPG
764# else
765# define malloc_getpagesize (NBPG * CLSIZE)
766# endif
767# else
768# ifdef NBPC
769# define malloc_getpagesize NBPC
770# else
771# ifdef PAGESIZE
772# define malloc_getpagesize PAGESIZE
773# else /* just guess */
774# define malloc_getpagesize (4096)
775# endif
776# endif
777# endif
778# endif
779# endif
780# endif
781# endif
782#endif
783
784/*
785 This version of malloc supports the standard SVID/XPG mallinfo
786 routine that returns a struct containing usage properties and
787 statistics. It should work on any SVID/XPG compliant system that has
788 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
789 install such a thing yourself, cut out the preliminary declarations
790 as described above and below and save them in a malloc.h file. But
791 there's no compelling reason to bother to do this.)
792
793 The main declaration needed is the mallinfo struct that is returned
794 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
795 bunch of fields that are not even meaningful in this version of
796 malloc. These fields are instead filled by mallinfo() with
797 other numbers that might be of interest.
798
799 HAVE_MALLOC_H should be set if you have a
800 /usr/include/malloc.h file that includes a declaration of struct
801 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
802 version is declared below. These must be precisely the same for
803 mallinfo() to work. The original SVID version of this struct,
804 defined on most systems with mallinfo, declares all fields as
805 ints. But some others define as unsigned long. If your system
806 defines the fields using a type of different width than listed here,
807 you must #include your system version and #define
808 HAVE_MALLOC_H.
809*/
810
811/* #define HAVE_MALLOC_H */
812
813/* On *BSD, malloc.h is deprecated, and on some *BSD systems including
814 * it may actually raise an error.
815 */
816#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
817#include <malloc.h>
818#else
819
820/* SVID2/XPG mallinfo structure */
821
822struct mallinfo {
823 int arena; /* non-mmapped space allocated from system */
824 int ordblks; /* number of free chunks */
825 int smblks; /* number of fastbin blocks */
826 int hblks; /* number of mmapped regions */
827 int hblkhd; /* space in mmapped regions */
828 int usmblks; /* maximum total allocated space */
829 int fsmblks; /* space available in freed fastbin blocks */
830 int uordblks; /* total allocated space */
831 int fordblks; /* total free space */
832 int keepcost; /* top-most, releasable (via malloc_trim) space */
833};
834
835/*
836 SVID/XPG defines four standard parameter numbers for mallopt,
837 normally defined in malloc.h. Only one of these (M_MXFAST) is used
838 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
839 so setting them has no effect. But this malloc also supports other
840 options in mallopt described below.
841*/
842#endif
843
844
845/* ---------- description of public routines ------------ */
846
847/*
848 malloc(size_t n)
849 Returns a pointer to a newly allocated chunk of at least n bytes, or null
850 if no space is available. Additionally, on failure, errno is
851 set to ENOMEM on ANSI C systems.
852
853 If n is zero, malloc returns a minimum-sized chunk. (The minimum
854 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
855 systems.) On most systems, size_t is an unsigned type, so calls
856 with negative arguments are interpreted as requests for huge amounts
857 of space, which will often fail. The maximum supported value of n
858 differs across systems, but is in all cases less than the maximum
859 representable value of a size_t.
860*/
861#if __STD_C
862Void_t* public_mALLOc(size_t) __attribute_malloc__;
863#else
864Void_t* public_mALLOc();
865#endif
866
867/*
868 free(Void_t* p)
869 Releases the chunk of memory pointed to by p, that had been previously
870 allocated using malloc or a related routine such as realloc.
871 It has no effect if p is null. It can have arbitrary (i.e., bad!)
872 effects if p has already been freed.
873
874 Unless disabled (using mallopt), freeing very large spaces will,
875 when possible, automatically trigger operations that give
876 back unused memory to the system, thus reducing program footprint.
877*/
878#if __STD_C
879void public_fREe(Void_t*);
880#else
881void public_fREe();
882#endif
883
884/*
885 calloc(size_t n_elements, size_t element_size);
886 Returns a pointer to n_elements * element_size bytes, with all locations
887 set to zero.
888*/
889#if __STD_C
890Void_t* public_cALLOc(size_t, size_t) __attribute_malloc__;
891#else
892Void_t* public_cALLOc();
893#endif
894
895/*
896 realloc(Void_t* p, size_t n)
897 Returns a pointer to a chunk of size n that contains the same data
898 as does chunk p up to the minimum of (n, p's size) bytes, or null
899 if no space is available.
900
901 The returned pointer may or may not be the same as p. The algorithm
902 prefers extending p when possible, otherwise it employs the
903 equivalent of a malloc-copy-free sequence.
904
905 If p is null, realloc is equivalent to malloc.
906
907 If space is not available, realloc returns null, errno is set (if on
908 ANSI) and p is NOT freed.
909
910 if n is for fewer bytes than already held by p, the newly unused
911 space is lopped off and freed if possible. Unless the #define
912 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
913 zero (re)allocates a minimum-sized chunk.
914
915 Large chunks that were internally obtained via mmap will always
916 be reallocated using malloc-copy-free sequences unless
917 the system supports MREMAP (currently only linux).
918
919 The old unix realloc convention of allowing the last-free'd chunk
920 to be used as an argument to realloc is not supported.
921*/
922#if __STD_C
923Void_t* public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
924#else
925Void_t* public_rEALLOc();
926#endif
927
928/*
929 memalign(size_t alignment, size_t n);
930 Returns a pointer to a newly allocated chunk of n bytes, aligned
931 in accord with the alignment argument.
932
933 The alignment argument should be a power of two. If the argument is
934 not a power of two, the nearest greater power is used.
935 8-byte alignment is guaranteed by normal malloc calls, so don't
936 bother calling memalign with an argument of 8 or less.
937
938 Overreliance on memalign is a sure way to fragment space.
939*/
940#if __STD_C
941Void_t* public_mEMALIGn(size_t, size_t) __attribute_malloc__;
942#else
943Void_t* public_mEMALIGn();
944#endif
945
946/*
947 posix_memalign(void** memptr, size_t alignment, size_t n);
948 Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
949 in accord with the alignment argument. Returns 0 on success, otherwise
950 an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
951
952 The alignment must be a power of two, and a multiple of sizeof(void *).
953*/
954#if __STD_C
955int public_posix_mEMALIGn(Void_t**, size_t, size_t);
956#else
957int public_posix_mEMALIGn();
958#endif
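/*
   Example (illustrative):

     void *buf = NULL;
     if (posix_memalign(&buf, 4096, 8192) != 0)
       ... failed, the return value is EINVAL or ENOMEM ...
     ... buf is 4096-byte aligned, release it with free(buf) ...
*/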
959
960/*
961 valloc(size_t n);
962 Equivalent to memalign(pagesize, n), where pagesize is the page
963 size of the system. If the pagesize is unknown, 4096 is used.
964*/
965#if __STD_C
966Void_t* public_vALLOc(size_t) __attribute_malloc__;
967#else
968Void_t* public_vALLOc();
969#endif
970
971
972
973/*
974 mallopt(int parameter_number, int parameter_value)
975 Sets tunable parameters. The format is to provide a
976 (parameter-number, parameter-value) pair. mallopt then sets the
977 corresponding parameter to the argument value if it can (i.e., so
978 long as the value is meaningful), and returns 1 if successful else
979 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
980 normally defined in malloc.h. Only one of these (M_MXFAST) is used
981 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
982 so setting them has no effect. But this malloc also supports four
983 other options in mallopt. See below for details. Briefly, supported
984 parameters are as follows (listed defaults are for "typical"
985 configurations).
986
987 Symbol param # default allowed param values
988 M_MXFAST 1 64 0-80 (0 disables fastbins)
989 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
990 M_TOP_PAD -2 0 any
991 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
992 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
993*/
994#if __STD_C
995int public_mALLOPt(int, int);
996#else
997int public_mALLOPt();
998#endif
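/*
   Example (illustrative, values are arbitrary): trade some speed for a
   smaller footprint by disabling fastbins and trimming/mmapping more
   eagerly:

     mallopt(M_MXFAST, 0);
     mallopt(M_TRIM_THRESHOLD, 128*1024);
     mallopt(M_MMAP_THRESHOLD, 192*1024);

   Each call returns 1 if the parameter was accepted and 0 otherwise.
*/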
999
1000
1001/*
1002 mallinfo()
1003 Returns (by copy) a struct containing various summary statistics:
1004
1005 arena: current total non-mmapped bytes allocated from system
1006 ordblks: the number of free chunks
1007 smblks: the number of fastbin blocks (i.e., small chunks that
1008 have been freed but not yet reused or consolidated)
1009 hblks: current number of mmapped regions
1010 hblkhd: total bytes held in mmapped regions
1011 usmblks: the maximum total allocated space. This will be greater
1012 than current total if trimming has occurred.
1013 fsmblks: total bytes held in fastbin blocks
1014 uordblks: current total allocated space (normal or mmapped)
1015 fordblks: total free space
1016 keepcost: the maximum number of bytes that could ideally be released
1017 back to system via malloc_trim. ("ideally" means that
1018 it ignores page restrictions etc.)
1019
1020 Because these fields are ints, but internal bookkeeping may
1021 be kept as longs, the reported values may wrap around zero and
1022 thus be inaccurate.
1023*/
1024#if __STD_C
1025struct mallinfo public_mALLINFo(void);
1026#else
1027struct mallinfo public_mALLINFo();
1028#endif
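/*
   Example (illustrative, using the default non-prefixed names):

     struct mallinfo mi = mallinfo();
     fprintf(stderr, "in use: %d  free: %d  mmapped: %d\n",
             mi.uordblks, mi.fordblks, mi.hblkhd);
*/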
1029
1030/*
1031 pvalloc(size_t n);
1032 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1033 round up n to nearest pagesize.
1034 */
1035#if __STD_C
1036Void_t* public_pVALLOc(size_t) __attribute_malloc__;
1037#else
1038Void_t* public_pVALLOc();
1039#endif
1040
1041/*
1042 malloc_trim(size_t pad);
1043
1044 If possible, gives memory back to the system (via negative
1045 arguments to sbrk) if there is unused memory at the `high' end of
1046 the malloc pool. You can call this after freeing large blocks of
1047 memory to potentially reduce the system-level memory requirements
1048 of a program. However, it cannot guarantee to reduce memory. Under
1049 some allocation patterns, some large free blocks of memory will be
1050 locked between two used chunks, so they cannot be given back to
1051 the system.
1052
1053 The `pad' argument to malloc_trim represents the amount of free
1054 trailing space to leave untrimmed. If this argument is zero,
1055 only the minimum amount of memory to maintain internal data
1056 structures will be left (one page or less). Non-zero arguments
1057 can be supplied to maintain enough trailing space to service
1058 future expected allocations without having to re-obtain memory
1059 from the system.
1060
1061 Malloc_trim returns 1 if it actually released any memory, else 0.
1062 On systems that do not support "negative sbrks", it will always
1063 return 0.
1064*/
1065#if __STD_C
1066int public_mTRIm(size_t);
1067#else
1068int public_mTRIm();
1069#endif
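/*
   Example (illustrative): after releasing a large cache, try to return
   memory to the system while keeping 64k of headroom for future
   allocations:

     if (malloc_trim(64 * 1024))
       ... some memory was actually given back to the system ...
*/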
1070
1071/*
1072 malloc_usable_size(Void_t* p);
1073
1074 Returns the number of bytes you can actually use in
1075 an allocated chunk, which may be more than you requested (although
1076 often not) due to alignment and minimum size constraints.
1077 You can use this many bytes without worrying about
1078 overwriting other allocated objects. This is not a particularly great
1079 programming practice. malloc_usable_size can be more useful in
1080 debugging and assertions, for example:
1081
1082 p = malloc(n);
1083 assert(malloc_usable_size(p) >= 256);
1084
1085*/
1086#if __STD_C
1087size_t public_mUSABLe(Void_t*);
1088#else
1089size_t public_mUSABLe();
1090#endif
1091
1092/*
1093 malloc_stats();
1094 Prints on stderr the amount of space obtained from the system (both
1095 via sbrk and mmap), the maximum amount (which may be more than
1096 current if malloc_trim and/or munmap got called), and the current
1097 number of bytes allocated via malloc (or realloc, etc) but not yet
1098 freed. Note that this is the number of bytes allocated, not the
1099 number requested. It will be larger than the number requested
1100 because of alignment and bookkeeping overhead. Because it includes
1101 alignment wastage as being in use, this figure may be greater than
1102 zero even when no user-level chunks are allocated.
1103
1104 The reported current and maximum system memory can be inaccurate if
1105 a program makes other calls to system memory allocation functions
1106 (normally sbrk) outside of malloc.
1107
1108 malloc_stats prints only the most commonly interesting statistics.
1109 More information can be obtained by calling mallinfo.
1110
1111*/
1112#if __STD_C
1113void public_mSTATs(void);
1114#else
1115void public_mSTATs();
1116#endif
1117
1118/* mallopt tuning options */
1119
1120/*
1121 M_MXFAST is the maximum request size used for "fastbins", special bins
1122 that hold returned chunks without consolidating their spaces. This
1123 enables future requests for chunks of the same size to be handled
1124 very quickly, but can increase fragmentation, and thus increase the
1125 overall memory footprint of a program.
1126
1127 This malloc manages fastbins very conservatively yet still
1128 efficiently, so fragmentation is rarely a problem for values less
1129 than or equal to the default. The maximum supported value of MXFAST
1130 is 80. You wouldn't want it any higher than this anyway. Fastbins
1131 are designed especially for use with many small structs, objects or
1132 strings -- the default handles structs/objects/arrays with sizes up
1133 to 16 4byte fields, or small strings representing words, tokens,
1134 etc. Using fastbins for larger objects normally worsens
1135 fragmentation without improving speed.
1136
1137 M_MXFAST is set in REQUEST size units. It is internally used in
1138 chunksize units, which adds padding and alignment. You can reduce
1139 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1140 algorithm to be a closer approximation of fifo-best-fit in all cases,
1141 not just for larger requests, but will generally cause it to be
1142 slower.
1143*/
1144
1145
1146/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1147#ifndef M_MXFAST
1148#define M_MXFAST 1
1149#endif
1150
1151#ifndef DEFAULT_MXFAST
1152#define DEFAULT_MXFAST 64
1153#endif
1154
1155
1156/*
1157 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1158 to keep before releasing via malloc_trim in free().
1159
1160 Automatic trimming is mainly useful in long-lived programs.
1161 Because trimming via sbrk can be slow on some systems, and can
1162 sometimes be wasteful (in cases where programs immediately
1163 afterward allocate more large chunks) the value should be high
1164 enough so that your overall system performance would improve by
1165 releasing this much memory.
1166
1167 The trim threshold and the mmap control parameters (see below)
1168 can be traded off with one another. Trimming and mmapping are
1169 two different ways of releasing unused memory back to the
1170 system. Between these two, it is often possible to keep
1171 system-level demands of a long-lived program down to a bare
1172 minimum. For example, in one test suite of sessions measuring
1173 the XF86 X server on Linux, using a trim threshold of 128K and a
1174 mmap threshold of 192K led to near-minimal long term resource
1175 consumption.
1176
1177 If you are using this malloc in a long-lived program, it should
1178 pay to experiment with these values. As a rough guide, you
1179 might set to a value close to the average size of a process
1180 (program) running on your system. Releasing this much memory
1181 would allow such a process to run in memory. Generally, it's
1182 worth it to tune for trimming rather than memory mapping when a
1183 program undergoes phases where several large chunks are
1184 allocated and released in ways that can reuse each other's
1185 storage, perhaps mixed with phases where there are no such
1186 chunks at all. And in well-behaved long-lived programs,
1187 controlling release of large blocks via trimming versus mapping
1188 is usually faster.
1189
1190 However, in most programs, these parameters serve mainly as
1191 protection against the system-level effects of carrying around
1192 massive amounts of unneeded memory. Since frequent calls to
1193 sbrk, mmap, and munmap otherwise degrade performance, the default
1194 parameters are set to relatively high values that serve only as
1195 safeguards.
1196
1197 The trim value must be greater than page size to have any useful
1198 effect. To disable trimming completely, you can set to
1199 (unsigned long)(-1)
1200
1201 Trim settings interact with fastbin (MXFAST) settings: Unless
1202 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1203 freeing a chunk with size less than or equal to MXFAST. Trimming is
1204 instead delayed until subsequent freeing of larger chunks. However,
1205 you can still force an attempted trim by calling malloc_trim.
1206
1207 Also, trimming is not generally possible in cases where
1208 the main arena is obtained via mmap.
1209
1210 Note that the trick some people use of mallocing a huge space and
1211 then freeing it at program startup, in an attempt to reserve system
1212 memory, doesn't have the intended effect under automatic trimming,
1213 since that memory will immediately be returned to the system.
1214*/
1215
1216#define M_TRIM_THRESHOLD -1
1217
1218#ifndef DEFAULT_TRIM_THRESHOLD
1219#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1220#endif
1221
1222/*
1223 M_TOP_PAD is the amount of extra `padding' space to allocate or
1224 retain whenever sbrk is called. It is used in two ways internally:
1225
1226 * When sbrk is called to extend the top of the arena to satisfy
1227 a new malloc request, this much padding is added to the sbrk
1228 request.
1229
1230 * When malloc_trim is called automatically from free(),
1231 it is used as the `pad' argument.
1232
1233 In both cases, the actual amount of padding is rounded
1234 so that the end of the arena is always a system page boundary.
1235
1236 The main reason for using padding is to avoid calling sbrk so
1237 often. Having even a small pad greatly reduces the likelihood
1238 that nearly every malloc request during program start-up (or
1239 after trimming) will invoke sbrk, which needlessly wastes
1240 time.
1241
1242 Automatic rounding-up to page-size units is normally sufficient
1243 to avoid measurable overhead, so the default is 0. However, in
1244 systems where sbrk is relatively slow, it can pay to increase
1245 this value, at the expense of carrying around more memory than
1246 the program needs.
1247*/
1248
1249#define M_TOP_PAD -2
1250
1251#ifndef DEFAULT_TOP_PAD
1252#define DEFAULT_TOP_PAD (0)
1253#endif
1254
1255/*
1256 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1257 to service a request. Requests of at least this size that cannot
1258 be allocated using already-existing space will be serviced via mmap.
1259 (If enough normal freed space already exists it is used instead.)
1260
1261 Using mmap segregates relatively large chunks of memory so that
1262 they can be individually obtained and released from the host
1263 system. A request serviced through mmap is never reused by any
1264 other request (at least not directly; the system may just so
1265 happen to remap successive requests to the same locations).
1266
1267 Segregating space in this way has the benefits that:
1268
1269 1. Mmapped space can ALWAYS be individually released back
1270 to the system, which helps keep the system level memory
1271 demands of a long-lived program low.
1272 2. Mapped memory can never become `locked' between
1273 other chunks, as can happen with normally allocated chunks, which
1274 means that even trimming via malloc_trim would not release them.
1275 3. On some systems with "holes" in address spaces, mmap can obtain
1276 memory that sbrk cannot.
1277
1278 However, it has the disadvantages that:
1279
1280 1. The space cannot be reclaimed, consolidated, and then
1281 used to service later requests, as happens with normal chunks.
1282 2. It can lead to more wastage because of mmap page alignment
1283 requirements
1284 3. It causes malloc performance to be more dependent on host
1285 system memory management support routines which may vary in
1286 implementation quality and may impose arbitrary
1287 limitations. Generally, servicing a request via normal
1288 malloc steps is faster than going through a system's mmap.
1289
1290 The advantages of mmap nearly always outweigh disadvantages for
1291 "large" chunks, but the value of "large" varies across systems. The
1292 default is an empirically derived value that works well in most
1293 systems.
1294*/
1295
1296#define M_MMAP_THRESHOLD -3
1297
1298#ifndef DEFAULT_MMAP_THRESHOLD
1299#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1300#endif
1301
1302/*
1303 M_MMAP_MAX is the maximum number of requests to simultaneously
1304 service using mmap. This parameter exists because some
1305 systems have a limited number of internal tables for
1306 use by mmap, and using more than a few of them may degrade
1307 performance.
1308
1309 The default is set to a value that serves only as a safeguard.
1310 Setting to 0 disables use of mmap for servicing large requests. If
1311 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1312 to non-zero values in mallopt will fail.
1313*/
1314
1315#define M_MMAP_MAX -4
1316
1317#ifndef DEFAULT_MMAP_MAX
1318#define DEFAULT_MMAP_MAX (65536)
1319#endif
1320
1321#ifdef __cplusplus
1322}; /* end of extern "C" */
1323#endif
1324
1325/*
1326 ========================================================================
1327 To make a fully customizable malloc.h header file, cut everything
1328 above this line, put into file malloc.h, edit to suit, and #include it
1329 on the next line, as well as in programs that use this malloc.
1330 ========================================================================
1331*/
1332
1333/* #include "malloc.h" */
1334
1335/* --------------------- public wrappers ---------------------- */
1336
1337#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1338
1339/* DL_STATIC used to make functions (deep down) consistent
1340 * with prototypes (otherwise the prototypes are static
1341 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1342 * The gcc compiler doesn't care, but the HP-UX compiler does.
1343 */
1344#define DL_STATIC static
1345
1346/* Declare all routines as internal */
1347#if __STD_C
1348static Void_t* mALLOc(size_t) __attribute_malloc__;
1349static void fREe(Void_t*);
1350static Void_t* rEALLOc(Void_t*, size_t) __attribute_malloc__;
1351static Void_t* mEMALIGn(size_t, size_t) __attribute_malloc__;
1352static int posix_mEMALIGn(Void_t**, size_t, size_t);
1353static Void_t* vALLOc(size_t) __attribute_malloc__;
1354static Void_t* pVALLOc(size_t) __attribute_malloc__;
1355static Void_t* cALLOc(size_t, size_t) __attribute_malloc__;
1356static int mTRIm(size_t);
1357static size_t mUSABLe(Void_t*);
1358static void mSTATs();
1359static int mALLOPt(int, int);
1360static struct mallinfo mALLINFo(void);
1361#else
1362static Void_t* mALLOc();
1363static void fREe();
1364static Void_t* rEALLOc();
1365static Void_t* mEMALIGn();
1366static int posix_mEMALIGn();
1367static Void_t* vALLOc();
1368static Void_t* pVALLOc();
1369static Void_t* cALLOc();
1370static int mTRIm();
1371static size_t mUSABLe();
1372static void mSTATs();
1373static int mALLOPt();
1374static struct mallinfo mALLINFo();
1375#endif
1376
1377/*
1378 MALLOC_PREACTION and MALLOC_POSTACTION should be
1379 defined to return 0 on success, and nonzero on failure.
1380 The return value of MALLOC_POSTACTION is currently ignored
1381 in wrapper functions since there is no reasonable default
1382 action to take on failure.
1383*/
1384
1385
1386#ifdef USE_MALLOC_LOCK
1387
1388# ifdef WIN32
1389
1390static int mALLOC_MUTEx;
1391#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1392#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1393int dnmalloc_pthread_init(void) { return 0; }
1394
1395# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1396
1397# if defined(__NetBSD__)
1398#include <reentrant.h>
1399extern int __isthreaded;
1400static mutex_t thread_lock = MUTEX_INITIALIZER;
1401#define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock)
1402#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1403void _malloc_prefork(void) { _MALLOC_LOCK(); }
1404void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1405# endif
1406
1407# if defined(__OpenBSD__)
1408extern int __isthreaded;
1409void _thread_malloc_lock(void);
1410void _thread_malloc_unlock(void);
1411#define _MALLOC_LOCK() if (__isthreaded) _thread_malloc_lock()
1412#define _MALLOC_UNLOCK() if (__isthreaded) _thread_malloc_unlock()
1413# endif
1414
1415# if defined(__FreeBSD__)
1416extern int __isthreaded;
1417struct _spinlock {
1418 volatile long access_lock;
1419 volatile long lock_owner;
1420 volatile char *fname;
1421 volatile int lineno;
1422};
1423typedef struct _spinlock spinlock_t;
1424#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
1425void _spinlock(spinlock_t *);
1426void _spinunlock(spinlock_t *);
1427/* # include "/usr/src/lib/libc/include/spinlock.h" */
1428static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
1429spinlock_t *__malloc_lock = &thread_lock;
1430#define _MALLOC_LOCK() if (__isthreaded) _spinlock(&thread_lock)
1431#define _MALLOC_UNLOCK() if (__isthreaded) _spinunlock(&thread_lock)
1432# endif
1433
1434/* Common for all three *BSD
1435 */
1436static int malloc_active = 0;
1437static int dnmalloc_mutex_lock()
1438{
1439 _MALLOC_LOCK();
1440 if (!malloc_active)
1441 {
1442 ++malloc_active;
1443 return 0;
1444 }
1445 assert(malloc_active == 0);
1446 _MALLOC_UNLOCK();
1447 errno = EDEADLK;
1448 return 1;
1449}
1450static int dnmalloc_mutex_unlock()
1451{
1452 --malloc_active;
1453 _MALLOC_UNLOCK();
1454 return 0;
1455}
1456#define MALLOC_PREACTION dnmalloc_mutex_lock()
1457#define MALLOC_POSTACTION dnmalloc_mutex_unlock()
1458int dnmalloc_pthread_init(void) { return 0; }
1459
1460# else
1461
1462/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1463 *
1464 * Works fine on linux (no malloc in pthread_mutex_lock)
1465 * Works on HP-UX if initialized after entering main()
1466 */
1467#include <pthread.h>
1468static int malloc_active = 0;
1469void dnmalloc_fork_prepare(void);
1470void dnmalloc_fork_parent(void);
1471void dnmalloc_fork_child(void);
1472
1473#if !defined(__linux__)
1474
1475static pthread_mutex_t mALLOC_MUTEx;
1476pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1477static int dnmalloc_use_mutex = 0;
1478static void dnmalloc_pthread_init_int(void)
1479{
1480 pthread_mutexattr_t mta;
1481 pthread_mutexattr_init(&mta);
1482 pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
1483 pthread_mutex_init(&(mALLOC_MUTEx), &mta);
1484 pthread_mutexattr_destroy(&mta);
1485 pthread_atfork(dnmalloc_fork_prepare,
1486 dnmalloc_fork_parent,
1487 dnmalloc_fork_child);
1488 dnmalloc_use_mutex = 1;
1489}
1490int dnmalloc_pthread_init(void)
1491{
1492 return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
1493}
1494
1495#else
1496
1497static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1498static int dnmalloc_use_mutex = 1;
1499int dnmalloc_pthread_init(void) {
1500 return pthread_atfork(dnmalloc_fork_prepare,
1501 dnmalloc_fork_parent,
1502 dnmalloc_fork_child);
1503}
1504#endif /* !defined(__linux__) */
1505
1506void dnmalloc_fork_prepare(void) {
1507 if (dnmalloc_use_mutex)
1508 pthread_mutex_lock(&mALLOC_MUTEx);
1509}
1510void dnmalloc_fork_parent(void) {
1511 if (dnmalloc_use_mutex)
1512 pthread_mutex_unlock(&mALLOC_MUTEx);
1513}
[174]1514void dnmalloc_fork_child(void) {
[180]1515 int rc = 0;
[171]1516#ifdef __GLIBC__
1517 if (dnmalloc_use_mutex)
[174]1518 {
1519 pthread_mutex_unlock (&mALLOC_MUTEx);
1520 pthread_mutex_destroy(&mALLOC_MUTEx);
1521 rc = pthread_mutex_init(&mALLOC_MUTEx, NULL);
1522 }
[171]1523#else
1524 if (dnmalloc_use_mutex)
[174]1525 rc = pthread_mutex_unlock(&mALLOC_MUTEx);
[171]1526#endif
[174]1527 if (rc != 0)
1528 {
1529 fputs("fork_child failed", stderr);
1530 _exit(EXIT_FAILURE);
1531 }
[171]1532}
1533static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
1534{
1535 if (dnmalloc_use_mutex)
1536 {
1537 int rc = pthread_mutex_lock(mutex);
1538 if (rc == 0)
1539 {
1540 if (!malloc_active)
1541 {
1542 ++malloc_active;
1543 return 0;
1544 }
1545 assert(malloc_active == 0);
1546 (void) pthread_mutex_unlock(mutex);
1547 errno = EDEADLK;
1548 return 1;
1549 }
1550 return rc;
1551 }
1552 return 0;
1553}
1554static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
1555{
1556 if (dnmalloc_use_mutex)
1557 {
1558 --malloc_active;
1559 return pthread_mutex_unlock(mutex);
1560 }
1561 return 0;
1562}
1563# define MALLOC_PREACTION dnmalloc_mutex_lock(&mALLOC_MUTEx)
1564# define MALLOC_POSTACTION dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1565
1566# endif
1567
1568#else
1569
1570/* Substitute anything you like for these */
1571
1572# define MALLOC_PREACTION (0)
1573# define MALLOC_POSTACTION (0)
1574int dnmalloc_pthread_init(void) { return 0; }
1575
1576#endif /* USE_MALLOC_LOCK */
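/*
   Example (illustrative): as noted in the version history above, on some
   systems (e.g. HP-UX) the lock can only be set up after the application
   has entered main(), so a threaded program should call

     int main(void)
     {
       dnmalloc_pthread_init();
       ...
     }

   as early as possible. On platforms where this is not needed the call
   is harmless.
*/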
1577
1578Void_t* public_mALLOc(size_t bytes) {
1579 Void_t* m;
1580 if (MALLOC_PREACTION == 0) {
1581 m = mALLOc(bytes);
1582 (void) MALLOC_POSTACTION;
1583 return m;
1584 }
1585 return 0;
1586}
1587
1588void public_fREe(Void_t* m) {
1589 if (MALLOC_PREACTION == 0) {
1590 fREe(m);
1591 (void) MALLOC_POSTACTION;
1592 }
1593}
1594
1595Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1596 if (MALLOC_PREACTION == 0) {
1597 m = rEALLOc(m, bytes);
1598 (void) MALLOC_POSTACTION;
1599 return m;
1600 }
1601 return 0;
1602}
1603
1604Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1605 Void_t* m;
1606 if (MALLOC_PREACTION == 0) {
1607 m = mEMALIGn(alignment, bytes);
1608 (void) MALLOC_POSTACTION;
1609 return m;
1610 }
1611 return 0;
1612}
1613
1614int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1615 int m, ret;
1616 if ((ret = MALLOC_PREACTION) == 0) {
1617 m = posix_mEMALIGn(memptr, alignment, bytes);
1618 (void) MALLOC_POSTACTION;
1619 return m;
1620 }
1621 return ret;
1622}
1623
1624Void_t* public_vALLOc(size_t bytes) {
1625 Void_t* m;
1626 if (MALLOC_PREACTION == 0) {
1627 m = vALLOc(bytes);
1628 (void) MALLOC_POSTACTION;
1629 return m;
1630 }
1631 return 0;
1632}
1633
1634Void_t* public_pVALLOc(size_t bytes) {
1635 Void_t* m;
1636 if (MALLOC_PREACTION == 0) {
1637 m = pVALLOc(bytes);
1638 (void) MALLOC_POSTACTION;
1639 return m;
1640 }
1641 return 0;
1642}
1643
1644Void_t* public_cALLOc(size_t n, size_t elem_size) {
1645 Void_t* m;
1646 if (MALLOC_PREACTION == 0) {
1647 m = cALLOc(n, elem_size);
1648 (void) MALLOC_POSTACTION;
1649 return m;
1650 }
1651 return 0;
1652}
1653
1654int public_mTRIm(size_t s) {
1655 int result;
1656 if (MALLOC_PREACTION == 0) {
1657 result = mTRIm(s);
1658 (void) MALLOC_POSTACTION;
1659 return result;
1660 }
1661 return 0;
1662}
1663
1664size_t public_mUSABLe(Void_t* m) {
1665 size_t result;
1666 if (MALLOC_PREACTION == 0) {
1667 result = mUSABLe(m);
1668 (void) MALLOC_POSTACTION;
1669 return result;
1670 }
1671 return 0;
1672}
1673
1674void public_mSTATs() {
1675 if (MALLOC_PREACTION == 0) {
1676 mSTATs();
1677 (void) MALLOC_POSTACTION;
1678 }
1679}
1680
1681struct mallinfo public_mALLINFo() {
1682 struct mallinfo m;
1683 if (MALLOC_PREACTION == 0) {
1684 m = mALLINFo();
1685 (void) MALLOC_POSTACTION;
1686 return m;
1687 } else {
1688 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1689 return nm;
1690 }
1691}
1692
1693int public_mALLOPt(int p, int v) {
1694 int result;
1695 if (MALLOC_PREACTION == 0) {
1696 result = mALLOPt(p, v);
1697 (void) MALLOC_POSTACTION;
1698 return result;
1699 }
1700 return 0;
1701}
1702
1703#else
1704
1705int dnmalloc_pthread_init(void) { return 0; }
1706#define DL_STATIC
1707
1708#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1709
1710
1711
1712/* ------------- Optional versions of memcopy ---------------- */
1713
1714
1715#if USE_MEMCPY
1716
1717/*
1718 Note: memcpy is ONLY invoked with non-overlapping regions,
1719 so the (usually slower) memmove is not needed.
1720*/
1721
1722#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1723#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1724
1725#else /* !USE_MEMCPY */
1726
1727/* Use Duff's device for good zeroing/copying performance. */
1728
1729#define MALLOC_ZERO(charp, nbytes) \
1730do { \
1731 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1732 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1733 long mcn; \
1734 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1735 switch (mctmp) { \
1736 case 0: for(;;) { *mzp++ = 0; \
1737 case 7: *mzp++ = 0; \
1738 case 6: *mzp++ = 0; \
1739 case 5: *mzp++ = 0; \
1740 case 4: *mzp++ = 0; \
1741 case 3: *mzp++ = 0; \
1742 case 2: *mzp++ = 0; \
1743 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1744 } \
1745} while(0)
1746
1747#define MALLOC_COPY(dest,src,nbytes) \
1748do { \
1749 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1750 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1751 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1752 long mcn; \
1753 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1754 switch (mctmp) { \
1755 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1756 case 7: *mcdst++ = *mcsrc++; \
1757 case 6: *mcdst++ = *mcsrc++; \
1758 case 5: *mcdst++ = *mcsrc++; \
1759 case 4: *mcdst++ = *mcsrc++; \
1760 case 3: *mcdst++ = *mcsrc++; \
1761 case 2: *mcdst++ = *mcsrc++; \
1762 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1763 } \
1764} while(0)
1765
1766#endif /* !USE_MEMCPY */
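
/*
  Illustrative walk-through (not part of the allocator): how the Duff's
  device above interleaves the switch with the loop.  Assuming an 8-byte
  INTERNAL_SIZE_T and nbytes == 88, the macro computes

     mctmp = 88 / 8 = 11;   mcn = (11 - 1) / 8 = 1;   mctmp %= 8  ->  3

  so control enters the switch at "case 3", performs 3 stores, sees
  mcn > 0, decrements it and falls through to the top of the for(;;),
  which then performs one full round of 8 stores: 3 + 8 = 11 words,
  exactly nbytes / 8, after which mcn <= 0 and the loop breaks.
*/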
1767
1768/* ------------------ MMAP support ------------------ */
1769
1770
1771#if defined(HAVE_FCNTL_H)
1772#include <fcntl.h>
1773#endif
1774
1775#if defined(HAVE_SYS_MMAN_H)
1776#include <sys/mman.h>
1777#endif
1778
1779#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1780#define MAP_ANONYMOUS MAP_ANON
1781#endif
1782
1783/*
1784 Nearly all versions of mmap support MAP_ANONYMOUS,
1785 so the following is unlikely to be needed, but is
1786 supplied just in case.
1787*/
1788
1789#ifndef MAP_ANONYMOUS
1790
1791/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1792 */
1793void * anon_mmap (void *addr, size_t length, int prot, int flags)
1794{
1795 void * retval = NULL;
1796 int dev_zero_fd = -1; /* File descriptor for /dev/zero. */
1797
1798 dev_zero_fd = open("/dev/zero", O_RDWR);
1799 if (dev_zero_fd >= 0)
1800 {
1801      retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
1802 /* closing the file descriptor does not unmap the region */
1803 close(dev_zero_fd);
1804 }
1805 return retval;
1806}
1807
1808#define MMAP(addr, size, prot, flags) \
1809 (anon_mmap((addr), (size), (prot), (flags)))
1810
1811
1812#else /* have MAP_ANONYMOUS */
1813
1814#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1815#define MAP_32BIT MAP_ADDR32
1816#endif
1817
1818#if defined(MAP_32BIT)
1819#define MMAP(addr, size, prot, flags) \
1820 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1821#elif defined(__sun)
1822/*
1823 * Hint an address within 32bit address space
1824 */
1825#define MMAP(addr, size, prot, flags) \
1826 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1827#else
1828/* *BSD */
1829#define MMAP(addr, size, prot, flags) \
1830 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1831#endif
1832
1833#endif /* have MAP_ANONYMOUS */
1834
1835
1836/*
1837 ----------------------- Chunk representations -----------------------
1838*/
1839
1840typedef void * mchunkptr;
1841
1842struct chunkinfo {
1843  INTERNAL_SIZE_T   prev_size;  /* Size of previous chunk in bytes   */
1844 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1845 INTERNAL_SIZE_T req; /* Original request size, for guard. */
1846  struct chunkinfo* hash_next;  /* pointer to the next chunkinfo in the
1847                                   linked list of chunkinfos whose chunks
1848                                   share the same hash value */
1849 struct chunkinfo* fd; /* double links -- used only if free. */
1850 struct chunkinfo* bk;
1851 mchunkptr chunk;
1852};
1853
1854typedef struct chunkinfo* chunkinfoptr;
1855
1856struct cireginfo {
1857 unsigned long position;
1858 unsigned long *freebitmap;
1859 struct cireginfo* next;
1860 struct chunkinfo *freelist;
1861 struct chunkinfo *begin;
1862 unsigned long freecounter;
1863};
1864
1865/*
1866 ---------- Size and alignment checks and conversions ----------
1867*/
1868
1869/* conversion from malloc headers to user pointers, and back */
1870#define chunk(p) (p->chunk)
1871
1872
1873#define chunk2mem(p) (chunk(p))
1874#define mem2chunk(mem) (hashtable_lookup(mem))
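
/*
  Sketch (compiled out, for illustration only): unlike dlmalloc, the chunk
  header lives in a separate chunkinfo record, so converting a user pointer
  back to its metadata is a hashtable lookup rather than pointer arithmetic.
  The function and variable names below are hypothetical.
*/
#if 0
static void example_lookup(void *mem)
{
  chunkinfoptr ci = mem2chunk(mem);   /* expands to hashtable_lookup(mem) */
  if (ci) {
    /* chunk(ci) == mem; size, flags and the guard live in ci,
     * not in memory adjacent to mem */
    assert(chunk(ci) == mem);
  }
}
#endif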
1875
[187]1876/* The smallest possible chunk */
1877/* #define MIN_CHUNK_SIZE 16 */
1878#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
1879# define MIN_CHUNK_SIZE 32
1880#else
1881# define MIN_CHUNK_SIZE 16
1882#endif
[171]1883
1884/* The smallest size we can malloc is an aligned minimal chunk */
1885
1886#define MINSIZE \
1887 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1888
1889/* Check if m has acceptable alignment */
1890
1891#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1892
1893#define GUARD_SIZE 4
1894
1895/*
1896 Check if a request is so large that it would wrap around zero when
1897 padded and aligned. To simplify some other code, the bound is made
1898 low enough so that adding MINSIZE will also not wrap around zero.
1899
1900 Make it 4*MINSIZE.
1901*/
1902
1903#define REQUEST_OUT_OF_RANGE(req) \
1904 ((CHUNK_SIZE_T)(req) >= \
1905 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))
1906
1907/* pad request bytes into a usable size -- internal version */
1908
1909#define request2size(req) \
1910 (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE) ? \
1911 ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1912 MINSIZE)
1913
1914/* Same, except also perform argument check */
1915
1916#define checked_request2size(req, sz) \
1917 if (!REQUEST_OUT_OF_RANGE(req)) { \
1918 (sz) = request2size(req); \
1919 assert((sz-req) >= GUARD_SIZE); \
1920 } else { \
1921 MALLOC_FAILURE_ACTION; \
1922 return 0; \
1923 }
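
/*
  Worked example (illustrative, assuming MALLOC_ALIGNMENT == 8, i.e.
  MALLOC_ALIGN_MASK == 7, and GUARD_SIZE == 4): a request of 20 bytes
  becomes

     request2size(20) = (20 + 4 + 7) & ~7 = 24

  which leaves 24 - 20 = 4 bytes behind the user data for the guard,
  satisfying the assert in checked_request2size.  Requests so small that
  the padded size would drop below MINSIZE are rounded up to MINSIZE.
*/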
1924
1925#if PARANOIA > 2
1926static char * guard_set_p;
1927static char * guard_set_q;
1928
1929#define guard_set(guard, P, request, sz) \
1930 assert((sz-request) >= GUARD_SIZE); \
1931 guard_set_p = (char*)(chunk(P)); \
1932 guard_set_p += request; \
[172]1933 VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE); \
[171]1934 guard_set_q = (char*)(guard); \
1935 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1936 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1937 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1938 *guard_set_p = *guard_set_q; \
[172]1939 VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE); \
[171]1940 (P)->req = request
1941
1942#define guard_check(guard, P) \
[172]1943 VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1944 assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1945 VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
[171]1946
1947#else
1948#define guard_set(guard, P, request, sz) ((void)0)
1949#define guard_check(guard, P) ((void)0)
1950#endif /* PARANOIA > 2 */
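
/*
  Layout sketch (illustrative): for an allocation where the caller asked
  for "request" bytes out of a chunk with "sz" usable bytes, guard_set()
  copies the GUARD_SIZE random canary bytes directly behind the user data
  and records the request size in the chunkinfo:

     chunk(P): [ request user bytes | GUARD_SIZE canary bytes | padding ]
                                      ^ chunk(P) + P->req

  guard_check() re-reads those bytes at free/realloc time and asserts
  that they still match the stored canary (av->guard_stored).
*/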
1951
1952/* dnmalloc forward declarations */
1953static char * dnmalloc_arc4random(void);
1954static void dnmalloc_init (void);
1955static void malloc_mmap_state(void);
1956static void cireg_extend (void);
1957static chunkinfoptr cireg_getfree (void);
1958static void hashtable_add (chunkinfoptr ci);
1959static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1960static void hashtable_remove (mchunkptr p);
1961static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1962static chunkinfoptr hashtable_lookup (mchunkptr p);
1963static chunkinfoptr next_chunkinfo (chunkinfoptr ci);
1964static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1965
1966
1967
1968/*
1969 --------------- Physical chunk operations ---------------
1970*/
1971
1972
1973/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1974#define PREV_INUSE 0x1
1975
1976/* extract inuse bit of previous chunk */
1977#define prev_inuse(p) ((p)->size & PREV_INUSE)
1978
1979/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1980#define IS_MMAPPED 0x2
1981
1982/* check for mmap()'ed chunk */
1983#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1984
1985
1986/* size field is or'ed when the chunk is in use */
1987#define INUSE 0x4
1988
1989/* extract inuse bit of chunk */
1990#define inuse(p) ((p)->size & INUSE)
1991
1992/*
1993 Bits to mask off when extracting size
1994
1995 Note: IS_MMAPPED is intentionally not masked off from size field in
1996 macros for which mmapped chunks should never be seen. This should
1997 cause helpful core dumps to occur if it is tried by accident by
1998 people extending or adapting this malloc.
1999*/
2000#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
2001
2002/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
2003#define SIZE_NOMMAP (PREV_INUSE|INUSE)
2004
2005/* Get size, ignoring use bits */
2006#define chunksize(p) ((p)->size & ~(SIZE_BITS))
2007
2008/* Ptr to chunkinfo of next physical malloc_chunk. */
2009#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
2010
2011/* Treat space at ptr + offset as a chunk */
2012#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2013
2014/* set/clear chunk as being inuse without otherwise disturbing */
2015#define set_inuse(p) ((p)->size |= INUSE)
2016
2017#define clear_inuse(p) ((p)->size &= ~(INUSE))
2018
2019#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2020
2021#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2022
2023static void set_previnuse_next (chunkinfoptr p)
2024{
2025 chunkinfoptr q;
2026 q = next_chunkinfo (p);
2027 if (q)
2028 set_previnuse (q);
2029}
2030
2031#define set_all_inuse(p) \
2032set_inuse(p); \
2033set_previnuse_next(p);
2034
2035
2036/* Set size at head, without disturbing its use bit */
2037#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2038
2039/* Set size/use field */
2040#define set_head(p, s) ((p)->size = (s))
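
/*
  Worked example (illustrative): a 48-byte chunk whose predecessor and
  itself are both in use has

     p->size = 48 | PREV_INUSE | INUSE = 0x35

  so chunksize(p) == 48, prev_inuse(p) and inuse(p) are non-zero, and
  chunk_is_mmapped(p) is 0.  The flag bits fit in the low three bits
  because chunk sizes are always multiples of at least 8.
*/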
2041
2042/*
2043 Bins
2044
2045 An array of bin headers for free chunks. Each bin is doubly
2046 linked. The bins are approximately proportionally (log) spaced.
2047    There are a lot of these bins (96). This may look excessive, but
2048 works very well in practice. Most bins hold sizes that are
2049 unusual as malloc request sizes, but are more usual for fragments
2050 and consolidated sets of chunks, which is what these bins hold, so
2051 they can be found quickly. All procedures maintain the invariant
2052 that no consolidated chunk physically borders another one, so each
2053    chunk in a list is known to be preceded and followed by either
2054 inuse chunks or the ends of memory.
2055
2056 Chunks in bins are kept in size order, with ties going to the
2057 approximately least recently used chunk. Ordering isn't needed
2058 for the small bins, which all contain the same-sized chunks, but
2059 facilitates best-fit allocation for larger chunks. These lists
2060 are just sequential. Keeping them in order almost never requires
2061 enough traversal to warrant using fancier ordered data
2062 structures.
2063
2064 Chunks of the same size are linked with the most
2065 recently freed at the front, and allocations are taken from the
2066 back. This results in LRU (FIFO) allocation order, which tends
2067 to give each chunk an equal opportunity to be consolidated with
2068 adjacent freed chunks, resulting in larger free chunks and less
2069 fragmentation.
2070
2071    To simplify use in double-linked lists, each bin header is itself
2072    a struct chunkinfo. This avoids special-casing for headers; only
2073    the fd/bk fields of a bin header are actually used.
2076*/
2077
2078typedef struct chunkinfo* mbinptr;
2079
2080/* addressing -- note that bin_at(0) does not exist */
2081#define bin_at(m, i) (&(m)->bins[i])
2082
2083/* analog of ++bin */
2084#define next_bin(b) (b+1)
2085
2086/* Reminders about list directionality within bins */
2087#define first(b) ((b)->fd)
2088#define last(b) ((b)->bk)
2089
2090/* Take a chunk off a bin list */
2091#define unlink(P, BK, FD) { \
2092 FD = P->fd; \
2093 BK = P->bk; \
2094 FD->bk = BK; \
2095 BK->fd = FD; \
2096}
2097
2098/*
2099 Indexing
2100
2101    Bins for sizes < 256 bytes (the small bins) contain chunks of all
2102    the same size, spaced 8 bytes apart.  Larger bins are approximately
2103    logarithmically spaced, four bins per power of two:
2104
2105    32 small bins of width 8 (sizes below 256)
2106     4 bins per doubling from 256 up to about 16MB
2107     1 bin for whatever is left
2108
2109    In practice, requests at or above the mmap threshold are serviced
2110    via mmap, so the largest bins are rarely used.
2114*/
2115
2116#define NBINS 96
2117#define NSMALLBINS 32
2118#define SMALLBIN_WIDTH 8
2119#define MIN_LARGE_SIZE 256
2120
2121#define in_smallbin_range(sz) \
2122 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2123
2124#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2125
2126/*
2127 Compute index for size. We expect this to be inlined when
2128 compiled with optimization, else not, which works out well.
2129*/
2130static int largebin_index(size_t sz) {
2131
2132 unsigned long xx = sz >> SMALLBIN_WIDTH;
2133
2134 if (xx < 0x10000)
2135 {
2136      unsigned int m;           /* bit position of the highest set bit */
2137
2138 /* On intel, use BSRL instruction to find highest bit */
2139#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)
2140
2141 unsigned int x = (unsigned int) xx;
2142
2143 __asm__("bsrl %1,%0\n\t"
2144 : "=r" (m)
2145 : "rm" (x));
2146
2147#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)
2148
2149 __asm__("bsrq %1,%0\n\t"
2150 : "=r" (m)
2151 : "rm" (xx));
2152
2153#else
2154
2155 /* Taken from Bit Twiddling Hacks
2156 * http://graphics.stanford.edu/~seander/bithacks.html
2157 * public domain
2158 */
2159 unsigned int v = (unsigned int) xx;
2160 register unsigned int shift;
2161
2162 m = (v > 0xFFFF) << 4; v >>= m;
2163 shift = (v > 0xFF ) << 3; v >>= shift; m |= shift;
2164 shift = (v > 0xF ) << 2; v >>= shift; m |= shift;
2165 shift = (v > 0x3 ) << 1; v >>= shift; m |= shift;
2166 m |= (v >> 1);
2167
2168#endif
2169
2170 /* Use next 2 bits to create finer-granularity bins */
2171 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2172 }
2173 else
2174 {
2175 return NBINS-1;
2176 }
2177}
2178
2179#define bin_index(sz) \
2180 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
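
/*
  Worked examples (illustrative): with the constants above,

     bin_index(48)  = smallbin_index(48)  = 48 >> 3           =  6
     bin_index(256) = largebin_index(256) = 32 + (0<<2) + 0   = 32
     bin_index(512) = largebin_index(512) = 32 + (1<<2) + 0   = 36

  i.e. the first large bin (index NSMALLBINS == 32) starts exactly at
  MIN_LARGE_SIZE, and each doubling of the size above that is spread
  over four consecutive bins.
*/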
2181
2182/*
2183 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2184 first bin that is maintained in sorted order. This must
2185 be the smallest size corresponding to a given bin.
2186
2187 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2188    best fit guarantees to sometimes speed up malloc by increasing this value.
2189 Doing this means that malloc may choose a chunk that is
2190 non-best-fitting by up to the width of the bin.
2191
2192 Some useful cutoff values:
2193 512 - all bins sorted
2194 2560 - leaves bins <= 64 bytes wide unsorted
2195 12288 - leaves bins <= 512 bytes wide unsorted
2196 65536 - leaves bins <= 4096 bytes wide unsorted
2197 262144 - leaves bins <= 32768 bytes wide unsorted
2198 -1 - no bins sorted (not recommended!)
2199*/
2200
2201/* #define FIRST_SORTED_BIN_SIZE 65536 */
2202
2203#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2204
[172]2205
[171]2206/*
2207 Unsorted chunks
2208
2209 All remainders from chunk splits, as well as all returned chunks,
2210 are first placed in the "unsorted" bin. They are then placed
2211 in regular bins after malloc gives them ONE chance to be used before
2212 binning. So, basically, the unsorted_chunks list acts as a queue,
2213 with chunks being placed on it in free (and malloc_consolidate),
2214 and taken off (to be either used or placed in bins) in malloc.
2215*/
2216
2217/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2218#define unsorted_chunks(M) (bin_at(M, 1))
2219
2220/*
2221 Top
2222
2223 The top-most available chunk (i.e., the one bordering the end of
2224 available memory) is treated specially. It is never included in
2225 any bin, is used only if no other chunk is available, and is
2226 released back to the system if it is very large (see
2227 M_TRIM_THRESHOLD). Because top initially
2228 points to its own bin with initial zero size, thus forcing
2229 extension on the first malloc request, we avoid having any special
2230 code in malloc to check whether it even exists yet. But we still
2231 need to do so when getting memory from system, so we make
2232 initial_top treat the bin as a legal but unusable chunk during the
2233 interval between initialization and the first call to
2234 sYSMALLOc. (This is somewhat delicate, since it relies on
2235 the 2 preceding words to be zero during this interval as well.)
2236*/
2237
2238/* Conveniently, the unsorted bin can be used as dummy top on first call */
2239#define initial_top(M) (unsorted_chunks(M))
2240
2241/*
2242 Binmap
2243
2244 To help compensate for the large number of bins, a one-level index
2245 structure is used for bin-by-bin searching. `binmap' is a
2246 bitvector recording whether bins are definitely empty so they can
2247    be skipped over during traversals.  The bits are NOT always
2248 cleared as soon as bins are empty, but instead only
2249 when they are noticed to be empty during traversal in malloc.
2250*/
2251
2252/* Conservatively use 32 bits per map word, even if on 64bit system */
2253#define BINMAPSHIFT 5
2254#define BITSPERMAP (1U << BINMAPSHIFT)
2255#define BINMAPSIZE (NBINS / BITSPERMAP)
2256
2257#define idx2block(i) ((i) >> BINMAPSHIFT)
2258#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2259
2260#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2261#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2262#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
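
/*
  Worked example (illustrative): bin 37 lives in map word
  idx2block(37) == 37 >> 5 == 1, at bit idx2bit(37) == 1 << (37 & 31)
  == 1 << 5 == 0x20.  mark_bin(m,37) therefore ORs 0x20 into
  m->binmap[1]; get_binmap(m,37) tests that same bit.
*/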
2263
2264/*
2265 Fastbins
2266
2267 An array of lists holding recently freed small chunks. Fastbins
2268 are not doubly linked. It is faster to single-link them, and
2269 since chunks are never removed from the middles of these lists,
2270 double linking is not necessary. Also, unlike regular bins, they
2271 are not even processed in FIFO order (they use faster LIFO) since
2272 ordering doesn't much matter in the transient contexts in which
2273 fastbins are normally used.
2274
2275 Chunks in fastbins keep their inuse bit set, so they cannot
2276 be consolidated with other free chunks. malloc_consolidate
2277 releases all chunks in fastbins and consolidates them with
2278 other free chunks.
2279*/
2280
2281typedef struct chunkinfo* mfastbinptr;
2282
2283/* offset 2 to use otherwise unindexable first 2 bins */
2284#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2285
2286/* The maximum fastbin request size we support */
2287#define MAX_FAST_SIZE 80
2288
2289#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
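
/*
  Worked example (illustrative, assuming MALLOC_ALIGNMENT == 8 and
  GUARD_SIZE == 4): a request of MAX_FAST_SIZE == 80 bytes pads to
  request2size(80) == (80 + 4 + 7) & ~7 == 88, which lands in
  fastbin_index(88) == (88 >> 3) - 2 == 9, so NFASTBINS == 10.
*/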
2290
2291/*
2292 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2293 that triggers automatic consolidation of possibly-surrounding
2294 fastbin chunks. This is a heuristic, so the exact value should not
2295 matter too much. It is defined at half the default trim threshold as a
2296 compromise heuristic to only attempt consolidation if it is likely
2297 to lead to trimming. However, it is not dynamically tunable, since
2298  consolidation reduces fragmentation surrounding large chunks even
2299 if trimming is not used.
2300*/
2301
2302#define FASTBIN_CONSOLIDATION_THRESHOLD \
2303 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2304
2305/*
2306 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2307 they are used as flags.
2308*/
2309
2310/*
2311 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2312 freed chunks at all. It is set true when entering a chunk into any
2313 bin.
2314*/
2315
2316#define ANYCHUNKS_BIT (1U)
2317
2318#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2319#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2320#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2321
2322/*
2323 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2324 some fastbin chunks. It is set true on entering a chunk into any
2325 fastbin, and cleared only in malloc_consolidate.
2326*/
2327
2328#define FASTCHUNKS_BIT (2U)
2329
2330#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2331#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2332#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2333
2334/*
2335 Set value of max_fast.
2336 Use impossibly small value if 0.
2337*/
2338
2339#define set_max_fast(M, s) \
2340 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2341 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2342
2343#define get_max_fast(M) \
2344 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
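
/*
  Worked example (illustrative, assuming 8-byte alignment): after
  set_max_fast(M, 64), max_fast holds request2size(64) == 72 == 0x48,
  whose two low bits are free, ORed with whatever FASTCHUNKS_BIT /
  ANYCHUNKS_BIT flags were already set.  get_max_fast(M) masks those
  two bits off again, so size comparisons see 72.
*/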
2345
2346
2347/*
2348 morecore_properties is a status word holding dynamically discovered
2349 or controlled properties of the morecore function
2350*/
2351
2352#define MORECORE_CONTIGUOUS_BIT (1U)
2353
2354#define contiguous(M) \
2355 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2356#define noncontiguous(M) \
2357 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2358#define set_contiguous(M) \
2359 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2360#define set_noncontiguous(M) \
2361 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2362
2363#define MORECORE_32BIT_BIT (2U)
2364
2365#define morecore32bit(M) \
2366 (((M)->morecore_properties & MORECORE_32BIT_BIT))
2367#define nonmorecore32bit(M) \
2368 (((M)->morecore_properties & MORECORE_32BIT_BIT) == 0)
2369#define set_morecore32bit(M) \
2370 ((M)->morecore_properties |= MORECORE_32BIT_BIT)
2371#define set_nonmorecore32bit(M) \
2372 ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2373
2374
2375
2376/* ----------------- dnmalloc -------------------- */
2377
2378/* size of pages */
2379#define PGSIZE malloc_getpagesize
2380/* pointer size */
2381#define PTRSIZE sizeof(long)
2382
2383
2384
2385/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2386 address space: the hashtable has granularity of 16*8, set it to something
2387 closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2388 page)
2389*/
2390
2391/* The maximum heap size that dnmalloc can operate with
2392 * represented in hex to avoid annoying gcc warning
2393 *
2394 * Avoid integer overflow, cover complete 32bit address
2395 * space for portability. With deferred allocation, the
2396 * hashtable size is a non-issue.
2397 */
2398#define HEAPMAXSIZE_HALF 0x80000000UL
2399
2400/* How many elements are stored in the linked list */
2401#define LINKEDLSTELS 8
2402
2403/* Minimum size of a chunk */
2404
[172]2405#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2406# define MINCHUNKSIZE 32
2407#else
2408# define MINCHUNKSIZE 16
2409#endif
2410
2411
[171]2412/* The number of hashtable entries for each page:
2413page size divided by the number of elements in the linked lists,
2414divided by the minimum chunk size
2415*/
2416#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2417
2418/* The number of hashtable entries needed to manage the memory:
2419maximum heap size divided by page size, multiplied by the number
2420of chunkinfos per page
2421*/
2422#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2423
2424/* Initial size of the map for the hashtable:
2425number of entries multiplied by the pointer size
2426*/
2427#define HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
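
/*
  Worked example (illustrative, assuming a 4096-byte page and a 32-bit
  build, i.e. 4-byte long): CHUNKINFOPAGE = 4096 / 8 / 16 = 32 hashtable
  entries per page, AMOUNTHASH = (0x80000000 / 4096) * 32 * 2 = 33554432
  entries, and HASHTABLESIZE = 33554432 * 4 = 128 MB of address space.
  This is only reserved, not touched; with deferred allocation the kernel
  backs pages of the table on first use.
*/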
2428
2429/* Number of free chunkinfos allocated per chunkinfo region */
2430#define NUMBER_FREE_CHUNKS 32768
2431
2432/* Initial size of the chunk info region,
2433also used when growing the region */
2434#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
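
/*
  Sizing note (illustrative, assuming a 32-bit build where
  sizeof(struct chunkinfo) == 28): each chunkinfo region provides
  NUMBER_FREE_CHUNKS == 32768 slots, i.e. CIREGSIZE == 32768 * 28 bytes
  (roughly 896 KB), mapped with a leading guard page (and a trailing one
  when PARANOIA > 1) in cireg_extend().
*/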
2435
2436/* Start address of the heap */
2437char *startheap;
2438
2439/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2440chunkinfoptr *hashtable;
2441
2442/* Current chunkinfo region */
2443struct cireginfo *currciinfo = 0;
2444struct cireginfo *firstciinfo = 0;
2445
2446unsigned long totalcictr = 0;
2447
2448
2449/* Initialize the area for chunkinfos and the hashtable and protect
2450 * it with non-writable pages
2451 */
2452static void
2453dnmalloc_init ()
2454{
2455 void *hashtb;
2456 int mprot;
2457 int flags = MAP_PRIVATE;
2458
2459 /* Allocate the malloc_state struct */
2460 malloc_mmap_state();
2461
2462  /* Use MAP_NORESERVE if available (Solaris, HP-UX); most other
2463   * systems use deferred allocation anyway.
2464 */
2465#ifdef MAP_NORESERVE
2466 flags |= MAP_NORESERVE;
2467#endif
2468
2469 /* Always start at 0, hashtable covers whole 32bit address space
2470 */
2471#define STARTHEAP_IS_ZERO
2472 startheap = 0;
2473
2474 /* Map space for the hashtable */
2475#if PARANOIA > 1
2476 hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
2477#else
2478 hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
2479#endif
2480
2481#ifdef NDEBUG
2482 if (hashtb == MAP_FAILED) {
2483 fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
2484 abort ();
2485 }
2486#else
2487 assert(hashtb != MAP_FAILED);
2488#endif
2489
2490 /* Protect the hashtable with non-writable pages */
2491 mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
2492#ifdef NDEBUG
2493 if (mprot == -1) {
2494 fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
2495 strerror (errno));
2496 abort ();
2497 }
2498#else
2499 assert(mprot != -1);
2500#endif
2501
2502 /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
2503 hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);
2504
2505  /* With PARANOIA > 1, also protect the page after the hashtable */
2506#if PARANOIA > 1
2507 mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
2508#ifdef NDEBUG
2509 if (mprot == -1) {
2510 fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
2511 strerror (errno));
2512 abort ();
2513 }
2514#else
2515 assert(mprot != -1);
2516#endif
2517#endif
2518}
2519
2520
2521
2522/* Extend the space for chunkinfos by mapping an additional region */
2523static void
2524cireg_extend ()
2525{
2526 void *newcireg;
2527 int mprot;
2528 struct cireginfo *tempciinfo = 0;
2529
2530#if PARANOIA > 1
2531 newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
2532#else
2533 newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
2534#endif
2535
2536#ifdef NDEBUG
2537 if (newcireg == MAP_FAILED)
2538 {
2539 fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
2540 strerror (errno));
2541 abort ();
2542 }
2543#else
2544 assert(newcireg != MAP_FAILED);
2545#endif
2546 mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
2547#ifdef NDEBUG
2548 if (mprot == -1) {
2549 fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
2550 strerror (errno));
2551 abort ();
2552 }
2553#else
2554 assert(mprot != -1);
2555#endif
2556 newcireg = ((char*)newcireg)+PGSIZE;
2557
2558#if PARANOIA > 1
2559 mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
2560#ifdef NDEBUG
2561 if (mprot == -1) {
2562 fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
2563 strerror (errno));
2564 abort ();
2565 }
2566#else
2567 assert(mprot != -1);
2568#endif
2569#endif
2570
2571 tempciinfo = currciinfo;
2572 currciinfo = (struct cireginfo *) newcireg;
2573 if (tempciinfo)
2574 tempciinfo->next = currciinfo;
2575 currciinfo->position = 1;
2576 currciinfo->freecounter = NUMBER_FREE_CHUNKS;
2577 if (!firstciinfo)
2578 firstciinfo = currciinfo;
2579 totalcictr++;
[172]2580 VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
[171]2581}
2582
2583
2584/* Get a free chunkinfo */
2585static chunkinfoptr
2586cireg_getfree ()
2587{
2588 chunkinfoptr freeci;
2589 chunkinfoptr freelst = 0;
2590 struct cireginfo *newciinfo = firstciinfo;
2591
2592 if (newciinfo) {
2593 freelst = newciinfo->freelist;
2594
2595 if (!freelst && newciinfo->next) {
2596 do {
2597 newciinfo = newciinfo->next;
2598 freelst = newciinfo->freelist;
2599 } while (!freelst && newciinfo->next);
2600 }
2601 }
2602
2603 /* Check if there are any free chunkinfos on the list of free chunkinfos */
2604 if (freelst)
2605 {
[172]2606 freeci = freelst;
2607 newciinfo->freecounter--;
2608 newciinfo->freelist = freelst->fd;
2609
2610      VALGRIND_MEMPOOL_ALLOC((char*)newciinfo, (char*)freeci,
2611 sizeof(struct chunkinfo));
2612
2613 freeci->prev_size = 0;
2614 freeci->size = 0;
2615 freeci->req = 0;
2616 freeci->hash_next = NULL;
2617 freeci->fd = NULL;
2618 freeci->bk = NULL;
2619 freeci->chunk = NULL;
2620 return (freeci);
[171]2621 }
2622 else
2623 {
2624      /* No free chunkinfos; check if the chunkinfo region still has room
2625       * for a chunkinfo. If not, extend the region.
2626 */
2627 if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2628 cireg_extend ();
2629 /* Get a chunkinfo from the chunkinfo region */
2630 freeci = (chunkinfoptr) currciinfo + currciinfo->position;
2631 currciinfo->freecounter--;
2632 currciinfo->position++;
[172]2633
2634 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2635 sizeof(struct chunkinfo));
2636
[171]2637 return (freeci);
2638 }
2639}
2640
2641static void freeciregion(struct cireginfo *freeme) {
2642 /* free the chunkinfo region */
2643 struct cireginfo *newciinfo = firstciinfo;
2644 struct cireginfo *prevciinfo = firstciinfo;
2645 void *unmapme;
[172]2646
[171]2647 while (newciinfo && newciinfo != freeme) {
2648 prevciinfo = newciinfo;
2649 newciinfo = newciinfo->next;
2650 }
2651 assert(freeme == newciinfo); /* rw */
2652 assert(newciinfo != NULL); /* rw */
2653 if (newciinfo)
2654 prevciinfo->next = newciinfo->next;
2655 unmapme = (void *) ((char*)freeme - PGSIZE);
[172]2656 VALGRIND_DESTROY_MEMPOOL((char*)freeme);
[171]2657#if PARANOIA > 1
2658 munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2659#else
2660 munmap(unmapme, CIREGSIZE+PGSIZE);
2661#endif
2662}
2663
2664
2665static void freecilst_add(chunkinfoptr p) {
2666
2667 struct cireginfo *newciinfo;
[172]2668
[171]2669 newciinfo = currciinfo;
2670 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2671 p->fd = newciinfo->freelist;
2672 newciinfo->freelist = p;
2673 newciinfo->freecounter++;
[172]2674 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2675 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2676 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2677 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2678 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2679 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2680 } else {
2681 newciinfo = firstciinfo;
2682 if (newciinfo) {
2683 do {
2684 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2685 p->fd = newciinfo->freelist;
2686 newciinfo->freelist = p;
2687 newciinfo->freecounter++;
[172]2688 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2689 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2690 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2691 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2692 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2693 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2694 if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2695 freeciregion(newciinfo);
2696 break;
2697 }
2698 newciinfo = newciinfo->next;
2699 } while (newciinfo);
2700 }
2701 }
2702}
2703
2704/* Calculate the hash table entry for a chunk */
2705#ifdef STARTHEAP_IS_ZERO
2706#define hash(p) (((unsigned long) p) >> 7)
2707#else
2708#define hash(p) (((unsigned long) p - (unsigned long) startheap) >> 7)
2709#endif
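
/*
  Worked example (illustrative): hash() simply discards the low 7 bits of
  the chunk address, so every hashtable slot covers 128 bytes of heap
  (LINKEDLSTELS (8) times MINCHUNKSIZE (16) on a 32-bit build), and chunks
  within the same 128-byte window share a slot via the hash_next list.
  E.g. chunks at 0x08040060 and 0x08040070 both hash to
  0x08040060 >> 7 == 0x100800.
*/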
2710
2711static void
2712hashtable_add (chunkinfoptr ci)
2713{
2714 chunkinfoptr temp, next;
2715 unsigned long hashval;
2716 mchunkptr cic = chunk (ci);
2717
2718 hashval = hash (cic);
2719
2720 if (hashval < AMOUNTHASH) {
2721
2722 temp = hashtable[hashval];
2723
2724#ifdef DNMALLOC_DEBUG
2725 fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
2726#endif
2727
2728 /* If no pointer to a chunk info list is stored at this location
2729 * in the hashtable or if the chunk's address is smaller than the
2730 * one present, add the chunk to the front of the linked list
2731 */
2732 if (temp == 0 || chunk (temp) > cic)
2733 {
2734 ci->hash_next = temp;
2735 hashtable[hashval] = ci;
2736 if (!temp) /* more likely case */
2737 goto out;
2738 temp->prev_size = chunksize(ci);
2739 return;
2740 }
2741 else
2742 {
2743 /* We must place the chunk in the linked list for this hashentry
2744 * Loop to end of list or to a position where temp's chunk's address
2745 * is larger than the new chunkinfo's chunk's address
2746 */
2747 if (!temp->hash_next || (chunk (temp->hash_next) > cic))
2748 {
2749 ci->hash_next = temp->hash_next;
2750 temp->hash_next = ci;
2751 }
2752 else
2753 {
2754 while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
2755 {
2756 temp = temp->hash_next;
2757 }
2758 /* Place in linked list if not already there */
2759 if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
2760 {
2761 ci->hash_next = temp->hash_next;
2762 temp->hash_next = ci;
2763 }
2764 }
2765 }
2766 }
2767 else {
2768#ifdef DNMALLOC_CHECKS
2769 if (hashval >= AMOUNTHASH) {
2770 fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
2771 abort();
2772 }
2773#else
2774 assert(hashval < AMOUNTHASH);
2775#endif
2776 }
2777
2778 out:
2779 next = next_chunkinfo(ci);
2780 if (!next)
2781 return;
2782 next->prev_size = chunksize(ci);
2783}
2784
2785static void
2786hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2787{
2788 chunkinfoptr next;
2789
2790#ifdef DNMALLOC_DEBUG
2791 fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert),
2792	  (unsigned long)hash(chunk(ci_insert)));
2793#endif
2794
2795 if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2796 hashtable_add(ci_insert);
2797 }
2798 else {
[172]2799
[171]2800 ci_insert->hash_next = ci_orig->hash_next;
2801 ci_orig->hash_next = ci_insert;
2802
2803 /* added for prevsize */
2804 if (!(ci_insert->hash_next))
2805 next = next_chunkinfo(ci_insert);
2806 else
2807 next = ci_insert->hash_next;
2808
2809 if (!next)
2810 {
2811 ci_insert->prev_size = chunksize(ci_orig);
2812 }
2813 else
2814 {
2815 next->prev_size = chunksize(ci_insert);
2816 ci_insert->prev_size = chunksize(ci_orig);
2817 }
2818 }
2819}
2820
2821static void
2822hashtable_remove (mchunkptr p)
2823{
2824 chunkinfoptr prevtemp, temp;
2825 unsigned long hashval;
2826
2827 hashval = hash (p);
2828#ifdef DNMALLOC_DEBUG
2829 fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
2830#endif
2831 assert(hashval < AMOUNTHASH); /* rw */
2832 prevtemp = temp = hashtable[hashval];
2833 if (chunk (temp) == p) {
2834 hashtable[hashval] = temp->hash_next;
2835 }
2836 else
2837 {
2838 if (temp && chunk (temp) != p) {
2839 do
2840 {
2841 prevtemp = temp;
2842 temp = temp->hash_next;
2843 } while (temp && chunk (temp) != p);
2844 }
2845#ifdef DNMALLOC_CHECKS
2846 if (!temp) {
2847 fprintf (stderr,
2848 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2849 p, hashval);
2850 abort();
2851 }
2852#else
2853 assert(temp != NULL);
2854#endif
2855 prevtemp->hash_next = temp->hash_next;
2856 }
2857}
2858
[172]2859/* mmapped chunks are multiples of pagesize, no hash_nexts,
2860 * just remove from the hashtable
2861 */
[171]2862#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2863
2864static void
2865hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
2866{
2867 unsigned long hashval;
2868 chunkinfoptr next;
2869
2870#ifdef DNMALLOC_DEBUG
[172]2871 fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
[171]2872#endif
2873
2874 if (ci_orig->hash_next != ci_todelete) {
2875 hashval = hash(chunk(ci_todelete));
2876 assert(hashval < AMOUNTHASH); /* rw */
2877#ifdef DNMALLOC_CHECKS
2878 if (hashtable[hashval] != ci_todelete ) {
2879 fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
2880 }
2881#else
2882 assert(hashtable[hashval] == ci_todelete);
2883#endif
2884 hashtable[hashval] = ci_todelete->hash_next;
2885 }
2886
2887 else {
2888 ci_orig->hash_next = ci_todelete->hash_next;
2889 if (!ci_orig->hash_next) {
2890 next = next_chunkinfo(ci_orig);
2891 } else {
2892 next = ci_orig->hash_next;
2893 }
2894 if (next)
2895 next->prev_size = chunksize(ci_orig);
2896
2897 }
2898}
2899
2900
2901static chunkinfoptr
2902hashtable_lookup (mchunkptr p)
2903{
2904 chunkinfoptr ci;
2905 unsigned long hashval;
2906
2907 /* if we were called wrongly
2908 * if ((char *) p < startheap) return 0;
2909 */
2910 if ((char *) p >= startheap)
2911 {
2912 hashval = hash (p);
2913 assert(hashval < AMOUNTHASH); /* rw */
2914 ci = hashtable[hashval];
2915 if (ci && chunk (ci) == p)
2916 return ci;
2917
2918 if (ci) {
2919 do {
2920 ci = ci->hash_next;
2921 } while (ci && chunk (ci) != p);
2922 }
2923#ifdef DNMALLOC_CHECKS
[237]2924 /* This should never occur but if it does, we'd like to know */
[171]2925 if (!ci) {
2926 fprintf (stderr,
2927 "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2928 p, hashval);
2929 abort();
2930 }
2931#else
2932 assert(ci != NULL);
2933#endif
2934 return ci;
2935 }
2936 return 0;
2937}
2938
2939
2940
2941/*
2942 ----------- Internal state representation and initialization -----------
2943*/
2944
2945struct malloc_state {
2946
2947 /* The maximum chunk size to be eligible for fastbin */
2948 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2949
2950 /* Fastbins */
2951 mfastbinptr fastbins[NFASTBINS];
2952
2953 /* Base of the topmost chunk -- not otherwise kept in a bin */
2954 chunkinfoptr top;
2955
2956 /* The remainder from the most recent split of a small request */
2957 chunkinfoptr last_remainder;
2958
2959 /* Normal bins */
2960 struct chunkinfo bins[NBINS];
2961
2962 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2963 unsigned int binmap[BINMAPSIZE+1];
2964
2965 /* Tunable parameters */
2966 CHUNK_SIZE_T trim_threshold;
2967 INTERNAL_SIZE_T top_pad;
2968 INTERNAL_SIZE_T mmap_threshold;
2969
2970 /* Memory map support */
2971 int n_mmaps;
2972 int n_mmaps_max;
2973 int max_n_mmaps;
2974
2975 /* Cache malloc_getpagesize */
2976 unsigned int pagesize;
2977
2978 /* Canary */
2979 char guard_stored[GUARD_SIZE];
2980
2981 /* Track properties of MORECORE */
2982 unsigned int morecore_properties;
2983
2984 /* Statistics */
2985 INTERNAL_SIZE_T mmapped_mem;
2986 INTERNAL_SIZE_T sbrked_mem;
2987 INTERNAL_SIZE_T max_sbrked_mem;
2988 INTERNAL_SIZE_T max_mmapped_mem;
2989 INTERNAL_SIZE_T max_total_mem;
2990};
2991
2992typedef struct malloc_state *mstate;
2993
2994/*
2995 There is exactly one instance of this struct in this malloc.
2996 If you are adapting this malloc in a way that does NOT use a static
2997 malloc_state, you MUST explicitly zero-fill it before using. This
2998 malloc relies on the property that malloc_state is initialized to
2999 all zeroes (as is true of C statics).
3000*/
3001
3002static struct malloc_state * av_ = NULL; /* never directly referenced */
3003
3004/*
3005 All uses of av_ are via get_malloc_state().
3006 At most one "call" to get_malloc_state is made per invocation of
3007 the public versions of malloc and free, but other routines
3008  that in turn invoke malloc and/or free may call it more than once.
3009 Also, it is called in check* routines if DEBUG is set.
3010*/
3011
3012#define get_malloc_state() (av_)
3013
3014/*
3015 Initialize a malloc_state struct.
3016
3017  This is called only from within malloc_consolidate, which needs to
3018  be called in the same contexts anyway.  It is never called directly
3019 outside of malloc_consolidate because some optimizing compilers try
3020 to inline it at all call points, which turns out not to be an
3021 optimization at all. (Inlining it in malloc_consolidate is fine though.)
3022*/
3023
3024#if __STD_C
3025static void malloc_mmap_state(void)
3026#else
3027static void malloc_mmap_state()
3028#endif
3029{
3030 int mprot;
3031 unsigned long pagesize = malloc_getpagesize;
3032 size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3033
3034 void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3035
3036
3037#ifdef NDEBUG
3038 if (foo == MAP_FAILED) {
3039 fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3040 abort ();
3041 }
3042#else
3043 assert(foo != MAP_FAILED);
3044#endif
3045
3046 mprot = mprotect(foo, pagesize, PROT_NONE);
3047#ifdef NDEBUG
3048 if (mprot == -1) {
3049 fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3050 strerror (errno));
3051 abort ();
3052 }
3053#else
3054 assert(mprot != -1);
3055#endif
3056
3057 av_ = (struct malloc_state *) ((char*)foo + pagesize);
3058
3059 MALLOC_ZERO(av_, sizeof(struct malloc_state));
3060
3061 mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3062#ifdef NDEBUG
3063 if (mprot == -1) {
3064 fprintf (stderr,
3065 "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3066 strerror (errno));
3067 abort ();
3068 }
3069#else
3070 assert(mprot != -1);
3071#endif
3072}
3073
3074#if __STD_C
3075static void malloc_init_state(mstate av)
3076#else
3077static void malloc_init_state(av) mstate av;
3078#endif
3079{
3080 int i;
3081 mbinptr bin;
3082
3083 void * morecore_test = MORECORE(0);
3084 unsigned long hashval;
3085
3086 /* Test morecore function
3087 */
3088 set_morecore32bit(av);
3089
3090 if (morecore_test == MORECORE_FAILURE)
3091 {
3092 set_nonmorecore32bit(av);
3093 }
3094 else
3095 {
3096 /* On 64bit systems, the heap may be located above the
3097 * 32bit address space. Since mmap() probably still can be
3098 * convinced to map within 32bit, we don't use sbrk().
3099 */
3100 hashval = hash (morecore_test);
3101 if (hashval >= AMOUNTHASH)
3102 {
3103 set_nonmorecore32bit(av);
3104 }
3105 }
3106
3107
3108 /* Establish circular links for normal bins */
3109 for (i = 1; i < NBINS; ++i) {
3110 bin = bin_at(av,i);
3111 bin->fd = bin->bk = bin;
3112 }
3113
3114 av->top_pad = DEFAULT_TOP_PAD;
3115 av->n_mmaps_max = DEFAULT_MMAP_MAX;
3116 av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3117 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
3118
3119#if MORECORE_CONTIGUOUS
3120 set_contiguous(av);
3121#else
3122 set_noncontiguous(av);
3123#endif
3124
3125 set_max_fast(av, DEFAULT_MXFAST);
3126
3127 av->top = cireg_getfree ();
3128 av->top->chunk = (mchunkptr) startheap;
3129 av->top->size = 0;
3130 set_previnuse(av->top);
3131 clear_inuse(av->top);
3132 hashtable[0] = av->top;
3133 av->pagesize = malloc_getpagesize;
3134
3135 memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);
3136
3137}
3138
3139/*
3140 Other internal utilities operating on mstates
3141*/
3142
3143#if __STD_C
3144static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
3145static int sYSTRIm(size_t, mstate);
3146static void malloc_consolidate(mstate);
3147#else
3148static Void_t* sYSMALLOc();
3149static int sYSTRIm();
3150static void malloc_consolidate();
3151#endif
3152
3153/* dnmalloc functions */
3154/* needs mstate so moved here */
3155
3156static chunkinfoptr
3157next_chunkinfo (chunkinfoptr ci)
3158{
3159 mchunkptr nextp;
3160 unsigned long hashval;
3161 chunkinfoptr cinfonextp;
3162 mstate av = get_malloc_state();
3163
3164  /* If ci is not the last element in its linked list we can simply
3165     return ci->hash_next (the else branch at the end of this function).
3166   */
3167  if (!ci->hash_next)
3168    {
3169      /* ci is the last element: find the next chunkinfo by looking up
3170       * the chunkinfo for the chunk that physically follows ci's chunk
3171       */
3172 nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));
3173
3174 if (!(nextp == av->top->chunk))
3175 {
3176 hashval = hash (nextp);
3177 /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
3178 cinfonextp = hashtable[hashval];
3179 if (cinfonextp && chunk (cinfonextp) == nextp)
3180 return cinfonextp;
3181
3182#ifdef DNMALLOC_CHECKS_EXTRA
3183 /* This seems bogus; a chunkinfo may legally have no nextp if
3184 * it's the last one allocated (?)
3185 */
3186 else {
3187 if (cinfonextp)
3188 fprintf (stderr,
3189 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3190 chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
3191 else
3192 fprintf (stderr,
3193 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3194 chunk(ci), hashval, "null", "null", nextp);
3195 }
3196#endif
3197
3198 return NULL;
3199 }
3200 else
3201 {
3202 return av->top;
3203 }
3204
3205 }
3206 else
3207 {
3208 return (ci->hash_next);
3209 }
3210}
3211
3212static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3213 mchunkptr nextp;
3214 if (oldp->hash_next == newp)
3215 return 1;
3216 nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3217 if (nextp == chunk(newp))
3218 return 1;
3219 return 0;
3220}
3221
3222
3223
3224/* Get the chunkinfo of the physically previous chunk */
3225/* Since we disposed of prev_size, we need this function to find the previous */
3226
3227static chunkinfoptr
3228prev_chunkinfo (chunkinfoptr ci)
3229{
3230 unsigned int i;
3231 chunkinfoptr prev;
3232 mchunkptr prevchunk = 0;
3233 /* chunkinfoptr temp; */
3234
3235 /* Get the hashtable location of the chunkinfo */
3236 i = hash (chunk (ci));
3237 assert(i < AMOUNTHASH); /* rw */
3238
3239   /* Get the first element of the linked list of chunkinfos that contains ci */
3240 prev = hashtable[i];
3241
3242 if (ci == prev) {
3243 prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3244 i = hash(prevchunk);
3245 assert(i < AMOUNTHASH); /* rw */
3246 /* Loop over the linked list until we reach the last element */
3247 for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3248 } else {
3249 /* p is not the first element in the linked list, we can just
3250 loop over the list and return the previous
3251 */
3252 for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3253 }
3254
3255 return prev;
3256}
3257
3258
3259/*
3260 Debugging support
3261  Dnmalloc broke dlmalloc's debugging functions; they should be fixed
3262  some time in the future, but for now they are left undefined.
3263*/
3264
3265#define check_chunk(P)
3266#define check_free_chunk(P)
3267#define check_inuse_chunk(P)
3268#define check_remalloced_chunk(P,N)
3269#define check_malloced_chunk(P,N)
3270#define check_malloc_state()
3271
3272
3273/* ----------- Routines dealing with system allocation -------------- */
3274
3275/*
3276 sysmalloc handles malloc cases requiring more memory from the system.
3277 On entry, it is assumed that av->top does not have enough
3278 space to service request for nb bytes, thus requiring that av->top
3279 be extended or replaced.
3280*/
3281
3282#if __STD_C
3283static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3284#else
3285static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3286#endif
3287{
3288 chunkinfoptr old_top; /* incoming value of av->top */
3289 INTERNAL_SIZE_T old_size; /* its size */
3290 char* old_end; /* its end address */
3291
3292 long size; /* arg to first MORECORE or mmap call */
3293 char* brk; /* return value from MORECORE */
3294
3295 long correction; /* arg to 2nd MORECORE call */
3296 char* snd_brk; /* 2nd return val */
3297
3298 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3299 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3300 char* aligned_brk; /* aligned offset into brk */
3301
3302 chunkinfoptr p; /* the allocated/returned chunk */
3303 chunkinfoptr remainder; /* remainder from allocation */
3304 chunkinfoptr fencepost; /* fencepost */
3305 CHUNK_SIZE_T remainder_size; /* its size */
3306
3307 CHUNK_SIZE_T sum; /* for updating stats */
3308
3309 size_t pagemask = av->pagesize - 1;
3310
3311#ifdef DNMALLOC_DEBUG
3312 fprintf(stderr, "Enter sysmalloc\n");
3313#endif
3314 /*
3315 If there is space available in fastbins, consolidate and retry
3316 malloc from scratch rather than getting memory from system. This
3317 can occur only if nb is in smallbin range so we didn't consolidate
3318 upon entry to malloc. It is much easier to handle this case here
3319 than in malloc proper.
3320 */
3321
3322
3323 if (have_fastchunks(av)) {
[172]3324 Void_t * retval;
[171]3325 assert(in_smallbin_range(nb));
3326 malloc_consolidate(av);
3327#ifdef DNMALLOC_DEBUG
3328 fprintf(stderr, "Return sysmalloc have_fastchunks\n");
3329#endif
[172]3330 retval = mALLOc(nb - MALLOC_ALIGN_MASK);
3331 VALGRIND_FREELIKE_BLOCK(retval, 0);
3332 return retval;
[171]3333 }
3334
3335
3336 /*
3337 If have mmap, and the request size meets the mmap threshold, and
3338 the system supports mmap, and there are few enough currently
3339 allocated mmapped regions, try to directly map this request
3340 rather than expanding top.
3341 */
3342
3343 if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3344 (av->n_mmaps < av->n_mmaps_max))) {
3345
3346 char* mm; /* return value from mmap call*/
3347
3348 /*
3349 Round up size to nearest page. For mmapped chunks, the overhead
3350 is one SIZE_SZ unit larger than for normal chunks, because there
3351 is no following chunk whose prev_size field could be used.
3352 */
3353 size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3354
3355 /* Don't try if size wraps around 0 */
3356 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3357
3358
3359 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3360
3361 if (mm != (char*)(MORECORE_FAILURE)) {
3362
[172]3363 VALGRIND_MAKE_MEM_NOACCESS(mm,size);
3364
[171]3365 /*
3366 The offset to the start of the mmapped region is stored
3367 in the prev_size field of the chunk. This allows us to adjust
3368 returned start address to meet alignment requirements here
3369 and in memalign(), and still be able to compute proper
3370 address argument for later munmap in free() and realloc().
3371 */
3372
3373 front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
3374 p = cireg_getfree();
3375
3376 if (front_misalign > 0) {
3377 correction = MALLOC_ALIGNMENT - front_misalign;
3378 p->chunk = (mchunkptr)(mm + correction);
3379 p->hash_next = (chunkinfoptr) correction;
3380 set_head(p, (size - correction) |INUSE|IS_MMAPPED);
3381 }
3382 else {
3383 p->chunk = (mchunkptr)mm;
3384 p->hash_next = 0;
3385 set_head(p, size|INUSE|IS_MMAPPED);
3386 }
3387 hashtable_add(p);
3388 /* update statistics */
3389
3390 if (++av->n_mmaps > av->max_n_mmaps)
3391 av->max_n_mmaps = av->n_mmaps;
3392
3393 sum = av->mmapped_mem += size;
3394 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3395 av->max_mmapped_mem = sum;
3396 sum += av->sbrked_mem;
3397 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3398 av->max_total_mem = sum;
3399
3400 check_chunk(p);
3401
3402#ifdef DNMALLOC_DEBUG
3403 fprintf(stderr, "Return mmapped (%lu, total %lu)\n",
3404 size, (unsigned long)/* size_t */av->max_total_mem );
3405#endif
3406 return chunk(p);
3407 }
3408 }
3409 }
3410
3411 /* Record incoming configuration of top */
3412
3413 old_top = av->top;
3414 old_size = chunksize(old_top);
3415 old_end = (char*)(chunk_at_offset(chunk(old_top), old_size));
3416
3417 brk = snd_brk = (char*)(MORECORE_FAILURE);
3418
3419 /*
3420 If not the first time through, we require old_size to be
3421 at least MINSIZE and to have prev_inuse set.
3422 */
3423
3424 /* assert((old_top == initial_top(av) && old_size == 0) ||
3425 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3426 prev_inuse(old_top))); */
3427
3428 /* Precondition: not enough current space to satisfy nb request */
3429 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3430
3431 /* Precondition: all fastbins are consolidated */
3432 assert(!have_fastchunks(av));
3433
3434 /* Request enough space for nb + pad + overhead */
3435 size = nb + av->top_pad + MINSIZE;
3436
3437 /*
3438 If contiguous, we can subtract out existing space that we hope to
3439 combine with new space. We add it back later only if
3440 we don't actually get contiguous space.
3441 */
3442 if (contiguous(av))
3443 size -= old_size;
3444
3445 /*
3446 Round to a multiple of page size.
3447 If MORECORE is not contiguous, this ensures that we only call it
3448 with whole-page arguments. And if MORECORE is contiguous and
3449 this is not first time through, this preserves page-alignment of
3450 previous calls. Otherwise, we correct to page-align below.
3451 */
3452
3453 size = (size + pagemask) & ~pagemask;
3454
3455 /*
3456 Don't try to call MORECORE if argument is so big as to appear
3457 negative. Note that since mmap takes size_t arg, it may succeed
3458 below even if we cannot call MORECORE.
3459 */
3460 if (size > 0 && morecore32bit(av))
3461 brk = (char*)(MORECORE(size));
3462
3463 /*
3464 If have mmap, try using it as a backup when MORECORE fails or
3465 cannot be used. This is worth doing on systems that have "holes" in
3466 address space, so sbrk cannot extend to give contiguous space, but
3467 space is available elsewhere. Note that we ignore mmap max count
3468 and threshold limits, since the space will not be used as a
3469 segregated mmap region.
3470 */
3471 if (brk != (char*)(MORECORE_FAILURE)) {
3472 av->sbrked_mem += size;
[172]3473 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
[171]3474 }
3475
3476 else {
3477
3478#ifdef DNMALLOC_DEBUG
3479 fprintf(stderr, "Morecore failure in sysmalloc\n");
3480#endif
3481
3482 /* Cannot merge with old top, so add its size back in */
3483 if (contiguous(av))
3484 size = (size + old_size + pagemask) & ~pagemask;
3485
3486 /* If we are relying on mmap as backup, then use larger units */
3487 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3488 size = MMAP_AS_MORECORE_SIZE;
3489
3490 /* Don't try if size wraps around 0 */
3491 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3492
3493#ifdef DNMALLOC_DEBUG
3494 fprintf(stderr, "Try mmap in sysmalloc\n");
3495#endif
3496 brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3497
3498 if (brk != (char*)(MORECORE_FAILURE)) {
3499
[172]3500 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3501
[171]3502 av->mmapped_mem += size;
3503#ifdef DNMALLOC_DEBUG
3504 fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
3505#endif
3506
3507 /* We do not need, and cannot use, another sbrk call to find end */
3508 snd_brk = brk + size;
3509
3510 /*
3511 Record that we no longer have a contiguous sbrk region.
3512 After the first time mmap is used as backup, we do not
3513 ever rely on contiguous space since this could incorrectly
3514 bridge regions.
3515 */
3516 set_noncontiguous(av);
3517 }
3518 }
3519 }
3520
3521 if (brk != (char*)(MORECORE_FAILURE)) {
3522#ifdef DNMALLOC_DEBUG
3523 fprintf(stderr, "Success path %lu allocated, sbrked %lu\n",
3524 size, (unsigned long)av->sbrked_mem);
3525#endif
3526 /* av->sbrked_mem += size; moved up */
3527
3528 /*
3529 If MORECORE extends previous space, we can likewise extend top size.
3530 */
3531
3532 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3533 set_head(old_top, (size + old_size) | PREV_INUSE);
3534#ifdef DNMALLOC_DEBUG
3535 fprintf(stderr, "Previous space extended\n");
3536#endif
3537 }
3538
3539 /*
3540 Otherwise, make adjustments:
3541
3542 * If the first time through or noncontiguous, we need to call sbrk
3543 just to find out where the end of memory lies.
3544
3545 * We need to ensure that all returned chunks from malloc will meet
3546 MALLOC_ALIGNMENT
3547
3548 * If there was an intervening foreign sbrk, we need to adjust sbrk
3549       request size to account for the fact that we will not be able to
3550 combine new space with existing space in old_top.
3551
3552 * Almost all systems internally allocate whole pages at a time, in
3553 which case we might as well use the whole last page of request.
3554 So we allocate enough more memory to hit a page boundary now,
3555 which in turn causes future contiguous calls to page-align.
3556 */
3557
3558 else {
3559 front_misalign = 0;
3560 end_misalign = 0;
3561 correction = 0;
3562 aligned_brk = brk;
3563
3564 /*
3565 If MORECORE returns an address lower than we have seen before,
3566 we know it isn't really contiguous. This and some subsequent
3567 checks help cope with non-conforming MORECORE functions and
3568 the presence of "foreign" calls to MORECORE from outside of
3569 malloc or by other threads. We cannot guarantee to detect
3570 these in all cases, but cope with the ones we do detect.
3571 */
3572 if (contiguous(av) && old_size != 0 && brk < old_end) {
3573 set_noncontiguous(av);
3574 }
3575
3576 /* handle contiguous cases */
3577 if (contiguous(av)) {
3578
3579#ifdef DNMALLOC_DEBUG
3580 fprintf(stderr, "Handle contiguous cases\n");
3581#endif
3582 /*
3583 We can tolerate forward non-contiguities here (usually due
3584 to foreign calls) but treat them as part of our space for
3585 stats reporting.
3586 */
3587 if (old_size != 0)
3588 av->sbrked_mem += brk - old_end;
3589
3590 /* Guarantee alignment of first new chunk made from this space */
3591
3592 front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
3593 if (front_misalign > 0) {
3594
3595 /*
3596 Skip over some bytes to arrive at an aligned position.
3597 We don't need to specially mark these wasted front bytes.
3598 They will never be accessed anyway because
3599 prev_inuse of av->top (and any chunk created from its start)
3600 is always true after initialization.
3601 */
3602
3603 correction = MALLOC_ALIGNMENT - front_misalign;
3604 aligned_brk += correction;
3605 }
3606
3607 /*
3608 If this isn't adjacent to existing space, then we will not
3609 be able to merge with old_top space, so must add to 2nd request.
3610 */
3611
3612 correction += old_size;
3613
3614 /* Extend the end address to hit a page boundary */
3615 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3616 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3617
3618 assert(correction >= 0);
3619 snd_brk = (char*)(MORECORE(correction));
3620
3621 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3622 /*
3623 If can't allocate correction, try to at least find out current
3624 brk. It might be enough to proceed without failing.
3625 */
3626 correction = 0;
3627 snd_brk = (char*)(MORECORE(0));
3628 }
3629 else if (snd_brk < brk) {
3630 /*
3631 If the second call gives noncontiguous space even though
3632 it says it won't, the only course of action is to ignore
3633 results of second call, and conservatively estimate where
3634 the first call left us. Also set noncontiguous, so this
3635 won't happen again, leaving at most one hole.
3636
3637 Note that this check is intrinsically incomplete. Because
3638 MORECORE is allowed to give more space than we ask for,
3639 there is no reliable way to detect a noncontiguity
3640 producing a forward gap for the second call.
3641 */
3642 snd_brk = brk + size;
3643 correction = 0;
3644 set_noncontiguous(av);
3645 }
[172]3646 else {
3647 VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
3648 }
[171]3649
3650 }
3651
3652 /* handle non-contiguous cases */
3653 else {
3654
3655#ifdef DNMALLOC_DEBUG
3656 fprintf(stderr, "Handle non-contiguous cases\n");
3657#endif
3658
3659 /* MORECORE/mmap must correctly align */
3660 assert(aligned_OK(brk));
3661
3662 /* Find out current end of memory */
3663 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3664 snd_brk = (char*)(MORECORE(0));
3665 av->sbrked_mem += snd_brk - brk - size;
3666 }
3667#ifdef DNMALLOC_DEBUG
3668 fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
3669#endif
3670 }
3671
3672 /* Adjust top based on results of second sbrk.
3673 *
3674 * If mmap() has been used as backup for failed morecore(),
3675 * we end up in this branch as well.
3676 */
3677 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3678#ifdef DNMALLOC_DEBUG
3679	fprintf(stderr, "Adjust top, correction %lu\n", (unsigned long)correction);
3680#endif
3681 /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
3682 av->top = cireg_getfree();
3683 av->top->chunk = (mchunkptr)aligned_brk;
3684 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3685#ifdef DNMALLOC_DEBUG
3686 fprintf(stderr, "Adjust top, top %p size %lu\n",
3687 av->top, (unsigned long)chunksize(av->top));
3688#endif
3689 hashtable_add(av->top);
3690 av->sbrked_mem += correction;
3691
3692 /*
3693 If not the first time through, we either have a
3694 gap due to foreign sbrk or a non-contiguous region. Insert a
3695 double fencepost at old_top to prevent consolidation with space
3696 we don't own. These fenceposts are artificial chunks that are
3697       marked as inuse. The original dlmalloc had two of these, but they
3698       were too small to use. To ensure that the linked lists contain a
3699       maximum of 8 elements we use only one. Inuse is determined by the
3700       current rather than the next chunk anyway.
3701 */
3702
3703 if (old_size != 0) {
3704#ifdef DNMALLOC_DEBUG
3705 fprintf(stderr, "Shrink old_top to insert fenceposts\n");
3706#endif
3707 /*
3708 Shrink old_top to insert fenceposts, keeping size a
3709 multiple of MALLOC_ALIGNMENT. We know there is at least
3710 enough space in old_top to do this.
3711 */
3712#ifdef DNMALLOC_DEBUG
3713 fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n",
3714 old_top, (unsigned long)old_size);
3715#endif
3716 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3717 set_head(old_top, old_size | PREV_INUSE);
3718#ifdef DNMALLOC_DEBUG
3719 fprintf(stderr, "Adjust top, old_size after %lu\n",
3720 (unsigned long)old_size);
3721#endif
3722
3723 /*
3724 Note that the following assignments completely overwrite
3725 old_top when old_size was previously MINSIZE. This is
3726 intentional. We need the fencepost, even if old_top otherwise gets
3727 lost.
3728 */
3729	  /* In dnmalloc the fencepost needs to be 16 bytes; however, since
3730	     it is marked inuse it will never be coalesced
3731 */
3732 fencepost = cireg_getfree();
3733 fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top),
3734 old_size);
3735 fencepost->size = 16|INUSE|PREV_INUSE;
3736 hashtable_add(fencepost);
3737 /*
3738 If possible, release the rest, suppressing trimming.
3739 */
3740 if (old_size >= MINSIZE) {
3741 INTERNAL_SIZE_T tt = av->trim_threshold;
3742#ifdef DNMALLOC_DEBUG
3743 fprintf(stderr, "Release\n");
3744#endif
3745 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3746 set_head(old_top, old_size | PREV_INUSE | INUSE);
3747 guard_set(av->guard_stored, old_top, 0, old_size);
[172]3748 VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
[171]3749 fREe(chunk(old_top));
3750 av->trim_threshold = tt;
3751#ifdef DNMALLOC_DEBUG
3752 fprintf(stderr, "Release done\n");
3753#endif
3754 }
3755
3756#ifdef DNMALLOC_DEBUG
3757 fprintf(stderr, "Adjust top, size %lu\n",
3758 (unsigned long)chunksize(av->top));
3759#endif
3760
3761 } /* fenceposts */
3762 } /* adjust top */
3763 } /* not extended previous region */
3764
3765 /* Update statistics */ /* FIXME check this */
3766 sum = av->sbrked_mem;
3767 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3768 av->max_sbrked_mem = sum;
3769
3770 sum += av->mmapped_mem;
3771 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3772 av->max_total_mem = sum;
3773
3774 check_malloc_state();
3775
3776 /* finally, do the allocation */
3777
3778 p = av->top;
3779 size = chunksize(p);
3780
3781#ifdef DNMALLOC_DEBUG
3782 fprintf(stderr, "Size: %lu nb+MINSIZE: %lu\n",
3783 (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
3784#endif
3785
3786 /* check that one of the above allocation paths succeeded */
3787 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3788 remainder_size = size - nb;
3789 remainder = cireg_getfree();
3790 remainder->chunk = chunk_at_offset(chunk(p), nb);
3791 av->top = remainder;
3792 set_head(p, nb | PREV_INUSE | INUSE);
3793 set_head(remainder, remainder_size | PREV_INUSE);
3794 hashtable_insert (p, av->top);
3795 check_malloced_chunk(p, nb);
3796#ifdef DNMALLOC_DEBUG
3797 fprintf(stderr, "Return any (total %lu)\n",
3798 (unsigned long)/* size_t */av->max_total_mem );
3799#endif
3800 return chunk(p);
3801 }
3802
3803 }
3804
3805#ifdef DNMALLOC_DEBUG
3806 fprintf(stderr, "Return failed (total %lu)\n",
3807 (unsigned long)/* size_t */av->max_total_mem );
3808#endif
3809
3810 /* catch all failure paths */
3811 MALLOC_FAILURE_ACTION;
3812 return 0;
3813}
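
/*
  Illustrative sketch (not part of dnmalloc): the page rounding used in
  sYSMALLOc above, size = (size + pagemask) & ~pagemask, bumps a request up
  to a whole number of pages before MORECORE/mmap is called.  A stand-alone
  demonstration, fenced off with #if 0 so it does not enter the build and
  assuming 4096-byte pages purely for the example:
*/
#if 0
#include <assert.h>
#include <stddef.h>

/* Round sz up to a multiple of pagesize (pagesize must be a power of two). */
static size_t round_to_pages(size_t sz, size_t pagesize)
{
  size_t pagemask = pagesize - 1;
  return (sz + pagemask) & ~pagemask;
}

int main(void)
{
  assert(round_to_pages(10000, 4096) == 12288); /* three 4 KB pages   */
  assert(round_to_pages(4096,  4096) == 4096);  /* already page sized */
  assert(round_to_pages(1,     4096) == 4096);  /* at least one page  */
  return 0;
}
#endif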
3814
3815
3816
3817
3818/*
3819 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3820 to the system (via negative arguments to sbrk) if there is unused
3821 memory at the `high' end of the malloc pool. It is called
3822 automatically by free() when top space exceeds the trim
3823 threshold. It is also called by the public malloc_trim routine. It
3824 returns 1 if it actually released any memory, else 0.
3825*/
3826
3827#if __STD_C
3828static int sYSTRIm(size_t pad, mstate av)
3829#else
3830static int sYSTRIm(pad, av) size_t pad; mstate av;
3831#endif
3832{
3833 long top_size; /* Amount of top-most memory */
3834 long extra; /* Amount to release */
3835 long released; /* Amount actually released */
3836 char* current_brk; /* address returned by pre-check sbrk call */
3837 char* new_brk; /* address returned by post-check sbrk call */
3838 size_t pagesz;
3839
3840 pagesz = av->pagesize;
3841 top_size = chunksize(av->top);
3842
3843 /* Release in pagesize units, keeping at least one page */
3844 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3845
3846 if (extra > 0) {
3847
3848 /*
3849 Only proceed if end of memory is where we last set it.
3850 This avoids problems if there were foreign sbrk calls.
3851 */
3852 current_brk = (char*)(MORECORE(0));
3853 if (current_brk == (char*)(av->top) + top_size) {
3854
3855 /*
3856 Attempt to release memory. We ignore MORECORE return value,
3857 and instead call again to find out where new end of memory is.
3858       This avoids problems if the first call releases less than we asked,
3859       or if failure somehow altered the brk value. (We could still
3860 encounter problems if it altered brk in some very bad way,
3861 but the only thing we can do is adjust anyway, which will cause
3862 some downstream failure.)
3863 */
3864
3865 MORECORE(-extra);
3866 new_brk = (char*)(MORECORE(0));
3867
3868 if (new_brk != (char*)MORECORE_FAILURE) {
3869 released = (long)(current_brk - new_brk);
3870
3871 if (released != 0) {
3872 /* Success. Adjust top. */
3873 av->sbrked_mem -= released;
3874 set_head(av->top, (top_size - released) | PREV_INUSE);
3875 check_malloc_state();
3876 return 1;
3877 }
3878 }
3879 }
3880 }
3881 return 0;
3882}
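
/*
  Illustrative sketch (not part of dnmalloc): how sYSTRIm sizes the give-back.
  The expression above keeps 'pad' bytes plus at least one page on top of the
  heap and releases the remainder in whole pages.  Stand-alone version, taking
  MINSIZE as 16 and a 4096-byte page purely as example values:
*/
#if 0
#include <assert.h>

/* Bytes to return to the OS, in whole pages, keeping pad + at least one page. */
static long trim_extra(long top_size, long pad, long minsize, long pagesz)
{
  return ((top_size - pad - minsize + (pagesz - 1)) / pagesz - 1) * pagesz;
}

int main(void)
{
  /* 20000 bytes of top space and nothing reserved: release 16384 bytes. */
  assert(trim_extra(20000, 0, 16, 4096) == 16384);
  /* Top barely larger than the reserve: nothing is released. */
  assert(trim_extra(4096, 0, 16, 4096) == 0);
  return 0;
}
#endif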
3883
3884/*
3885 ------------------------------ malloc ------------------------------
3886*/
3887
3888
3889#if __STD_C
3890DL_STATIC Void_t* mALLOc(size_t bytes)
3891#else
3892DL_STATIC Void_t* mALLOc(bytes) size_t bytes;
3893#endif
3894{
3895 mstate av = get_malloc_state();
3896
3897 INTERNAL_SIZE_T nb; /* normalized request size */
3898 unsigned int idx; /* associated bin index */
3899 mbinptr bin; /* associated bin */
3900 mfastbinptr* fb; /* associated fastbin */
3901
3902 chunkinfoptr victim; /* inspected/selected chunk */
3903 INTERNAL_SIZE_T size; /* its size */
3904 int victim_index; /* its bin index */
3905
3906 chunkinfoptr remainder; /* remainder from a split */
3907 CHUNK_SIZE_T remainder_size; /* its size */
3908
3909 unsigned int block; /* bit map traverser */
3910 unsigned int bit; /* bit map traverser */
3911 unsigned int map; /* current word of binmap */
3912
3913 chunkinfoptr fwd; /* misc temp for linking */
3914 chunkinfoptr bck; /* misc temp for linking */
3915
3916 Void_t* retval;
3917
3918 /* chunkinfoptr next; */
3919
3920
3921 /*
3922 Convert request size to internal form by adding SIZE_SZ bytes
3923 overhead plus possibly more to obtain necessary alignment and/or
3924 to obtain a size of at least MINSIZE, the smallest allocatable
3925 size. Also, checked_request2size traps (returning 0) request sizes
3926 that are so large that they wrap around zero when padded and
3927 aligned.
3928 */
3929#if defined(SH_CUTEST)
3930 extern int malloc_count;
3931 ++malloc_count;
3932#endif
3933
3934 checked_request2size(bytes, nb);
3935
3936 /*
3937 Bypass search if no frees yet
3938 */
3939 if (av && have_anychunks(av)) {
3940 goto av_initialized;
3941 }
3942 else {
3943 if (!av || av->max_fast == 0) { /* initialization check */
3944 malloc_consolidate(av);
3945 av = get_malloc_state();
3946 }
3947 goto use_top;
3948 }
3949
3950 av_initialized:
3951
3952 /*
3953 If the size qualifies as a fastbin, first check corresponding bin.
3954 */
3955 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3956 fb = &(av->fastbins[(fastbin_index(nb))]);
3957 if ( (victim = *fb) != 0) {
3958 *fb = victim->fd;
3959 check_remalloced_chunk(victim, nb);
3960 guard_set(av->guard_stored, victim, bytes, nb);
[172]3961 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3962 return chunk(victim);
3963 }
3964 }
3965
3966 /*
3967 If a small request, check regular bin. Since these "smallbins"
3968 hold one size each, no searching within bins is necessary.
3969 (For a large request, we need to wait until unsorted chunks are
3970 processed to find best fit. But for small ones, fits are exact
3971 anyway, so we can check now, which is faster.)
3972 */
3973
3974 if (in_smallbin_range(nb)) {
3975 idx = smallbin_index(nb);
3976 bin = bin_at(av,idx);
3977
3978 if ((victim = last(bin)) != bin) {
3979 bck = victim->bk;
3980 bin->bk = bck;
3981 bck->fd = bin;
3982
3983 set_all_inuse(victim);
3984
3985 check_malloced_chunk(victim, nb);
3986 guard_set(av->guard_stored, victim, bytes, nb);
[172]3987 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3988 return chunk(victim);
3989 }
3990 }
3991
3992 /*
3993 If this is a large request, consolidate fastbins before continuing.
3994 While it might look excessive to kill all fastbins before
3995 even seeing if there is space available, this avoids
3996 fragmentation problems normally associated with fastbins.
3997 Also, in practice, programs tend to have runs of either small or
3998 large requests, but less often mixtures, so consolidation is not
3999 invoked all that often in most programs. And the programs that
4000 it is called frequently in otherwise tend to fragment.
4001 */
4002
4003 else {
4004 idx = largebin_index(nb);
4005 if (have_fastchunks(av))
4006 malloc_consolidate(av);
4007 }
4008
4009 /*
4010 Process recently freed or remaindered chunks, taking one only if
4011    it is an exact fit, or, if this is a small request, the chunk is the remainder from
4012 the most recent non-exact fit. Place other traversed chunks in
4013 bins. Note that this step is the only place in any routine where
4014 chunks are placed in bins.
4015 */
4016
4017 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4018 bck = victim->bk;
4019 size = chunksize(victim);
4020
4021 /*
4022 If a small request, try to use last remainder if it is the
4023 only chunk in unsorted bin. This helps promote locality for
4024 runs of consecutive small requests. This is the only
4025 exception to best-fit, and applies only when there is
4026 no exact fit for a small chunk.
4027 */
4028
4029 if (UNLIKELY(in_smallbin_range(nb) &&
4030 bck == unsorted_chunks(av) &&
4031 victim == av->last_remainder &&
4032 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4033
4034 /* split and reattach remainder */
4035 remainder_size = size - nb;
4036 remainder = cireg_getfree();
4037 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4038 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4039 av->last_remainder = remainder;
4040 remainder->bk = remainder->fd = unsorted_chunks(av);
4041
4042 set_head(victim, nb | PREV_INUSE|INUSE);
4043 set_head(remainder, remainder_size | PREV_INUSE);
4044 hashtable_insert(victim, remainder);
4045
4046 check_malloced_chunk(victim, nb);
4047 guard_set(av->guard_stored, victim, bytes, nb);
[172]4048 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4049 return chunk(victim);
4050 }
4051
4052 /* remove from unsorted list */
4053 unsorted_chunks(av)->bk = bck;
4054 bck->fd = unsorted_chunks(av);
4055
4056 /* Take now instead of binning if exact fit */
4057
4058 if (UNLIKELY(size == nb)) {
4059        set_all_inuse(victim);
4060 check_malloced_chunk(victim, nb);
4061 guard_set(av->guard_stored, victim, bytes, nb);
[172]4062 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4063 return chunk(victim);
4064 }
4065
4066 /* place chunk in bin */
4067
4068 if (in_smallbin_range(size)) {
4069
4070 victim_index = smallbin_index(size);
4071 bck = bin_at(av, victim_index);
4072 fwd = bck->fd;
4073 }
4074 else {
4075 victim_index = largebin_index(size);
4076 bck = bin_at(av, victim_index);
4077 fwd = bck->fd;
4078
4079 if (UNLIKELY(fwd != bck)) {
4080 /* if smaller than smallest, place first */
4081 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4082 fwd = bck;
4083 bck = bck->bk;
4084 }
4085 else if ((CHUNK_SIZE_T)(size) >=
4086 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4087
4088 /* maintain large bins in sorted order */
4089 size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4090 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
4091 fwd = fwd->fd;
4092 bck = fwd->bk;
4093 }
4094 }
4095 }
4096
4097 mark_bin(av, victim_index);
4098 victim->bk = bck;
4099 victim->fd = fwd;
4100 fwd->bk = victim;
4101 bck->fd = victim;
4102 }
4103
4104 /*
4105 If a large request, scan through the chunks of current bin to
4106 find one that fits. (This will be the smallest that fits unless
4107 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
4108 the only step where an unbounded number of chunks might be
4109 scanned without doing anything useful with them. However the
4110 lists tend to be short.
4111 */
4112
4113 if (!in_smallbin_range(nb)) {
4114 bin = bin_at(av, idx);
4115
4116 victim = last(bin);
4117
4118 if (UNLIKELY(victim != bin)) {
4119
4120 do {
4121 size = chunksize(victim);
4122
4123 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4124 remainder_size = size - nb;
4125 unlink(victim, bck, fwd);
4126
4127 /* Split */
4128 if (remainder_size >= MINSIZE) {
4129 remainder = cireg_getfree();
4130 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4131 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4132 remainder->bk = remainder->fd = unsorted_chunks(av);
4133 set_head(victim, nb | PREV_INUSE | INUSE);
4134 set_head(remainder, remainder_size | PREV_INUSE);
4135 hashtable_insert(victim, remainder);
4136 check_malloced_chunk(victim, nb);
4137 guard_set(av->guard_stored, victim, bytes, nb);
[172]4138 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4139 return chunk(victim);
4140 }
4141 /* Exhaust */
4142 else {
4143 set_all_inuse(victim);
4144 check_malloced_chunk(victim, nb);
4145 guard_set(av->guard_stored, victim, bytes, nb);
[172]4146 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4147 return chunk(victim);
4148 }
4149 }
4150 victim = victim->bk;
4151 } while(victim != bin);
4152 }
4153 }
4154
4155 /*
4156 Search for a chunk by scanning bins, starting with next largest
4157 bin. This search is strictly by best-fit; i.e., the smallest
4158 (with ties going to approximately the least recently used) chunk
4159 that fits is selected.
4160
4161 The bitmap avoids needing to check that most blocks are nonempty.
4162 */
4163
4164
4165 ++idx;
4166 bin = bin_at(av,idx);
4167 block = idx2block(idx);
4168 map = av->binmap[block];
4169 bit = idx2bit(idx);
4170
4171 for (;;) {
4172
4173 /* Skip rest of block if there are no more set bits in this block. */
4174 if (bit > map || bit == 0) {
4175 do {
4176 if (++block >= BINMAPSIZE) /* out of bins */
4177 goto use_top;
4178 } while ( (map = av->binmap[block]) == 0);
4179
4180 bin = bin_at(av, (block << BINMAPSHIFT));
4181 bit = 1;
4182 }
4183
4184 /* Advance to bin with set bit. There must be one. */
4185 while ((bit & map) == 0) {
4186 bin = next_bin(bin);
4187 bit <<= 1;
4188 assert(bit != 0);
4189 }
4190
4191 /* Inspect the bin. It is likely to be non-empty */
4192 victim = last(bin);
4193
4194 /* If a false alarm (empty bin), clear the bit. */
4195 if (victim == bin) {
4196 av->binmap[block] = map &= ~bit; /* Write through */
4197 bin = next_bin(bin);
4198 bit <<= 1;
4199 }
4200
4201 else {
4202 size = chunksize(victim);
4203
4204 /* We know the first chunk in this bin is big enough to use. */
4205 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4206
4207 remainder_size = size - nb;
4208
4209 /* unlink */
4210 bck = victim->bk;
4211 bin->bk = bck;
4212 bck->fd = bin;
4213
4214 /* Split */
4215 if (remainder_size >= MINSIZE) {
4216 remainder = cireg_getfree();
4217 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4218
4219 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4220 remainder->bk = remainder->fd = unsorted_chunks(av);
4221 /* advertise as last remainder */
[172]4222 if (in_smallbin_range(nb))
4223 av->last_remainder = remainder;
[171]4224
4225 set_head(victim, nb | PREV_INUSE | INUSE);
4226 set_head(remainder, remainder_size | PREV_INUSE);
4227 hashtable_insert(victim, remainder);
4228 check_malloced_chunk(victim, nb);
4229 guard_set(av->guard_stored, victim, bytes, nb);
[172]4230 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4231 return chunk(victim);
4232 }
4233 /* Exhaust */
4234 else {
4235 set_all_inuse(victim);
4236 check_malloced_chunk(victim, nb);
4237 guard_set(av->guard_stored, victim, bytes, nb);
[172]4238 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4239 return chunk(victim);
4240 }
4241
4242 }
4243 }
4244
4245 use_top:
4246
4247
4248 /*
4249 If large enough, split off the chunk bordering the end of memory
4250 (held in av->top). Note that this is in accord with the best-fit
4251 search rule. In effect, av->top is treated as larger (and thus
4252 less well fitting) than any other available chunk since it can
4253 be extended to be as large as necessary (up to system
4254 limitations).
4255
4256 We require that av->top always exists (i.e., has size >=
4257 MINSIZE) after initialization, so if it would otherwise be
4258     exhausted by current request, it is replenished. (The main
4259 reason for ensuring it exists is that we may need MINSIZE space
4260 to put in fenceposts in sysmalloc.)
4261 */
4262
4263 victim = av->top;
4264 size = chunksize(victim);
4265
4266 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
[172]4267 remainder = cireg_getfree();
[171]4268 remainder_size = size - nb;
4269 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4270 av->top = remainder;
4271 set_head(victim, nb | PREV_INUSE | INUSE);
4272 set_head(remainder, remainder_size | PREV_INUSE);
4273 hashtable_insert(victim, remainder);
4274 check_malloced_chunk(victim, nb);
4275 guard_set(av->guard_stored, victim, bytes, nb);
[172]4276 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4277 return chunk(victim);
4278 }
4279
4280 /*
4281 If no space in top, relay to handle system-dependent cases
4282 */
4283 retval = sYSMALLOc(nb, av);
4284 if (retval) {
4285 victim = mem2chunk(retval);
4286 guard_set(av->guard_stored, victim, bytes, nb);
4287 }
[172]4288 VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
[171]4289 return retval;
4290}
4291
4292/*
4293 ------------------------------ free ------------------------------
4294*/
4295
4296#if __STD_C
4297DL_STATIC void fREe(Void_t* mem)
4298#else
4299DL_STATIC void fREe(mem) Void_t* mem;
4300#endif
4301{
4302 mstate av = get_malloc_state();
4303
4304 chunkinfoptr p; /* chunk corresponding to mem */
4305 INTERNAL_SIZE_T size; /* its size */
4306 mfastbinptr* fb; /* associated fastbin */
4307 chunkinfoptr prevchunk; /* previous physical chunk */
4308 chunkinfoptr nextchunk; /* next contiguous chunk */
4309 INTERNAL_SIZE_T nextsize; /* its size */
4310 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4311 chunkinfoptr bck; /* misc temp for linking */
4312 chunkinfoptr fwd; /* misc temp for linking */
4313 chunkinfoptr next;
4314#if defined(SH_CUTEST)
4315 extern int malloc_count;
4316 --malloc_count;
4317#endif
4318
4319 /* free(0) has no effect */
4320 if (mem != 0) {
4321 p = hashtable_lookup(mem);
4322 /* check that memory is managed by us
4323 * and is inuse
4324 */
4325 if (UNLIKELY(!p || !inuse(p)))
4326 {
4327#ifdef DNMALLOC_CHECKS
4328 if (p) {
4329 fprintf(stderr, "Attempt to free memory not in use\n");
4330 abort();
4331 } else {
4332 fprintf(stderr, "Attempt to free memory not allocated\n");
4333 abort();
4334 }
4335#endif
4336 assert(p && inuse(p));
4337 return;
4338 }
4339
[172]4340 VALGRIND_FREELIKE_BLOCK(mem, 0);
4341
[171]4342 guard_check(av->guard_stored, p);
[172]4343
[171]4344 size = chunksize(p);
4345
4346 check_inuse_chunk(p);
4347
4348 /*
4349 If eligible, place chunk on a fastbin so it can be found
4350 and used quickly in malloc.
4351 */
4352
4353 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
4354
4355#if TRIM_FASTBINS
4356 /*
4357 If TRIM_FASTBINS set, don't place chunks
4358 bordering top into fastbins
4359 */
4360 && (chunk_at_offset(chunk(p), size) != av->top)
4361#endif
4362 ) {
4363
4364 set_fastchunks(av);
4365 fb = &(av->fastbins[fastbin_index(size)]);
4366 p->fd = *fb;
4367 *fb = p;
4368 }
4369
4370 /*
4371 Consolidate other non-mmapped chunks as they arrive.
4372 */
4373
4374 else if (!chunk_is_mmapped(p)) {
4375 set_anychunks(av);
4376
4377 nextchunk = next_chunkinfo(p);
4378 if (nextchunk)
4379 nextsize = chunksize(nextchunk);
4380 else
4381 nextsize = 0;/* gcc doesn't notice that it's only used if (nextchunk)*/
4382
4383 /* consolidate backward */
4384 if (UNLIKELY(!prev_inuse(p))) {
4385 prevchunk = prev_chunkinfo(p);
4386 prevsize = chunksize(prevchunk);
4387#ifdef DNMALLOC_CHECKS
4388 if (inuse(prevchunk)) {
4389 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
4390 abort();
4391 }
4392#else
4393 assert(!inuse(prevchunk));
4394#endif
4395 size += prevsize;
4396 unlink(prevchunk, bck, fwd);
4397 set_head(p, size | PREV_INUSE);
4398 hashtable_skiprm(prevchunk,p);
4399 /* This chunk no longer exists in any form: release the chunkinfoptr
4400 */
4401 freecilst_add(p);
4402 p = prevchunk;
4403 }
4404
4405 if (nextchunk) {
4406 if (nextchunk != av->top) {
4407 /* get and clear inuse bit */
4408 clear_previnuse(nextchunk);
4409
4410 /* consolidate forward */
4411 if (!inuse(nextchunk)) {
[228]4412 /* if mmap is used instead of sbrk, we may have a
4413 * chunk with !nextchunk->fd && !nextchunk->bk
4414 */
4415	      if (nextchunk->fd && nextchunk->bk)
4416 {
4417 unlink(nextchunk, bck, fwd);
4418 size += nextsize;
4419 set_head(p, size | PREV_INUSE);
4420 hashtable_skiprm(p, nextchunk);
4421 freecilst_add (nextchunk);
4422 }
[171]4423 }
4424
4425 set_head(p, size | PREV_INUSE);
4426 next = next_chunkinfo(p);
4427 if (next)
4428 next->prev_size = size;
4429
4430 /*
4431 Place the chunk in unsorted chunk list. Chunks are
4432 not placed into regular bins until after they have
4433 been given one chance to be used in malloc.
4434 */
4435
4436 bck = unsorted_chunks(av);
4437 fwd = bck->fd;
4438 p->bk = bck;
4439 p->fd = fwd;
4440 bck->fd = p;
4441 fwd->bk = p;
4442
4443 nextchunk = next_chunkinfo(p);
4444 if (nextchunk)
4445 nextchunk->prev_size = chunksize(p);
4446
4447 check_free_chunk(p);
4448 }
4449
4450 /*
4451 If the chunk borders the current high end of memory,
4452 consolidate into top
4453 */
4454
4455 else {
4456 size += nextsize;
4457 set_head(p, size | PREV_INUSE);
4458 hashtable_remove(chunk(av->top));
4459 freecilst_add(av->top);
4460 av->top = p;
4461 check_chunk(p);
4462 }
4463 } /* if (nextchunk) */
4464
4465 /*
4466 If freeing a large space, consolidate possibly-surrounding
4467 chunks. Then, if the total unused topmost memory exceeds trim
4468 threshold, ask malloc_trim to reduce top.
4469
4470 Unless max_fast is 0, we don't know if there are fastbins
4471 bordering top, so we cannot tell for sure whether threshold
4472 has been reached unless fastbins are consolidated. But we
4473 don't want to consolidate on each free. As a compromise,
4474 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4475 is reached.
4476 */
4477
4478 if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) {
4479 if (have_fastchunks(av))
4480 malloc_consolidate(av);
4481
4482#ifndef MORECORE_CANNOT_TRIM
4483 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
4484 (CHUNK_SIZE_T)(av->trim_threshold))
4485 {
4486 if (morecore32bit(av))
4487 {
4488#ifdef DNMALLOC_DEBUG
4489 fprintf(stderr, "Calling systrim from free()\n");
4490#endif
4491 sYSTRIm(av->top_pad, av);
4492#ifdef DNMALLOC_DEBUG
4493 fprintf(stderr, "Systrim done\n");
4494#endif
4495 }
4496 }
4497#endif
4498 }
4499
4500 }
4501 /*
4502 If the chunk was allocated via mmap, release via munmap()
4503 Note that if HAVE_MMAP is false but chunk_is_mmapped is
4504      true, then the user must have overwritten memory. There's nothing
4505 we can do to catch this error unless DEBUG is set, in which case
4506 check_inuse_chunk (above) will have triggered error.
4507 */
4508
4509 else {
4510 int ret;
4511 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
4512 av->n_mmaps--;
4513 av->mmapped_mem -= (size + offset);
4514 ret = munmap((char*) chunk(p) - offset, size + offset);
4515 hashtable_remove_mmapped(chunk(p));
4516 freecilst_add(p);
4517 /* munmap returns non-zero on failure */
4518 assert(ret == 0);
4519 }
4520 }
4521}
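
/*
  Illustrative usage sketch (not part of dnmalloc): the hashtable lookup at
  the top of fREe means that freeing a pointer that is not currently
  allocated is detected instead of silently corrupting allocator state.
  Assuming dnmalloc is linked in as the malloc implementation and was built
  with DNMALLOC_CHECKS, a double free like the one hinted at below prints
  "Attempt to free memory not in use" and aborts:
*/
#if 0
#include <stdlib.h>
#include <string.h>

int main(void)
{
  char *p = malloc(64);
  if (!p) return 1;
  memset(p, 'x', 64);
  free(p);          /* first free: fine                                  */
  /* free(p); */    /* second free: caught by the !inuse(p) check above  */
  return 0;
}
#endif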
4522
4523/*
4524 ------------------------- malloc_consolidate -------------------------
4525
4526 malloc_consolidate is a specialized version of free() that tears
4527 down chunks held in fastbins. Free itself cannot be used for this
4528 purpose since, among other things, it might place chunks back onto
4529 fastbins. So, instead, we need to use a minor variant of the same
4530 code.
4531
4532 Also, because this routine needs to be called the first time through
4533 malloc anyway, it turns out to be the perfect place to trigger
4534 initialization code.
4535*/
4536
4537#if __STD_C
4538static void malloc_consolidate(mstate av)
4539#else
4540static void malloc_consolidate(av) mstate av;
4541#endif
4542{
4543 mfastbinptr* fb; /* current fastbin being consolidated */
4544 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4545 chunkinfoptr p; /* current chunk being consolidated */
4546 chunkinfoptr nextp; /* next chunk to consolidate */
4547 chunkinfoptr prevp;
4548 chunkinfoptr unsorted_bin; /* bin header */
4549 chunkinfoptr first_unsorted; /* chunk to link to */
4550
4551 /* These have same use as in free() */
4552 chunkinfoptr nextchunk;
4553 INTERNAL_SIZE_T size;
4554 INTERNAL_SIZE_T nextsize;
4555 INTERNAL_SIZE_T prevsize;
4556 chunkinfoptr bck;
4557 chunkinfoptr fwd;
4558 chunkinfoptr next;
4559
4560 /*
4561 If max_fast is 0, we know that av hasn't
4562 yet been initialized, in which case do so below
4563 */
4564 if (av && av->max_fast != 0) {
[172]4565
[171]4566 clear_fastchunks(av);
4567
4568 unsorted_bin = unsorted_chunks(av);
4569
4570 /*
4571 Remove each chunk from fast bin and consolidate it, placing it
4572 then in unsorted bin. Among other reasons for doing this,
4573 placing in unsorted bin avoids needing to calculate actual bins
4574 until malloc is sure that chunks aren't immediately going to be
4575 reused anyway.
4576 */
4577
4578 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4579 fb = &(av->fastbins[0]);
4580 do {
4581 if ( UNLIKELY((p = *fb) != 0)) {
4582 *fb = 0;
4583 do {
4584 check_inuse_chunk(p);
4585 nextp = p->fd;
4586
4587 /*
4588 * Slightly streamlined version of consolidation code in free()
4589 */
4590
4591 size = chunksize(p);
4592 nextchunk = next_chunkinfo(p);
4593
4594 /* gcc doesn't notice that it's only used if (nextchunk) */
4595 if (nextchunk)
4596 nextsize = chunksize(nextchunk);
4597 else
4598 nextsize = 0;
4599
4600 if (!prev_inuse(p)) {
4601 prevp = prev_chunkinfo(p);
4602 prevsize = chunksize(prevp);
4603 size += prevsize;
4604#ifdef DNMALLOC_CHECKS
4605 if (inuse(prevp)) {
4606 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
4607 abort();
4608 }
4609#else
4610 assert(!inuse(prevp));
4611#endif
4612 unlink(prevp, bck, fwd);
4613 set_head(p, size | PREV_INUSE);
4614 hashtable_skiprm(prevp,p);
4615 freecilst_add(p);
4616 p=prevp;
4617 }
4618
4619 if (nextchunk) {
4620 if (nextchunk != av->top) {
4621
4622 clear_previnuse(nextchunk);
4623
[172]4624 /* if mmap is used instead of sbrk, we may have a
4625 * chunk with !nextchunk->fd && !nextchunk->bk
4626 */
[171]4627 if (!inuse(nextchunk)) {
[172]4628 if( nextchunk->fd && nextchunk->bk) {
4629 size += nextsize;
4630 unlink(nextchunk, bck, fwd);
4631 set_head(p, size | PREV_INUSE);
4632 hashtable_skiprm(p,nextchunk);
4633 freecilst_add(nextchunk);
4634 }
[171]4635 }
4636
4637 first_unsorted = unsorted_bin->fd;
4638 unsorted_bin->fd = p;
4639 first_unsorted->bk = p;
4640
4641 set_head(p, size | PREV_INUSE);
4642 p->bk = unsorted_bin;
4643 p->fd = first_unsorted;
4644 next = next_chunkinfo(p);
4645 if (next)
4646 next->prev_size = size;
4647
4648
4649 }
4650
[172]4651 else if (nextchunk == av->top) {
[171]4652 size += nextsize;
4653 set_head(p, size | PREV_INUSE);
4654 hashtable_remove(chunk(av->top));
4655 freecilst_add(av->top);
4656 av->top = p;
4657 }
[172]4658 } /* if (nextchunk) */
[171]4659
4660 } while ( (p = nextp) != 0);
4661
4662 }
4663 } while (fb++ != maxfb);
4664 }
4665 else {
[237]4666 /* Initialize dnmalloc */
[171]4667 dnmalloc_init();
4668 malloc_init_state(get_malloc_state());
4669 check_malloc_state();
4670 }
4671}
4672
4673/*
4674 ------------------------------ realloc ------------------------------
4675*/
4676
4677
4678#if __STD_C
4679DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4680#else
4681DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4682#endif
4683{
4684 mstate av = get_malloc_state();
4685
4686 INTERNAL_SIZE_T nb; /* padded request size */
4687
4688 chunkinfoptr oldp; /* chunk corresponding to oldmem */
4689 INTERNAL_SIZE_T oldsize; /* its size */
4690
4691 chunkinfoptr newp; /* chunk to return */
4692 INTERNAL_SIZE_T newsize; /* its size */
4693 Void_t* newmem; /* corresponding user mem */
4694
4695 chunkinfoptr next; /* next contiguous chunk after oldp */
4696
4697 chunkinfoptr remainder; /* extra space at end of newp */
4698 CHUNK_SIZE_T remainder_size; /* its size */
4699
4700 chunkinfoptr bck; /* misc temp for linking */
4701 chunkinfoptr fwd; /* misc temp for linking */
4702
4703 CHUNK_SIZE_T copysize; /* bytes to copy */
4704 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4705 INTERNAL_SIZE_T* s; /* copy source */
4706 INTERNAL_SIZE_T* d; /* copy destination */
4707
4708
4709#ifdef REALLOC_ZERO_BYTES_FREES
4710 if (UNLIKELY(bytes == 0)) {
4711 fREe(oldmem);
4712 return 0;
4713 }
4714#endif
4715
4716 if (UNLIKELY(!av || av->max_fast == 0)) {
4717 malloc_consolidate(av);
4718 av = get_malloc_state();
4719 }
4720
4721 /* realloc of null is supposed to be same as malloc */
4722 if (UNLIKELY(oldmem == 0))
4723 return mALLOc(bytes);
4724
4725 checked_request2size(bytes, nb);
4726
4727 oldp = hashtable_lookup(oldmem);
4728
4729 if (UNLIKELY(!oldp || !inuse(oldp))){
4730 /* attempt to either realloc memory not managed by us
4731 * or memory that is not in use
4732 */
4733#ifdef DNMALLOC_CHECKS
4734 if (oldp) {
4735	  fprintf(stderr, "Attempt to realloc memory not in use\n");
4736	  abort();
4737	} else {
4738	  fprintf(stderr, "Attempt to realloc memory not allocated\n");
4739 abort();
4740 }
4741#endif
4742 assert(oldp && inuse(oldp));
4743 return 0;
4744 }
4745
[172]4746 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
[171]4747 guard_check(av->guard_stored, oldp);
4748
4749 oldsize = chunksize(oldp);
4750
4751 check_inuse_chunk(oldp);
4752
4753 if (!chunk_is_mmapped(oldp)) {
4754
4755 if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
4756 /* already big enough; split below */
4757 newp = oldp;
4758 newsize = oldsize;
4759 }
4760
4761 else {
4762 next = next_chunkinfo(oldp);
4763 if (next)
4764 next->prev_size = oldsize;
4765 /* Try to expand forward into top */
4766 if (next && next == av->top &&
4767 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4768 (CHUNK_SIZE_T)(nb + MINSIZE)) {
4769 set_head_size(oldp, nb);
4770 hashtable_remove(chunk(av->top));
4771 av->top->chunk = chunk_at_offset(chunk(oldp), nb);
4772 set_head(av->top, (newsize - nb) | PREV_INUSE);
4773	 /* av->top->chunk has been moved; update its entry in the hashtable */
4774 hashtable_insert(oldp, av->top);
4775 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4776 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4777 return chunk(oldp);
4778 }
4779
4780 /* Try to expand forward into next chunk; split off remainder below */
4781 else if (next && next != av->top &&
4782 !inuse(next) &&
4783 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4784 (CHUNK_SIZE_T)(nb)) {
4785 newp = oldp;
4786 unlink(next, bck, fwd);
4787 hashtable_remove(chunk(next));
4788 freecilst_add(next);
4789 next = next_chunkinfo(oldp);
4790 if (next)
4791 next->prev_size = newsize;
4792 }
4793
4794 /* allocate, copy, free */
4795 else {
4796
4797 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
[172]4798 if (newmem == 0)
[171]4799 return 0; /* propagate failure */
4800
4801 newp = hashtable_lookup(newmem);
4802 newsize = chunksize(newp);
4803
[172]4804
[171]4805 /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
4806 /*
4807 Avoid copy if newp is next chunk after oldp.
4808 */
4809 if (UNLIKELY(is_next_chunk(oldp, newp))) {
4810 newsize += oldsize;
4811 set_head_size(oldp, newsize);
4812 hashtable_skiprm(oldp, newp);
4813 freecilst_add(newp);
4814 newp = oldp;
4815 }
4816 else {
4817 /*
4818 Unroll copy of <= 40 bytes (80 if 8byte sizes)
4819 We know that contents have an even number of
4820 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
4821 */
4822
[172]4823 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);
4824
[171]4825 copysize = oldsize;
4826 s = (INTERNAL_SIZE_T*)(oldmem);
4827 d = (INTERNAL_SIZE_T*)(newmem);
4828 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4829 assert(ncopies >= 2);
4830
4831 if (ncopies > 10)
4832 MALLOC_COPY(d, s, copysize);
4833
4834 else {
4835 *(d+0) = *(s+0);
4836 *(d+1) = *(s+1);
4837 if (ncopies > 2) {
4838 *(d+2) = *(s+2);
4839 *(d+3) = *(s+3);
4840 if (ncopies > 4) {
4841 *(d+4) = *(s+4);
4842 *(d+5) = *(s+5);
4843 if (ncopies > 6) {
4844 *(d+6) = *(s+6);
4845 *(d+7) = *(s+7);
4846 if (ncopies > 8) {
4847 *(d+8) = *(s+8);
4848 *(d+9) = *(s+9);
4849 }
4850 }
4851 }
4852 }
4853 }
4854
4855 fREe(oldmem);
4856 check_inuse_chunk(newp);
4857 guard_set(av->guard_stored, newp, bytes, nb);
4858 return chunk(newp);
4859 }
4860 }
4861 }
4862
4863 /* If possible, free extra space in old or extended chunk */
4864
4865 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4866
4867 remainder_size = newsize - nb;
4868
4869 if (remainder_size >= MINSIZE) { /* split remainder */
4870 remainder = cireg_getfree();
4871 remainder->chunk = chunk_at_offset(chunk(newp), nb);
4872 set_head_size(newp, nb);
4873 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
4874 remainder->prev_size = nb;
4875 hashtable_insert(newp, remainder);
4876 /* Mark remainder as inuse so free() won't complain */
4877 set_all_inuse(remainder);
4878 guard_set(av->guard_stored, remainder, 0, remainder_size);
[172]4879 VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
[171]4880 fREe(chunk(remainder));
4881 }
4882 else { /* not enough extra to split off */
4883 set_head_size(newp, newsize);
4884 set_all_inuse(newp);
4885 }
4886
4887 check_inuse_chunk(newp);
4888 guard_set(av->guard_stored, newp, bytes, nb);
[172]4889 VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
[171]4890 return chunk(newp);
4891 }
4892
4893 /*
4894 Handle mmap cases
4895 */
4896
4897 else {
4898
4899#if HAVE_MREMAP
4900 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
4901 size_t pagemask = av->pagesize - 1;
4902 char *cp;
4903 CHUNK_SIZE_T sum;
4904
[172]4905
[171]4906 /* Note the extra SIZE_SZ overhead */
[237]4907 /* newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask; */
[171]4908 newsize = (nb + offset + pagemask) & ~pagemask;
4909
4910 /* don't need to remap if still within same page */
4911    /* no need to remap if the page-rounded size is unchanged */
4912 {
4913 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4914 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4915 VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
[171]4916 return oldmem;
4917 }
4918
4919 cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);
4920
4921 if (cp != (char*)MORECORE_FAILURE) {
4922
4923 hashtable_remove_mmapped(chunk(oldp));
4924
4925 oldp->chunk = (mchunkptr)(cp + offset);
4926 set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);
4927
4928 hashtable_add(oldp);
4929
4930 assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
4931 assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));
4932
4933 /* update statistics */
4934 sum = av->mmapped_mem += newsize - oldsize;
4935 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4936 av->max_mmapped_mem = sum;
4937 sum += av->sbrked_mem;
4938 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4939 av->max_total_mem = sum;
4940
4941 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4942 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4943 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4944 return chunk(oldp);
4945 }
4946#endif /* have MREMAP */
4947
4948 /* Note the extra SIZE_SZ overhead. */
4949 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
4950 newmem = oldmem; /* do nothing */
4951 else {
4952 /* Must alloc, copy, free. */
4953 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4954 if (newmem != 0) {
4955 MALLOC_COPY(newmem, oldmem, oldsize);
4956 fREe(oldmem);
4957 }
4958 }
[172]4959 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
[171]4960 guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
4961 return newmem;
4962 }
4963}
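
/*
  Illustrative usage sketch (not part of dnmalloc): the standard realloc
  semantics implemented above, assuming dnmalloc is linked in as the system
  allocator and REALLOC_ZERO_BYTES_FREES is defined (as the version history
  says it is):
*/
#if 0
#include <stdlib.h>
#include <string.h>

int main(void)
{
  char *p;
  char *q;

  /* realloc(NULL, n) behaves like malloc(n). */
  p = realloc(NULL, 32);
  if (!p) return 1;
  strcpy(p, "hello");

  /* Growing keeps the old contents; the data is copied if the chunk moves. */
  q = realloc(p, 4096);
  if (!q) { free(p); return 1; }
  p = q;

  /* With REALLOC_ZERO_BYTES_FREES, realloc(p, 0) frees p and returns NULL. */
  p = realloc(p, 0);
  return p == NULL ? 0 : 1;
}
#endif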
4964
4965/*
4966 ---------------------------posix_memalign ----------------------------
4967*/
4968
4969#if __STD_C
4970DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4971#else
4972DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4973#endif
4974{
4975 mstate av;
4976
4977 if (alignment % sizeof(void *) != 0)
4978 return EINVAL;
4979 if ((alignment & (alignment - 1)) != 0)
4980 return EINVAL;
4981
4982 av = get_malloc_state();
4983 if (!av || av->max_fast == 0) malloc_consolidate(av);
4984 *memptr = mEMALIGn(alignment, bytes);
4985
4986 return (*memptr != NULL ? 0 : ENOMEM);
4987}
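
/*
  Illustrative usage sketch (not part of dnmalloc): posix_memalign as
  implemented above returns EINVAL when the alignment is not a power of two
  or not a multiple of sizeof(void *), ENOMEM when the allocation fails, and
  0 on success (assuming the routine is exported under its standard name):
*/
#if 0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *buf = NULL;
  int rc;

  rc = posix_memalign(&buf, 64, 1024);   /* 64 is a power of two: accepted */
  if (rc == 0) {
    printf("64-byte aligned block at %p\n", buf);
    free(buf);
  } else if (rc == ENOMEM) {
    fprintf(stderr, "out of memory\n");
  }

  rc = posix_memalign(&buf, 24, 1024);   /* 24 is not a power of two */
  if (rc == EINVAL)
    fprintf(stderr, "invalid alignment rejected\n");
  return 0;
}
#endif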
4988
4989/*
4990 ------------------------------ memalign ------------------------------
4991*/
4992
4993#if __STD_C
4994DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
4995#else
4996DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
4997#endif
4998{
4999 INTERNAL_SIZE_T nb; /* padded request size */
5000 char* m; /* memory returned by malloc call */
5001 chunkinfoptr p; /* corresponding chunk */
5002 char* brk; /* alignment point within p */
5003 chunkinfoptr newp; /* chunk to return */
5004 INTERNAL_SIZE_T newsize; /* its size */
5005 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5006 chunkinfoptr remainder; /* spare room at end to split off */
5007 CHUNK_SIZE_T remainder_size; /* its size */
5008 INTERNAL_SIZE_T size;
5009 mstate av;
5010
5011 /* If need less alignment than we give anyway, just relay to malloc */
5012
5013 if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);
5014
5015 /* Otherwise, ensure that it is at least a minimum chunk size */
5016
5017 if (alignment < MINSIZE) alignment = MINSIZE;
5018
5019 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5020 if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
5021 size_t a = MALLOC_ALIGNMENT * 2;
5022 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
5023 alignment = a;
5024 }
5025
5026 checked_request2size(bytes, nb);
5027
5028 /*
5029 Strategy: find a spot within that chunk that meets the alignment
5030 request, and then possibly free the leading and trailing space.
5031 */
5032
5033
5034 /* Call malloc with worst case padding to hit alignment. */
5035
5036 m = (char*)(mALLOc(nb + alignment + MINSIZE));
5037
5038 if (m == 0) return 0; /* propagate failure */
5039
5040 av = get_malloc_state();
5041
5042 p = hashtable_lookup((mchunkptr) m);
5043
5044 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
5045
5046 /*
5047 Find an aligned spot inside chunk. Since we need to give back
5048 leading space in a chunk of at least MINSIZE, if the first
5049 calculation places us at a spot with less than MINSIZE leader,
5050 we can move to the next aligned spot -- we've allocated enough
5051 total room so that this is always possible.
5052 */
5053
5054 brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
5055 -((signed long) alignment)));
5056 if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
5057 brk += alignment;
5058
5059 newp = cireg_getfree();
5060 newp->chunk = (mchunkptr)brk;
5061 leadsize = brk - (char*)(chunk(p));
5062 newsize = chunksize(p) - leadsize;
5063
5064 /* For mmapped chunks, just adjust offset */
5065 if (UNLIKELY(chunk_is_mmapped(p))) {
5066 newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
5067 set_head(newp, newsize|IS_MMAPPED|INUSE);
5068 hashtable_remove_mmapped(chunk(p));
5069 freecilst_add(p);
5070 hashtable_add(newp);
5071 guard_set(av->guard_stored, newp, bytes, nb);
5072 return chunk(newp);
5073 }
5074
5075 /* Otherwise, give back leader, use the rest */
5076 set_head(newp, newsize | PREV_INUSE | INUSE);
5077 set_head_size(p, leadsize);
5078 set_all_inuse(newp);
5079 hashtable_add(newp); /* 20.05.2008 rw */
5080 guard_set(av->guard_stored, p, 0, leadsize);
5081 fREe(chunk(p));
5082 p = newp;
5083
5084 assert (newsize >= nb &&
5085 (((PTR_UINT)(chunk(p))) % alignment) == 0);
5086 }
5087
5088 /* Also give back spare room at the end */
5089 if (!chunk_is_mmapped(p)) {
5090 size = chunksize(p);
5091 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
5092 remainder = cireg_getfree();
5093 remainder_size = size - nb;
5094 remainder->chunk = chunk_at_offset(chunk(p), nb);
5095 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
5096 set_head_size(p, nb);
5097 hashtable_add(remainder); /* 20.05.2008 rw */
5098 guard_set(av->guard_stored, remainder, 0, remainder_size);
5099 fREe(chunk(remainder));
5100 }
5101 }
5102
5103 check_inuse_chunk(p);
5104 guard_set(av->guard_stored, p, bytes, nb);
5105 return chunk(p);
5106}
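
/*
  Illustrative sketch (not part of dnmalloc): the alignment fix-up at the top
  of mEMALIGn raises the alignment to at least MINSIZE and, if it is not a
  power of two, rounds it up to the next power of two starting from
  MALLOC_ALIGNMENT * 2.  Stand-alone version, using 8 for MALLOC_ALIGNMENT
  purely as an example value:
*/
#if 0
#include <assert.h>
#include <stddef.h>

/* Round 'alignment' up to the next power of two, as mEMALIGn does. */
static size_t fix_alignment(size_t alignment, size_t malloc_alignment)
{
  if ((alignment & (alignment - 1)) != 0) {   /* not a power of two */
    size_t a = malloc_alignment * 2;
    while (a < alignment)
      a <<= 1;
    alignment = a;
  }
  return alignment;
}

int main(void)
{
  assert(fix_alignment(48, 8) == 64);   /* 16 -> 32 -> 64          */
  assert(fix_alignment(64, 8) == 64);   /* already a power of two  */
  return 0;
}
#endif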
5107
5108/*
5109 ------------------------------ calloc ------------------------------
5110*/
5111
5112#if __STD_C
5113DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5114#else
5115DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5116#endif
5117{
5118 chunkinfoptr p;
5119 CHUNK_SIZE_T clearsize;
5120 CHUNK_SIZE_T nclears;
5121 INTERNAL_SIZE_T* d;
5122 Void_t* mem;
5123
5124
5125 mem = mALLOc(n_elements * elem_size);
5126
5127 if (mem != 0) {
5128 p = hashtable_lookup(mem);
5129
5130 if (!chunk_is_mmapped(p))
5131 {
5132 /*
5133 Unroll clear of <= 40 bytes (80 if 8byte sizes)
5134 We know that contents have an even number of
5135 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5136 */
5137
5138 d = (INTERNAL_SIZE_T*)mem;
5139 clearsize = chunksize(p);
5140 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5141 assert(nclears >= 2);
5142
5143 if (nclears > 10) {
5144 MALLOC_ZERO(d, clearsize);
5145 }
5146
5147 else {
5148 *(d+0) = 0;
5149 *(d+1) = 0;
5150 if (nclears > 2) {
5151 *(d+2) = 0;
5152 *(d+3) = 0;
5153 if (nclears > 4) {
5154 *(d+4) = 0;
5155 *(d+5) = 0;
5156 if (nclears > 6) {
5157 *(d+6) = 0;
5158 *(d+7) = 0;
5159 if (nclears > 8) {
5160 *(d+8) = 0;
5161 *(d+9) = 0;
5162 }
5163 }
5164 }
5165 }
5166 }
5167 }
5168#if ! MMAP_CLEARS
5169 else
5170 {
5171 d = (INTERNAL_SIZE_T*)mem;
5172 clearsize = chunksize(p);
5173 MALLOC_ZERO(d, clearsize);
5174 }
5175#endif
5176 /* Set guard again, since we just cleared it
5177 */
5178 guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5179 }
5180
5181 return mem;
5182}
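
/*
  Worth noting about cALLOc above: the product n_elements * elem_size is
  handed to malloc without an overflow check, so a huge element count can
  wrap around and yield a buffer smaller than expected.  A minimal sketch of
  a caller-side guard (hypothetical wrapper, not part of dnmalloc):
*/
#if 0
#include <stdlib.h>

/* Hypothetical wrapper: refuse requests whose size computation would wrap. */
static void *checked_calloc(size_t n_elements, size_t elem_size)
{
  if (elem_size != 0 && n_elements > (size_t)-1 / elem_size)
    return NULL;                 /* n_elements * elem_size would overflow */
  return calloc(n_elements, elem_size);
}

int main(void)
{
  int  *a = checked_calloc(100, sizeof *a);    /* fine: zero-filled array  */
  void *b = checked_calloc((size_t)-1, 16);    /* rejected: would overflow */
  free(a);
  return (a != NULL && b == NULL) ? 0 : 1;
}
#endif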
5183
5184/*
5185 ------------------------------ valloc ------------------------------
5186*/
5187
5188#if __STD_C
5189DL_STATIC Void_t* vALLOc(size_t bytes)
5190#else
5191DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5192#endif
5193{
5194 /* Ensure initialization */
5195 mstate av = get_malloc_state();
5196 if (!av || av->max_fast == 0) {
5197 malloc_consolidate(av);
5198 av = get_malloc_state();
5199 }
5200 return mEMALIGn(av->pagesize, bytes);
5201}
5202
5203/*
5204 ------------------------------ pvalloc ------------------------------
5205*/
5206
5207
5208#if __STD_C
5209DL_STATIC Void_t* pVALLOc(size_t bytes)
5210#else
5211DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5212#endif
5213{
5214 mstate av = get_malloc_state();
5215 size_t pagesz;
5216
5217 /* Ensure initialization */
5218 if (!av || av->max_fast == 0) {
5219 malloc_consolidate(av);
5220 av = get_malloc_state();
5221 }
5222 pagesz = av->pagesize;
5223 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5224}
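
/*
  Illustrative usage sketch (not part of dnmalloc): vALLOc returns page
  aligned memory for the request as given, while pVALLOc first rounds the
  request up to a whole number of pages.  valloc/pvalloc are non-standard,
  so they are declared by hand here for the example (on glibc they come
  from <malloc.h>):
*/
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

extern void *valloc(size_t size);
extern void *pvalloc(size_t size);

int main(void)
{
  long  pagesz = sysconf(_SC_PAGESIZE);
  char *v = valloc(100);    /* page aligned, at least 100 usable bytes */
  char *p = pvalloc(5000);  /* page aligned, rounded up to whole pages */

  /* With 4096-byte pages, pvalloc(5000) is backed by two pages (8192 B). */
  printf("page size %ld, valloc %p, pvalloc %p\n", pagesz, (void*)v, (void*)p);
  free(v);
  free(p);
  return 0;
}
#endif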
5225
5226
5227/*
5228 ------------------------------ malloc_trim ------------------------------
5229*/
5230
5231#if __STD_C
5232DL_STATIC int mTRIm(size_t pad)
5233#else
5234DL_STATIC int mTRIm(pad) size_t pad;
5235#endif
5236{
5237 mstate av = get_malloc_state();
5238 /* Ensure initialization/consolidation */
5239 malloc_consolidate(av);
5240 av = get_malloc_state();
5241#ifndef MORECORE_CANNOT_TRIM
5242 if (morecore32bit(av))
5243 return sYSTRIm(pad, av);
5244 else
5245 return 0;
5246#else
5247 return 0;
5248#endif
5249}
5250
5251
5252
5253/*
5254 ------------------------- malloc_usable_size -------------------------
5255*/
5256
5257#if __STD_C
5258DL_STATIC size_t mUSABLe(Void_t* mem)
5259#else
5260DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5261#endif
5262{
5263 chunkinfoptr p;
5264 if (mem != 0) {
5265 p = hashtable_lookup(mem);
5266 if (p && inuse(p)) return chunksize(p);
5267 }
5268 return 0;
5269}
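
/*
  Illustrative usage sketch (not part of dnmalloc): mUSABLe reports the full
  chunk size actually reserved for a pointer, which can exceed the requested
  size, and 0 for NULL or for pointers the allocator does not manage.  The
  routine is assumed to be exported as the usual malloc_usable_size, which
  is non-standard and therefore declared by hand here:
*/
#if 0
#include <stdio.h>
#include <stdlib.h>

extern size_t malloc_usable_size(void *ptr);

int main(void)
{
  char *p = malloc(100);
  if (!p) return 1;

  /* At least the 100 bytes asked for, usually a bit more (chunk rounding). */
  printf("requested 100, usable %lu\n", (unsigned long)malloc_usable_size(p));
  printf("usable size of NULL: %lu\n", (unsigned long)malloc_usable_size(NULL));

  free(p);
  return 0;
}
#endif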
5270
5271/*
5272 ------------------------------ mallinfo ------------------------------
5273*/
5274
5275DL_STATIC struct mallinfo mALLINFo()
5276{
5277 mstate av = get_malloc_state();
5278 struct mallinfo mi;
5279 unsigned int i;
5280 mbinptr b;
5281 chunkinfoptr p;
5282 INTERNAL_SIZE_T avail;
5283 INTERNAL_SIZE_T fastavail;
5284 int nblocks;
5285 int nfastblocks;
5286
5287 /* Ensure initialization */
5288 if (!av || av->top == 0) {
5289 malloc_consolidate(av);
5290 av = get_malloc_state();
5291 }
5292 check_malloc_state();
5293
5294 /* Account for top */
5295 avail = chunksize(av->top);
5296 nblocks = 1; /* top always exists */
5297
5298 /* traverse fastbins */
5299 nfastblocks = 0;
5300 fastavail = 0;
5301
5302 for (i = 0; i < NFASTBINS; ++i) {
5303 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5304 ++nfastblocks;
5305 fastavail += chunksize(p);
5306 }
5307 }
5308
5309 avail += fastavail;
5310
5311 /* traverse regular bins */
5312 for (i = 1; i < NBINS; ++i) {
5313 b = bin_at(av, i);
5314 for (p = last(b); p != b; p = p->bk) {
5315 ++nblocks;
5316 avail += chunksize(p);
5317 }
5318 }
5319
5320 mi.smblks = nfastblocks;
5321 mi.ordblks = nblocks;
5322 mi.fordblks = avail;
5323 mi.uordblks = av->sbrked_mem - avail;
5324 mi.arena = av->sbrked_mem;
5325 mi.hblks = av->n_mmaps;
5326 mi.hblkhd = av->mmapped_mem;
5327 mi.fsmblks = fastavail;
5328 mi.keepcost = chunksize(av->top);
5329 mi.usmblks = av->max_total_mem;
5330 return mi;
5331}
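
/*
  Illustrative usage sketch (not part of dnmalloc): reading the statistics
  that mALLINFo fills in above.  struct mallinfo and mallinfo() are
  non-standard; <malloc.h> is assumed to declare them, as it does on glibc:
*/
#if 0
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *p = malloc(10000);
  struct mallinfo mi = mallinfo();

  printf("arena (sbrked bytes)      : %d\n", mi.arena);
  printf("in use via sbrk           : %d\n", mi.uordblks);
  printf("free via sbrk             : %d\n", mi.fordblks);
  printf("mmapped regions / bytes   : %d / %d\n", mi.hblks, mi.hblkhd);
  printf("releasable top (keepcost) : %d\n", mi.keepcost);

  free(p);
  return 0;
}
#endif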
5332
5333/*
5334 ------------------------------ malloc_stats ------------------------------
5335*/
5336
5337DL_STATIC void mSTATs()
5338{
5339 struct mallinfo mi = mALLINFo();
5340
5341 fprintf(stderr, "hashtable = %10lu MB\n",
5342 (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5343 fprintf(stderr, "max system bytes = %10lu\n",
5344 (CHUNK_SIZE_T)(mi.usmblks));
5345	  fprintf(stderr, "system bytes     = %10lu (%10lu sbrked, %10lu mmapped)\n",
5346 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5347 (CHUNK_SIZE_T)(mi.arena),
5348 (CHUNK_SIZE_T)(mi.hblkhd));
5349 fprintf(stderr, "in use bytes = %10lu\n",
5350 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5351
5352}
5353
5354
5355/*
5356 ------------------------------ mallopt ------------------------------
5357*/
5358
5359#if __STD_C
5360DL_STATIC int mALLOPt(int param_number, int value)
5361#else
5362DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5363#endif
5364{
5365 mstate av = get_malloc_state();
5366 /* Ensure initialization/consolidation */
5367 malloc_consolidate(av);
5368 av = get_malloc_state();
5369
5370 switch(param_number) {
5371 case M_MXFAST:
5372 if (value >= 0 && value <= MAX_FAST_SIZE) {
5373 set_max_fast(av, value);
5374 return 1;
5375 }
5376 else
5377 return 0;
5378
5379 case M_TRIM_THRESHOLD:
5380 av->trim_threshold = value;
5381 return 1;
5382
5383 case M_TOP_PAD:
5384 av->top_pad = value;
5385 return 1;
5386
5387 case M_MMAP_THRESHOLD:
5388 av->mmap_threshold = value;
5389 return 1;
5390
5391 case M_MMAP_MAX:
5392 if (value != 0)
5393 return 0;
5394 av->n_mmaps_max = value;
5395 return 1;
5396
5397 default:
5398 return 0;
5399 }
5400}
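
/*
  Illustrative usage sketch (not part of dnmalloc): tuning the parameters
  handled by the switch in mALLOPt above.  The M_* names are the classic
  dlmalloc/glibc option macros, assumed to be provided by <malloc.h>:
*/
#if 0
#include <malloc.h>
#include <stdio.h>

int main(void)
{
  /* Raise the fastbin limit (accepted while 0 <= value <= MAX_FAST_SIZE). */
  if (mallopt(M_MXFAST, 64))
    printf("fastbin threshold set to 64 bytes\n");

  /* Trim back to the OS once 1 MB of free space sits on top of the heap. */
  mallopt(M_TRIM_THRESHOLD, 1024 * 1024);

  /* dnmalloc ignores the mmap count limit: only the value 0 is accepted. */
  printf("M_MMAP_MAX 0 -> %d, M_MMAP_MAX 65536 -> %d\n",
         mallopt(M_MMAP_MAX, 0), mallopt(M_MMAP_MAX, 65536));
  return 0;
}
#endif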
5401
5402
5403/* $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $ */
5404
5405/*
5406 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5407 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5408 *
5409 * Permission to use, copy, modify, and distribute this software for any
5410 * purpose with or without fee is hereby granted, provided that the above
5411 * copyright notice and this permission notice appear in all copies.
5412 *
5413 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5414 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5415 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5416 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5417 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5418 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5419 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5420 */
5421
5422/*
5423 * Arc4 random number generator for OpenBSD.
5424 *
5425 * This code is derived from section 17.1 of Applied Cryptography,
5426 * second edition, which describes a stream cipher allegedly
5427 * compatible with RSA Labs "RC4" cipher (the actual description of
5428 * which is a trade secret). The same algorithm is used as a stream
5429 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5430 *
5431 * Here the stream cipher has been modified always to include the time
5432 * when initializing the state. That makes it impossible to
5433 * regenerate the same random sequence twice, so this can't be used
5434 * for encryption, but will generate good random numbers.
5435 *
5436 * RC4 is a registered trademark of RSA Laboratories.
5437 */
5438
5439/* Moved u_int8_t -> unsigned char (portability)
5440 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5441 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5442 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5443 * This is arc4random(3) using urandom.
5444 */
5445
5446#include <fcntl.h>
5447#include <limits.h>
5448#include <stdlib.h>
5449#include <sys/param.h>
5450#include <sys/time.h>
5451
5452struct arc4_stream {
5453 unsigned char i;
5454 unsigned char j;
5455 unsigned char s[256];
5456};
5457
5458static int rs_initialized;
5459static struct arc4_stream rs;
5460static pid_t arc4_stir_pid;
5461static int arc4_count;
5462
5463static unsigned char arc4_getbyte(void);
5464
5465static void
5466arc4_init(void)
5467{
5468 int n;
5469
5470 for (n = 0; n < 256; n++)
5471 rs.s[n] = n;
5472 rs.i = 0;
5473 rs.j = 0;
5474}
5475
5476static inline void
5477arc4_addrandom(unsigned char *dat, int datlen)
5478{
5479 int n;
5480 unsigned char si;
5481
5482 rs.i--;
5483 for (n = 0; n < 256; n++) {
5484 rs.i = (rs.i + 1);
5485 si = rs.s[rs.i];
5486 rs.j = (rs.j + si + dat[n % datlen]);
5487 rs.s[rs.i] = rs.s[rs.j];
5488 rs.s[rs.j] = si;
5489 }
5490 rs.j = rs.i;
5491}
5492
5493#ifdef HAVE_SCHED_H
5494#include <sched.h>
5495#endif
5496
5497static void
5498arc4_stir(void)
5499{
5500 int i;
5501 struct {
5502 struct timeval tv1;
5503 struct timeval tv2;
5504 u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
5505 } rdat;
5506#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5507 size_t sz = 0;
5508 int fd;
5509#endif
5510
5511 gettimeofday(&rdat.tv1, NULL);
5512
5513
5514 if (!rs_initialized) {
5515 arc4_init();
5516 rs_initialized = 1;
5517 }
5518
5519#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5520
5521#ifdef HAVE_SCHED_YIELD
5522 /* Yield the processor to introduce some random delay. */
5523 (void) sched_yield();
5524#endif
5525
	/*
	 * Mix in bytes from /dev/urandom.  This block is compiled out on
	 * Free/Open/NetBSD (see the #if above) because reading the device
	 * here caused pthread problems in multithreaded code on *BSD;
	 * those systems fall back on the timestamps gathered below.
	 */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd != -1) {
		sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
		close(fd);
	}
	/* read() returning -1 shows up as a huge size_t value; treat it as
	 * "nothing read".  A short or failed read is not fatal: the whole
	 * rdat struct, including both timestamps, is stirred in below. */
	if (sz > sizeof (rdat.rnd))
		sz = 0;
#endif
5537
5538 arc4_stir_pid = getpid();
5539 gettimeofday(&rdat.tv2, NULL);
5540
5541 arc4_addrandom((void *)&rdat, sizeof(rdat));
5542
5543 /*
5544 * Discard early keystream, as per recommendations in:
5545 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
5546 */
5547 for (i = 0; i < 256; i++)
5548 (void)arc4_getbyte();
5549 arc4_count = 1600000;
5550}
5551
5552static unsigned char
5553arc4_getbyte(void)
5554{
5555 unsigned char si, sj;
5556
5557 rs.i = (rs.i + 1);
5558 si = rs.s[rs.i];
5559 rs.j = (rs.j + si);
5560 sj = rs.s[rs.j];
5561 rs.s[rs.i] = sj;
5562 rs.s[rs.j] = si;
5563 return (rs.s[(si + sj) & 0xff]);
5564}
5565
5566
/* Changed to return char*.  The pointer refers to a static 4-byte buffer
 * that is overwritten by the next call, so callers must copy the bytes
 * out before calling again. */
static char *
dnmalloc_arc4random(void)
{
	static char val[4];
5572
5573 /* We only call this once, hence no need for locking. */
5574
5575 /* _ARC4_LOCK(); */
5576 arc4_count -= 4;
5577 if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5578 arc4_stir();
5579
5580 val[0] = (char) arc4_getbyte();
5581 val[1] = (char) arc4_getbyte();
5582 val[2] = (char) arc4_getbyte();
5583 val[3] = (char) arc4_getbyte();
5584
5585 arc4_stir();
5586 /* _ARC4_UNLOCK(); */
5587 return val;
5588}
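
/*
 * Illustrative caller sketch (not compiled, guarded by #if 0).  The
 * pointer returned by dnmalloc_arc4random() refers to a static 4-byte
 * buffer, so the bytes must be copied out before the next call.  The
 * helper name below is made up for the example and assumes a 32-bit
 * unsigned int.
 */
#if 0
static unsigned int random_seed_example(void)
{
	char *p = dnmalloc_arc4random();
	unsigned int seed = 0;
	int k;

	/* Pack the four bytes before the static buffer is reused. */
	for (k = 0; k < 4; k++)
		seed = (seed << 8) | (unsigned char)p[k];
	return seed;
}
#endif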
5589
5590#else
5591int dnmalloc_pthread_init() { return 0; }
5592#endif /* ! USE_SYSTEM_MALLOC */