1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
2 Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3 and Rainer Wichmann
4
5 The authors can be contacted by:
6 Email: dnmalloc@fort-knox.org
7 Address:
8 Yves Younan
9 Celestijnenlaan 200A
10 B-3001 Heverlee
11 Belgium
12
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 2.1 of the License, or (at your option) any later version.
17
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26
27*/
28
29/* Current version: dnmalloc 1.0 */
 30/* Includes arc4random from OpenBSD, which is under the BSD license */
31
32/* Versions:
33 0.1-0.5:
34 Proof of concept implementation by Hans Van den Eynden and Yves Younan
35 0.6-0.7:
36 Bug fixes by Yves Younan
37 0.8-1.0.beta4:
38 Reimplementation from scratch by Yves Younan
39 1.0.beta4:
40 Public release
41 1.0.beta5:
 42 Prev_chunkinfo sped up; it was really slow because of the way we did lookups
43 A freechunkinfo region is now freed when it is completely empty and
44 not the current one
45
46 1.0 (Rainer Wichmann [support at la dash samhna dot org]):
47 ---------------------
48
49 Compiler warnings fixed
50 Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
51 (and what the standard says)
52 Removed unused code
53 Fix assert(aligned_OK(chunk(newp)));
54 -> assert(aligned_OK(chunk(oldp)));
55 Fix statistics in sYSMALLOc
56 Fix overwrite of av->top in sYSMALLOc
57 Provide own assert(), glibc assert() doesn't work (calls malloc)
58 Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
 59 Remove cfree, independent_calloc, independent_comalloc (untested
60 public functions not covered by any standard)
61 Provide posix_memalign (that one is in the standard)
62 Move the malloc_state struct to mmapped memory protected by guard pages
63 Add arc4random function to initialize random canary on startup
64 Implement random canary at end of (re|m)alloced/memaligned buffer,
65 check at free/realloc
66 Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
67 Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
68
69 Profiling: Reorder branches in hashtable_add, next_chunkinfo,
70 prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
71 checked_request2size (gcc predicts if{} branch to be taken).
72 Use UNLIKELY macro (gcc __builtin_expect()) where branch
73 reordering would make the code awkward.
74
75 Portability: Hashtable always covers full 32bit address space to
76 avoid assumptions about memory layout.
77 Portability: Try hard to enforce mapping of mmapped memory into
78 32bit address space, even on 64bit systems.
79 Portability: Provide a dnmalloc_pthread_init() function, since
80 pthread locking on HP-UX only works if initialized
81 after the application has entered main().
82 Portability: On *BSD, pthread_mutex_lock is unusable since it
83 calls malloc, use spinlocks instead.
84 Portability: Dynamically detect whether the heap is within
85 32bit address range (e.g. on Linux x86_64, it isn't).
86 Don't use sbrk() if the heap is mapped to an address
87 outside the 32bit range, since this doesn't work with
88 the hashtable. New macro morecore32bit.
89
90 Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
91 FreeBSD/pthread, and Solaris 10 i386/pthread.
92 Fail on: OpenBSD/pthread (in _thread_machdep_save_float_state),
93 might be related to OpenBSD pthread internals (??).
 94 Non-threaded version (#undef USE_MALLOC_LOCK)
95 works on OpenBSD.
96
97 further to 1.0:
98 Valgrind client requests inserted (#define USE_VALGRIND)
99 Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
100 Portability: minsize = 32 bit on 64bit architecture
101 Minor cleanups
102 Fix: eliminate prototypes for memset, memcpy (they're in string.h)
103 Fix: one more malloc_consolidate segfault
104
 105 There may be some bugs left in this version. Please use it with caution.
106*/
107
108
109
110/* Please read the following papers for documentation:
111
112 Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
113 Countermeasures against Current and Future Code Injection Attacks,
114 Proceedings of the Third IEEE International Information Assurance
115 Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
116 IEEE, IEEE Press.
117 http://www.fort-knox.org/younany_countermeasures.pdf
118
119 Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
120 Eynden. Security of Memory Allocators for C and C++. Technical Report
121 CW419, Departement Computerwetenschappen, Katholieke Universiteit
122 Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
123
124 */
125
126/* Compile:
127 gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
128 "Link":
129 Dynamic:
130 gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
131 Static:
132 ar -rv libdnmalloc.a dnmalloc-portable.o
133
134*/
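/* Usage sketch (added for illustration, not part of the original build
   instructions): a minimal test program that can be linked against the
   library built with the commands above. The file name test_dnmalloc.c
   and the exact link invocations are assumptions.

   test_dnmalloc.c:

       #include <stdio.h>
       #include <stdlib.h>

       int main(void)
       {
           char *p = malloc(64);
           if (p == NULL)
               return 1;
           p[0] = 'x';
           free(p);
           puts("dnmalloc ok");
           return 0;
       }

   Link against the shared library (assuming it is in the current directory):
       gcc test_dnmalloc.c ./libdnmalloc.so.0.0 -o test_dnmalloc
   or against the static library:
       gcc test_dnmalloc.c libdnmalloc.a -o test_dnmalloc
*/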
135
136/*
137 dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
138 dlmalloc was released as public domain and contained the following license:
139
140 "This is a version (aka dlmalloc) of malloc/free/realloc written by
141 Doug Lea and released to the public domain. Use, modify, and
142 redistribute this code without permission or acknowledgement in any
143 way you wish. Send questions, comments, complaints, performance
144 data, etc to dl@cs.oswego.edu
145
146 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
147
148 Note: There may be an updated version of this malloc obtainable at
149 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
150 Check before installing!"
151
152*/
153
154/* The following preprocessor macros are tested,
155 * and hence should have #define directives:
156 *
157 * HAVE_CONFIG_H Define to #include "config.h" (autoconf-generated)
158 *
159 * HAVE_UNISTD_H Define to #include <unistd.h>
160 *
161 * HAVE_SYS_UIO_H Define to #include <sys/uio.h> (for writev)
162 * HAVE_WRITEV Define if the 'writev' function is available
163 *
164 * HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
165 *
166 * HAVE_MALLOC_H Define to #include <malloc.h> (for struct mallinfo)
167 *
168 * HAVE_FCNTL_H Define to #include <fcntl.h>
169 *
170 * HAVE_SYS_MMAN_H Define to #include <sys/mman.h>
171 * HAVE_MMAP Define if the 'mmap' function is available.
172 *
173 * HAVE_SCHED_H Define to #include <sched.h>
 174 * HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
175 */
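/* Illustration (not part of the original source): a minimal, hypothetical
   config.h fragment for a Linux-like system that has all of the headers and
   functions listed above. A real config.h is normally generated by
   autoconf's configure script rather than written by hand.

       #define HAVE_UNISTD_H 1
       #define HAVE_SYS_UIO_H 1
       #define HAVE_WRITEV 1
       #define HAVE_SYS_PARAM_H 1
       #define HAVE_FCNTL_H 1
       #define HAVE_SYS_MMAN_H 1
       #define HAVE_MMAP 1
       #define HAVE_SCHED_H 1
       #define HAVE_SCHED_YIELD 1
*/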
176
177
178/*
179 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
180 compiler, or a C compiler sufficiently close to ANSI to get away
181 with it.
182*/
183
184#ifdef HAVE_CONFIG_H
185#include "config.h"
186#endif
187
188#ifdef USE_VALGRIND
189#include <valgrind/memcheck.h>
190#else
191#define VALGRIND_FREELIKE_BLOCK(a,b) ((void)0)
192#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
193#define VALGRIND_CREATE_MEMPOOL(a,b,c) ((void)0)
194#define VALGRIND_MEMPOOL_ALLOC(a,b,c) ((void)0)
195#define VALGRIND_MEMPOOL_FREE(a,b) ((void)0)
196#define VALGRIND_DESTROY_MEMPOOL(a) ((void)0)
197#define VALGRIND_MAKE_MEM_DEFINED(a,b) ((void)0)
198#define VALGRIND_MAKE_MEM_UNDEFINED(a,b) ((void)0)
199#define VALGRIND_MAKE_MEM_NOACCESS(a,b) ((void)0)
200#endif
201
202#if defined (__GNUC__) && __GNUC__ > 2
203# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
204# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
205# define __attribute_malloc__ __attribute__ ((__malloc__))
206#else
207# define LIKELY(x) (x)
208# define UNLIKELY(x) (x)
209# define __attribute_malloc__ /* Ignore */
210#endif
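/* Illustration (not part of the original source): how these hint macros are
   meant to be used. On gcc > 2 the compiler lays the unlikely branch out of
   line; on other compilers the macros expand to the bare condition, so
   behaviour is unchanged. The names p and n below are hypothetical.

       void *p = malloc(n);
       if (UNLIKELY(p == NULL)) {
           return 0;
       }
*/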
211
212/*
213 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
214 large blocks. This is currently only possible on Linux with
215 kernel versions newer than 1.3.77.
216*/
217
218#ifndef HAVE_MREMAP
219#ifdef linux
220#define HAVE_MREMAP 1
221#define _GNU_SOURCE
222#else
223#define HAVE_MREMAP 0
224#endif
225#endif /* HAVE_MREMAP */
226
227
228
229#ifndef __STD_C
 230#if defined(__STDC__) || defined(__cplusplus)
231#define __STD_C 1
232#else
233#define __STD_C 0
234#endif
235#endif /*__STD_C*/
236
237
238/*
239 Void_t* is the pointer type that malloc should say it returns
240*/
241
242#ifndef Void_t
243#if (__STD_C || defined(WIN32))
244#define Void_t void
245#else
246#define Void_t char
247#endif
248#endif /*Void_t*/
249
250#if __STD_C
251#include <stddef.h> /* for size_t */
252#else
253#include <sys/types.h>
254#endif
255
256#if !defined(USE_SYSTEM_MALLOC)
257
258#ifdef __cplusplus
259extern "C" {
260#endif
261
262/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
263
264#ifdef HAVE_UNISTD_H
265#include <unistd.h>
266#endif
267
268#ifdef HAVE_SYS_UIO_H
269#include <sys/uio.h>
270#endif
271
272#include <stdio.h> /* needed for malloc_stats */
273#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
274
275#include <string.h>
276#include <stdlib.h>
277
278#include <sys/resource.h>
279
280extern int errno;
281
282 /* 0: lazy,
283 * 1: medium (assertions compiled in),
284 * 2: high (guard pages at end of hash table and ciregions)
285 * 3: paranoid (guards at end of each allocated chunk, check at free)
286 */
287#ifndef PARANOIA
288#define PARANOIA 9
289#endif
290
291 /* Using assert() with multithreading will cause the code
292 * to deadlock since glibc __assert_fail will call malloc().
293 * We need our very own assert().
294 */
295typedef void assert_handler_tp(const char * error, const char *file, int line);
296
297#if PARANOIA > 0
298
299#ifdef NDEBUG
300#undef NDEBUG
301#endif
302
303static void default_assert_handler(const char *error,
304 const char *file, int line)
305{
306#ifdef HAVE_WRITEV
307 struct iovec iov[5];
308 char * i1 = "assertion failed (";
309 char * i3 = "): ";
310 char * i5 = "\n";
311
312 iov[0].iov_base = i1; iov[0].iov_len = strlen(i1);
313 iov[1].iov_base = (char*) file; iov[1].iov_len = strlen(file);
314 iov[2].iov_base = i3; iov[2].iov_len = strlen(i3);
315 iov[3].iov_base = (char*) error; iov[3].iov_len = strlen(error);
316 iov[4].iov_base = i5; iov[4].iov_len = strlen(i5);
317 writev(STDERR_FILENO, iov, 5);
318#else
319 fputs("assertion failed (", stderr);
320 fputs(file, stderr);
321 fputs("): ", stderr);
322 fputs(error, stderr);
323 fputc('\n', stderr);
324#endif
325 (void) line;
326 abort();
327}
328static assert_handler_tp *assert_handler = default_assert_handler;
329
330
331#define assert(x) \
332 do { \
333 if (UNLIKELY(!(x))) { \
334 assert_handler(#x, __FILE__, __LINE__); \
335 } \
336 } while (0)
337
338#else
339
340static assert_handler_tp *assert_handler = NULL;
341#define NDEBUG
342#define assert(x) ((void)0)
343
344#endif
345
346assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *new)
347{
348 assert_handler_tp *old = assert_handler;
349 assert_handler = new;
350 return old;
351}
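/* Example (illustrative, not part of the original source): installing a
   custom assert handler. my_assert_handler is a hypothetical name; its
   signature must match assert_handler_tp above. Keep the handler simple,
   since it runs when the heap may already be corrupted.

       static void my_assert_handler(const char *error,
                                     const char *file, int line)
       {
           fprintf(stderr, "dnmalloc check failed: %s (%s:%d)\n",
                   error, file, line);
           abort();
       }

       ...
       assert_handler_tp *old_handler = dnmalloc_set_handler(my_assert_handler);
*/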
352
353
354#include <stdarg.h>
355
356 /* define for debugging */
357 /* #define DNMALLOC_DEBUG */
358
 359 /* Do some extra checks? If not, covered by assert()s */
360 /* #define DNMALLOC_CHECKS */
361
362 /*
363 The unsigned integer type used for comparing any two chunk sizes.
364 This should be at least as wide as size_t, but should not be signed.
365 */
366
367#ifndef CHUNK_SIZE_T
368#define CHUNK_SIZE_T unsigned long
369#endif
370
371/*
 372 The unsigned integer type used to hold addresses when they are
 373 manipulated as integers. intptr_t would suffice, except that it
 374 is not defined on all systems.
375*/
376#ifndef PTR_UINT
377#define PTR_UINT unsigned long
378#endif
379
380
381/*
382 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
383 of chunk sizes.
384
385 The default version is the same as size_t.
386
387 While not strictly necessary, it is best to define this as an
388 unsigned type, even if size_t is a signed type. This may avoid some
389 artificial size limitations on some systems.
390
391 On a 64-bit machine, you may be able to reduce malloc overhead by
392 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
393 expense of not being able to handle more than 2^32 of malloced
394 space. If this limitation is acceptable, you are encouraged to set
395 this unless you are on a platform requiring 16byte alignments. In
396 this case the alignment requirements turn out to negate any
397 potential advantages of decreasing size_t word size.
398
399 Implementors: Beware of the possible combinations of:
400 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
401 and might be the same width as int or as long
402 - size_t might have different width and signedness as INTERNAL_SIZE_T
403 - int and long might be 32 or 64 bits, and might be the same width
404 To deal with this, most comparisons and difference computations
405 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
406 aware of the fact that casting an unsigned int to a wider long does
407 not sign-extend. (This also makes checking for negative numbers
408 awkward.) Some of these casts result in harmless compiler warnings
409 on some systems.
410*/
411
412#ifndef INTERNAL_SIZE_T
413#define INTERNAL_SIZE_T size_t
414#endif
415
416/* The corresponding word size */
417#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
418
419
420
421/*
422 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
423 It must be a power of two at least 2 * SIZE_SZ, even on machines
424 for which smaller alignments would suffice. It may be defined as
425 larger than this though. Note however that code and data structures
426 are optimized for the case of 8-byte alignment.
427*/
428
429
430#ifndef MALLOC_ALIGNMENT
431#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
432#endif
433
434/* The corresponding bit mask value */
435#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
436
437
438
439/*
440 REALLOC_ZERO_BYTES_FREES should be set if a call to
441 realloc with zero bytes should be the same as a call to free.
442 Some people think it should. Otherwise, since this malloc
443 returns a unique pointer for malloc(0), so does realloc(p, 0).
444*/
445
446#define REALLOC_ZERO_BYTES_FREES
447
448/*
449 TRIM_FASTBINS controls whether free() of a very small chunk can
450 immediately lead to trimming. Setting to true (1) can reduce memory
451 footprint, but will almost always slow down programs that use a lot
452 of small chunks.
453
454 Define this only if you are willing to give up some speed to more
455 aggressively reduce system-level memory footprint when releasing
456 memory in programs that use many small chunks. You can get
457 essentially the same effect by setting MXFAST to 0, but this can
458 lead to even greater slowdowns in programs using many small chunks.
459 TRIM_FASTBINS is an in-between compile-time option, that disables
460 only those chunks bordering topmost memory from being placed in
461 fastbins.
462*/
463
464#ifndef TRIM_FASTBINS
465#define TRIM_FASTBINS 0
466#endif
467
468
469/*
470 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
471 This is necessary when you only want to use this malloc in one part
472 of a program, using your regular system malloc elsewhere.
473*/
474
475/* #define USE_DL_PREFIX */
476
477
478/*
479 USE_MALLOC_LOCK causes wrapper functions to surround each
480 callable routine with pthread mutex lock/unlock.
481
482 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
483*/
484
485/* #define USE_MALLOC_LOCK */
486
487
488/*
489 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
490 actually a wrapper function that first calls MALLOC_PREACTION, then
491 calls the internal routine, and follows it with
492 MALLOC_POSTACTION. This is needed for locking, but you can also use
493 this, without USE_MALLOC_LOCK, for purposes of interception,
494 instrumentation, etc. It is a sad fact that using wrappers often
495 noticeably degrades performance of malloc-intensive programs.
496*/
497
498
499#ifdef USE_MALLOC_LOCK
500#define USE_PUBLIC_MALLOC_WRAPPERS
501#else
502/* #define USE_PUBLIC_MALLOC_WRAPPERS */
503#endif
504
505
506/*
507 Two-phase name translation.
508 All of the actual routines are given mangled names.
509 When wrappers are used, they become the public callable versions.
510 When DL_PREFIX is used, the callable names are prefixed.
511*/
512
513#ifndef USE_PUBLIC_MALLOC_WRAPPERS
514#define cALLOc public_cALLOc
515#define fREe public_fREe
516#define mALLOc public_mALLOc
517#define mEMALIGn public_mEMALIGn
518#define posix_mEMALIGn public_posix_mEMALIGn
519#define rEALLOc public_rEALLOc
520#define vALLOc public_vALLOc
521#define pVALLOc public_pVALLOc
522#define mALLINFo public_mALLINFo
523#define mALLOPt public_mALLOPt
524#define mTRIm public_mTRIm
525#define mSTATs public_mSTATs
526#define mUSABLe public_mUSABLe
527#endif
528
529#ifdef USE_DL_PREFIX
530#define public_cALLOc dlcalloc
531#define public_fREe dlfree
532#define public_mALLOc dlmalloc
533#define public_mEMALIGn dlmemalign
534#define public_posix_mEMALIGn dlposix_memalign
535#define public_rEALLOc dlrealloc
536#define public_vALLOc dlvalloc
537#define public_pVALLOc dlpvalloc
538#define public_mALLINFo dlmallinfo
539#define public_mALLOPt dlmallopt
540#define public_mTRIm dlmalloc_trim
541#define public_mSTATs dlmalloc_stats
542#define public_mUSABLe dlmalloc_usable_size
543#else /* USE_DL_PREFIX */
544#define public_cALLOc calloc
545#define public_fREe free
546#define public_mALLOc malloc
547#define public_mEMALIGn memalign
548#define public_posix_mEMALIGn posix_memalign
549#define public_rEALLOc realloc
550#define public_vALLOc valloc
551#define public_pVALLOc pvalloc
552#define public_mALLINFo mallinfo
553#define public_mALLOPt mallopt
554#define public_mTRIm malloc_trim
555#define public_mSTATs malloc_stats
556#define public_mUSABLe malloc_usable_size
557#endif /* USE_DL_PREFIX */
558
559
560/*
561 HAVE_MEMCPY should be defined if you are not otherwise using
562 ANSI STD C, but still have memcpy and memset in your C library
563 and want to use them in calloc and realloc. Otherwise simple
564 macro versions are defined below.
565
566 USE_MEMCPY should be defined as 1 if you actually want to
567 have memset and memcpy called. People report that the macro
568 versions are faster than libc versions on some systems.
569
570 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
571 (of <= 36 bytes) are manually unrolled in realloc and calloc.
572*/
573
574#ifndef HAVE_MEMCPY
575#define HAVE_MEMCPY
576#endif
577
578#ifndef USE_MEMCPY
579#ifdef HAVE_MEMCPY
580#define USE_MEMCPY 1
581#else
582#define USE_MEMCPY 0
583#endif
584#endif
585
586
587#if (__STD_C || defined(HAVE_MEMCPY))
588
589#ifdef WIN32
590 /* On Win32 memset and memcpy are already declared in windows.h */
591#else
592#if __STD_C
593 /* Defined in string.h */
594#else
595Void_t* memset();
596Void_t* memcpy();
597#endif
598#endif
599#endif
600
601/*
602 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
 603 malloc is unable to return memory, either because memory is
604 exhausted or because of illegal arguments.
605
606 By default, sets errno if running on STD_C platform, else does nothing.
607*/
608
609#ifndef MALLOC_FAILURE_ACTION
610#if __STD_C
611#define MALLOC_FAILURE_ACTION \
612 errno = ENOMEM;
613
614#else
615#define MALLOC_FAILURE_ACTION
616#endif
617#endif
618
619/*
620 MORECORE-related declarations. By default, rely on sbrk
621*/
622
623
624#if !defined(HAVE_UNISTD_H)
625#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
626#if __STD_C
627extern Void_t* sbrk(ptrdiff_t);
628#else
629extern Void_t* sbrk();
630#endif
631#endif
632#endif
633
634/*
635 MORECORE_FAILURE is the value returned upon failure of MORECORE
636 as well as mmap. Since it cannot be an otherwise valid memory address,
637 and must reflect values of standard sys calls, you probably ought not
638 try to redefine it.
639*/
640
641#ifndef MORECORE_FAILURE
642#define MORECORE_FAILURE ((void*)(-1UL))
643#endif
644
645/*
646 MORECORE is the name of the routine to call to obtain more memory
647 from the system. See below for general guidance on writing
648 alternative MORECORE functions, as well as a version for WIN32 and a
649 sample version for pre-OSX macos.
650*/
651
652#ifndef MORECORE
653#define MORECORE sbrk
654#endif
655
656
657/*
658 If MORECORE_CONTIGUOUS is true, take advantage of fact that
659 consecutive calls to MORECORE with positive arguments always return
660 contiguous increasing addresses. This is true of unix sbrk. Even
661 if not defined, when regions happen to be contiguous, malloc will
662 permit allocations spanning regions obtained from different
663 calls. But defining this when applicable enables some stronger
664 consistency checks and space efficiencies.
665*/
666
667#ifndef MORECORE_CONTIGUOUS
668#define MORECORE_CONTIGUOUS 1
669#endif
670
671/*
672 Define MORECORE_CANNOT_TRIM if your version of MORECORE
673 cannot release space back to the system when given negative
674 arguments. This is generally necessary only if you are using
675 a hand-crafted MORECORE function that cannot handle negative arguments.
676*/
677
678/* #define MORECORE_CANNOT_TRIM */
679
680
681/*
682 This malloc requires mmap for heap management data. It is an error
683 if mmap is not available.
684
685 Additionally, mmap will be used to satisfy large requests.
686*/
687
688#ifndef HAVE_MMAP
 689# error HAVE_MMAP not defined, does your operating system support mmap?
690#endif
691
692/*
693 Standard unix mmap using /dev/zero clears memory so calloc doesn't
694 need to.
695*/
696
697#ifndef MMAP_CLEARS
698#define MMAP_CLEARS 1
699#endif
700
701
702/*
703 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
704 sbrk fails, and mmap is used as a backup (which is done only if
705 HAVE_MMAP). The value must be a multiple of page size. This
706 backup strategy generally applies only when systems have "holes" in
707 address space, so sbrk cannot perform contiguous expansion, but
708 there is still space available on system. On systems for which
709 this is known to be useful (i.e. most linux kernels), this occurs
710 only when programs allocate huge amounts of memory. Between this,
711 and the fact that mmap regions tend to be limited, the size should
712 be large, to avoid too many mmap calls and thus avoid running out
713 of kernel resources.
714*/
715
716#ifndef MMAP_AS_MORECORE_SIZE
717#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
718#endif
719
720
721/*
722 The system page size. To the extent possible, this malloc manages
723 memory from the system in page-size units. Note that this value is
724 cached during initialization into a field of malloc_state. So even
725 if malloc_getpagesize is a function, it is only called once.
726
727 The following mechanics for getpagesize were adapted from bsd/gnu
 728 getpagesize.h. If none of the system probes here apply, a value of
 729 4096 is used, which should be OK: on such systems the exact value
 730 probably doesn't impact performance anyway.
731*/
732
733
734#ifndef malloc_getpagesize
735
736# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
737# ifndef _SC_PAGE_SIZE
738# define _SC_PAGE_SIZE _SC_PAGESIZE
739# endif
740# endif
741
742# ifdef _SC_PAGE_SIZE
743# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
744# else
745# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
746 extern size_t getpagesize();
747# define malloc_getpagesize getpagesize()
748# else
749# ifdef WIN32 /* use supplied emulation of getpagesize */
750# define malloc_getpagesize getpagesize()
751# else
752# if defined(HAVE_SYS_PARAM_H)
753# include <sys/param.h>
754# endif
755# ifdef EXEC_PAGESIZE
756# define malloc_getpagesize EXEC_PAGESIZE
757# else
758# ifdef NBPG
759# ifndef CLSIZE
760# define malloc_getpagesize NBPG
761# else
762# define malloc_getpagesize (NBPG * CLSIZE)
763# endif
764# else
765# ifdef NBPC
766# define malloc_getpagesize NBPC
767# else
768# ifdef PAGESIZE
769# define malloc_getpagesize PAGESIZE
770# else /* just guess */
771# define malloc_getpagesize (4096)
772# endif
773# endif
774# endif
775# endif
776# endif
777# endif
778# endif
779#endif
780
781/*
782 This version of malloc supports the standard SVID/XPG mallinfo
783 routine that returns a struct containing usage properties and
784 statistics. It should work on any SVID/XPG compliant system that has
785 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
786 install such a thing yourself, cut out the preliminary declarations
787 as described above and below and save them in a malloc.h file. But
788 there's no compelling reason to bother to do this.)
789
790 The main declaration needed is the mallinfo struct that is returned
 791 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
792 bunch of fields that are not even meaningful in this version of
 793 malloc. These fields are instead filled by mallinfo() with
794 other numbers that might be of interest.
795
796 HAVE_MALLOC_H should be set if you have a
797 /usr/include/malloc.h file that includes a declaration of struct
798 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
799 version is declared below. These must be precisely the same for
800 mallinfo() to work. The original SVID version of this struct,
801 defined on most systems with mallinfo, declares all fields as
 802 ints. But some others define them as unsigned long. If your system
803 defines the fields using a type of different width than listed here,
804 you must #include your system version and #define
805 HAVE_MALLOC_H.
806*/
807
808/* #define HAVE_MALLOC_H */
809
 810/* On *BSD, malloc.h is deprecated, and on some *BSD systems
 811 * including it may actually raise an error.
812 */
813#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
814#include <malloc.h>
815#else
816
817/* SVID2/XPG mallinfo structure */
818
819struct mallinfo {
820 int arena; /* non-mmapped space allocated from system */
821 int ordblks; /* number of free chunks */
822 int smblks; /* number of fastbin blocks */
823 int hblks; /* number of mmapped regions */
824 int hblkhd; /* space in mmapped regions */
825 int usmblks; /* maximum total allocated space */
826 int fsmblks; /* space available in freed fastbin blocks */
827 int uordblks; /* total allocated space */
828 int fordblks; /* total free space */
829 int keepcost; /* top-most, releasable (via malloc_trim) space */
830};
831
832/*
833 SVID/XPG defines four standard parameter numbers for mallopt,
834 normally defined in malloc.h. Only one of these (M_MXFAST) is used
835 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
836 so setting them has no effect. But this malloc also supports other
837 options in mallopt described below.
838*/
839#endif
840
841
842/* ---------- description of public routines ------------ */
843
844/*
845 malloc(size_t n)
846 Returns a pointer to a newly allocated chunk of at least n bytes, or null
847 if no space is available. Additionally, on failure, errno is
848 set to ENOMEM on ANSI C systems.
849
 850 If n is zero, malloc returns a minimum-sized chunk. (The minimum
851 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
852 systems.) On most systems, size_t is an unsigned type, so calls
853 with negative arguments are interpreted as requests for huge amounts
854 of space, which will often fail. The maximum supported value of n
855 differs across systems, but is in all cases less than the maximum
856 representable value of a size_t.
857*/
858#if __STD_C
859Void_t* public_mALLOc(size_t) __attribute_malloc__;
860#else
861Void_t* public_mALLOc();
862#endif
863
864/*
865 free(Void_t* p)
866 Releases the chunk of memory pointed to by p, that had been previously
867 allocated using malloc or a related routine such as realloc.
868 It has no effect if p is null. It can have arbitrary (i.e., bad!)
869 effects if p has already been freed.
870
871 Unless disabled (using mallopt), freeing very large spaces will
872 when possible, automatically trigger operations that give
873 back unused memory to the system, thus reducing program footprint.
874*/
875#if __STD_C
876void public_fREe(Void_t*);
877#else
878void public_fREe();
879#endif
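/* Usage sketch for the two routines above (illustrative; assumes the default
   build where USE_DL_PREFIX is not defined, so the public names are the
   standard malloc/free):

       char *buf = malloc(128);
       if (buf == NULL) {
           perror("malloc");   on ANSI systems errno has been set to ENOMEM
           return -1;
       }
       memset(buf, 0, 128);
       free(buf);
       free(NULL);             explicitly allowed: no effect
*/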
880
881/*
882 calloc(size_t n_elements, size_t element_size);
883 Returns a pointer to n_elements * element_size bytes, with all locations
884 set to zero.
885*/
886#if __STD_C
887Void_t* public_cALLOc(size_t, size_t) __attribute_malloc__;
888#else
889Void_t* public_cALLOc();
890#endif
891
892/*
893 realloc(Void_t* p, size_t n)
894 Returns a pointer to a chunk of size n that contains the same data
895 as does chunk p up to the minimum of (n, p's size) bytes, or null
896 if no space is available.
897
898 The returned pointer may or may not be the same as p. The algorithm
899 prefers extending p when possible, otherwise it employs the
900 equivalent of a malloc-copy-free sequence.
901
902 If p is null, realloc is equivalent to malloc.
903
904 If space is not available, realloc returns null, errno is set (if on
905 ANSI) and p is NOT freed.
906
907 if n is for fewer bytes than already held by p, the newly unused
908 space is lopped off and freed if possible. Unless the #define
909 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
910 zero (re)allocates a minimum-sized chunk.
911
912 Large chunks that were internally obtained via mmap will always
913 be reallocated using malloc-copy-free sequences unless
914 the system supports MREMAP (currently only linux).
915
916 The old unix realloc convention of allowing the last-free'd chunk
917 to be used as an argument to realloc is not supported.
918*/
919#if __STD_C
920Void_t* public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
921#else
922Void_t* public_rEALLOc();
923#endif
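/* Illustrative pattern for the failure behaviour described above: because a
   failed realloc does NOT free p, assign through a temporary to avoid
   leaking the original block. handle_out_of_memory is a hypothetical
   error path.

       char *tmp = realloc(p, new_size);
       if (tmp == NULL) {
           handle_out_of_memory();   p is still valid and owns the old block
       } else {
           p = tmp;
       }
*/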
924
925/*
926 memalign(size_t alignment, size_t n);
927 Returns a pointer to a newly allocated chunk of n bytes, aligned
928 in accord with the alignment argument.
929
930 The alignment argument should be a power of two. If the argument is
931 not a power of two, the nearest greater power is used.
932 8-byte alignment is guaranteed by normal malloc calls, so don't
933 bother calling memalign with an argument of 8 or less.
934
935 Overreliance on memalign is a sure way to fragment space.
936*/
937#if __STD_C
938Void_t* public_mEMALIGn(size_t, size_t) __attribute_malloc__;
939#else
940Void_t* public_mEMALIGn();
941#endif
942
943/*
944 posix_memalign(void** memptr, size_t alignment, size_t n);
945 Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
946 in accord with the alignment argument. Returns 0 on success, otherwise
947 an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
948
949 The alignment must be a power of two, and a multiple of sizeof(void *).
950*/
951#if __STD_C
952int public_posix_mEMALIGn(Void_t**, size_t, size_t);
953#else
954int public_posix_mEMALIGn();
955#endif
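/* Usage sketch (illustrative): the alignment must be a power of two and a
   multiple of sizeof(void *), and errors are reported through the return
   value rather than errno.

       void *p = NULL;
       int rc = posix_memalign(&p, 64, 1024);   64 is a valid alignment
       if (rc == 0) {
           free(p);               use p, then release it with free()
       } else if (rc == EINVAL) {
           ;                      bad alignment argument
       } else if (rc == ENOMEM) {
           ;                      out of memory
       }
*/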
956
957/*
958 valloc(size_t n);
959 Equivalent to memalign(pagesize, n), where pagesize is the page
960 size of the system. If the pagesize is unknown, 4096 is used.
961*/
962#if __STD_C
963Void_t* public_vALLOc(size_t) __attribute_malloc__;
964#else
965Void_t* public_vALLOc();
966#endif
967
968
969
970/*
971 mallopt(int parameter_number, int parameter_value)
 972 Sets tunable parameters. The format is to provide a
973 (parameter-number, parameter-value) pair. mallopt then sets the
974 corresponding parameter to the argument value if it can (i.e., so
975 long as the value is meaningful), and returns 1 if successful else
976 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
977 normally defined in malloc.h. Only one of these (M_MXFAST) is used
978 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
979 so setting them has no effect. But this malloc also supports four
980 other options in mallopt. See below for details. Briefly, supported
981 parameters are as follows (listed defaults are for "typical"
982 configurations).
983
984 Symbol param # default allowed param values
985 M_MXFAST 1 64 0-80 (0 disables fastbins)
986 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
987 M_TOP_PAD -2 0 any
988 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
989 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
990*/
991#if __STD_C
992int public_mALLOPt(int, int);
993#else
994int public_mALLOPt();
995#endif
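/* Tuning sketch (illustrative) using the parameter symbols from the table
   above; each call returns 1 on success, 0 otherwise. The 128K/192K values
   echo the XF86 example given in the M_TRIM_THRESHOLD discussion below.

       mallopt(M_MXFAST, 0);                    disable fastbins entirely
       mallopt(M_TRIM_THRESHOLD, 128 * 1024);   trim more aggressively
       mallopt(M_MMAP_THRESHOLD, 192 * 1024);   mmap requests >= 192K
       mallopt(M_MMAP_MAX, 0);                  or disable mmap for large requests
*/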
996
997
998/*
999 mallinfo()
1000 Returns (by copy) a struct containing various summary statistics:
1001
1002 arena: current total non-mmapped bytes allocated from system
1003 ordblks: the number of free chunks
1004 smblks: the number of fastbin blocks (i.e., small chunks that
 1005 have been freed but not yet reused or consolidated)
1006 hblks: current number of mmapped regions
1007 hblkhd: total bytes held in mmapped regions
1008 usmblks: the maximum total allocated space. This will be greater
1009 than current total if trimming has occurred.
1010 fsmblks: total bytes held in fastbin blocks
1011 uordblks: current total allocated space (normal or mmapped)
1012 fordblks: total free space
1013 keepcost: the maximum number of bytes that could ideally be released
1014 back to system via malloc_trim. ("ideally" means that
1015 it ignores page restrictions etc.)
1016
1017 Because these fields are ints, but internal bookkeeping may
1018 be kept as longs, the reported values may wrap around zero and
1019 thus be inaccurate.
1020*/
1021#if __STD_C
1022struct mallinfo public_mALLINFo(void);
1023#else
1024struct mallinfo public_mALLINFo();
1025#endif
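/* Illustrative use of the statistics described above (field meanings as
   listed; the values are ints and may wrap):

       struct mallinfo mi = mallinfo();
       fprintf(stderr, "arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
               mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
*/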
1026
1027/*
1028 pvalloc(size_t n);
1029 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1030 round up n to nearest pagesize.
1031 */
1032#if __STD_C
1033Void_t* public_pVALLOc(size_t) __attribute_malloc__;
1034#else
1035Void_t* public_pVALLOc();
1036#endif
1037
1038/*
1039 malloc_trim(size_t pad);
1040
1041 If possible, gives memory back to the system (via negative
1042 arguments to sbrk) if there is unused memory at the `high' end of
1043 the malloc pool. You can call this after freeing large blocks of
1044 memory to potentially reduce the system-level memory requirements
1045 of a program. However, it cannot guarantee to reduce memory. Under
1046 some allocation patterns, some large free blocks of memory will be
1047 locked between two used chunks, so they cannot be given back to
1048 the system.
1049
1050 The `pad' argument to malloc_trim represents the amount of free
1051 trailing space to leave untrimmed. If this argument is zero,
1052 only the minimum amount of memory to maintain internal data
1053 structures will be left (one page or less). Non-zero arguments
1054 can be supplied to maintain enough trailing space to service
1055 future expected allocations without having to re-obtain memory
1056 from the system.
1057
1058 Malloc_trim returns 1 if it actually released any memory, else 0.
1059 On systems that do not support "negative sbrks", it will always
 1060 return 0.
1061*/
1062#if __STD_C
1063int public_mTRIm(size_t);
1064#else
1065int public_mTRIm();
1066#endif
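/* Illustrative call matching the description above: after freeing large
   blocks, ask the allocator to return trailing free memory to the system,
   keeping no extra pad.

       if (malloc_trim(0))
           fprintf(stderr, "memory was returned to the system\n");
*/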
1067
1068/*
1069 malloc_usable_size(Void_t* p);
1070
1071 Returns the number of bytes you can actually use in
1072 an allocated chunk, which may be more than you requested (although
1073 often not) due to alignment and minimum size constraints.
1074 You can use this many bytes without worrying about
1075 overwriting other allocated objects. This is not a particularly great
1076 programming practice. malloc_usable_size can be more useful in
1077 debugging and assertions, for example:
1078
1079 p = malloc(n);
1080 assert(malloc_usable_size(p) >= 256);
1081
1082*/
1083#if __STD_C
1084size_t public_mUSABLe(Void_t*);
1085#else
1086size_t public_mUSABLe();
1087#endif
1088
1089/*
1090 malloc_stats();
1091 Prints on stderr the amount of space obtained from the system (both
1092 via sbrk and mmap), the maximum amount (which may be more than
1093 current if malloc_trim and/or munmap got called), and the current
1094 number of bytes allocated via malloc (or realloc, etc) but not yet
1095 freed. Note that this is the number of bytes allocated, not the
1096 number requested. It will be larger than the number requested
1097 because of alignment and bookkeeping overhead. Because it includes
1098 alignment wastage as being in use, this figure may be greater than
1099 zero even when no user-level chunks are allocated.
1100
1101 The reported current and maximum system memory can be inaccurate if
1102 a program makes other calls to system memory allocation functions
1103 (normally sbrk) outside of malloc.
1104
1105 malloc_stats prints only the most commonly interesting statistics.
1106 More information can be obtained by calling mallinfo.
1107
1108*/
1109#if __STD_C
 1110 void public_mSTATs(void);
1111#else
1112void public_mSTATs();
1113#endif
1114
1115/* mallopt tuning options */
1116
1117/*
1118 M_MXFAST is the maximum request size used for "fastbins", special bins
1119 that hold returned chunks without consolidating their spaces. This
1120 enables future requests for chunks of the same size to be handled
1121 very quickly, but can increase fragmentation, and thus increase the
1122 overall memory footprint of a program.
1123
1124 This malloc manages fastbins very conservatively yet still
1125 efficiently, so fragmentation is rarely a problem for values less
1126 than or equal to the default. The maximum supported value of MXFAST
1127 is 80. You wouldn't want it any higher than this anyway. Fastbins
1128 are designed especially for use with many small structs, objects or
1129 strings -- the default handles structs/objects/arrays with sizes up
1130 to 16 4byte fields, or small strings representing words, tokens,
1131 etc. Using fastbins for larger objects normally worsens
1132 fragmentation without improving speed.
1133
1134 M_MXFAST is set in REQUEST size units. It is internally used in
1135 chunksize units, which adds padding and alignment. You can reduce
1136 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1137 algorithm to be a closer approximation of fifo-best-fit in all cases,
1138 not just for larger requests, but will generally cause it to be
1139 slower.
1140*/
1141
1142
1143/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1144#ifndef M_MXFAST
1145#define M_MXFAST 1
1146#endif
1147
1148#ifndef DEFAULT_MXFAST
1149#define DEFAULT_MXFAST 64
1150#endif
1151
1152
1153/*
1154 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1155 to keep before releasing via malloc_trim in free().
1156
1157 Automatic trimming is mainly useful in long-lived programs.
1158 Because trimming via sbrk can be slow on some systems, and can
1159 sometimes be wasteful (in cases where programs immediately
1160 afterward allocate more large chunks) the value should be high
1161 enough so that your overall system performance would improve by
1162 releasing this much memory.
1163
1164 The trim threshold and the mmap control parameters (see below)
1165 can be traded off with one another. Trimming and mmapping are
1166 two different ways of releasing unused memory back to the
1167 system. Between these two, it is often possible to keep
1168 system-level demands of a long-lived program down to a bare
1169 minimum. For example, in one test suite of sessions measuring
1170 the XF86 X server on Linux, using a trim threshold of 128K and a
1171 mmap threshold of 192K led to near-minimal long term resource
1172 consumption.
1173
1174 If you are using this malloc in a long-lived program, it should
1175 pay to experiment with these values. As a rough guide, you
1176 might set to a value close to the average size of a process
1177 (program) running on your system. Releasing this much memory
1178 would allow such a process to run in memory. Generally, it's
 1179 worth it to tune for trimming rather than memory mapping when a
1180 program undergoes phases where several large chunks are
1181 allocated and released in ways that can reuse each other's
1182 storage, perhaps mixed with phases where there are no such
1183 chunks at all. And in well-behaved long-lived programs,
1184 controlling release of large blocks via trimming versus mapping
1185 is usually faster.
1186
1187 However, in most programs, these parameters serve mainly as
1188 protection against the system-level effects of carrying around
1189 massive amounts of unneeded memory. Since frequent calls to
1190 sbrk, mmap, and munmap otherwise degrade performance, the default
1191 parameters are set to relatively high values that serve only as
1192 safeguards.
1193
1194 The trim value must be greater than page size to have any useful
1195 effect. To disable trimming completely, you can set to
1196 (unsigned long)(-1)
1197
1198 Trim settings interact with fastbin (MXFAST) settings: Unless
1199 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1200 freeing a chunk with size less than or equal to MXFAST. Trimming is
1201 instead delayed until subsequent freeing of larger chunks. However,
1202 you can still force an attempted trim by calling malloc_trim.
1203
1204 Also, trimming is not generally possible in cases where
1205 the main arena is obtained via mmap.
1206
1207 Note that the trick some people use of mallocing a huge space and
1208 then freeing it at program startup, in an attempt to reserve system
1209 memory, doesn't have the intended effect under automatic trimming,
1210 since that memory will immediately be returned to the system.
1211*/
1212
1213#define M_TRIM_THRESHOLD -1
1214
1215#ifndef DEFAULT_TRIM_THRESHOLD
1216#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1217#endif
1218
1219/*
1220 M_TOP_PAD is the amount of extra `padding' space to allocate or
1221 retain whenever sbrk is called. It is used in two ways internally:
1222
1223 * When sbrk is called to extend the top of the arena to satisfy
1224 a new malloc request, this much padding is added to the sbrk
1225 request.
1226
1227 * When malloc_trim is called automatically from free(),
1228 it is used as the `pad' argument.
1229
1230 In both cases, the actual amount of padding is rounded
1231 so that the end of the arena is always a system page boundary.
1232
1233 The main reason for using padding is to avoid calling sbrk so
1234 often. Having even a small pad greatly reduces the likelihood
1235 that nearly every malloc request during program start-up (or
1236 after trimming) will invoke sbrk, which needlessly wastes
1237 time.
1238
1239 Automatic rounding-up to page-size units is normally sufficient
1240 to avoid measurable overhead, so the default is 0. However, in
1241 systems where sbrk is relatively slow, it can pay to increase
1242 this value, at the expense of carrying around more memory than
1243 the program needs.
1244*/
1245
1246#define M_TOP_PAD -2
1247
1248#ifndef DEFAULT_TOP_PAD
1249#define DEFAULT_TOP_PAD (0)
1250#endif
1251
1252/*
1253 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1254 to service a request. Requests of at least this size that cannot
1255 be allocated using already-existing space will be serviced via mmap.
1256 (If enough normal freed space already exists it is used instead.)
1257
1258 Using mmap segregates relatively large chunks of memory so that
1259 they can be individually obtained and released from the host
1260 system. A request serviced through mmap is never reused by any
1261 other request (at least not directly; the system may just so
1262 happen to remap successive requests to the same locations).
1263
1264 Segregating space in this way has the benefits that:
1265
1266 1. Mmapped space can ALWAYS be individually released back
1267 to the system, which helps keep the system level memory
1268 demands of a long-lived program low.
1269 2. Mapped memory can never become `locked' between
1270 other chunks, as can happen with normally allocated chunks, which
1271 means that even trimming via malloc_trim would not release them.
1272 3. On some systems with "holes" in address spaces, mmap can obtain
1273 memory that sbrk cannot.
1274
1275 However, it has the disadvantages that:
1276
1277 1. The space cannot be reclaimed, consolidated, and then
1278 used to service later requests, as happens with normal chunks.
1279 2. It can lead to more wastage because of mmap page alignment
1280 requirements
1281 3. It causes malloc performance to be more dependent on host
1282 system memory management support routines which may vary in
1283 implementation quality and may impose arbitrary
1284 limitations. Generally, servicing a request via normal
1285 malloc steps is faster than going through a system's mmap.
1286
1287 The advantages of mmap nearly always outweigh disadvantages for
1288 "large" chunks, but the value of "large" varies across systems. The
1289 default is an empirically derived value that works well in most
1290 systems.
1291*/
1292
1293#define M_MMAP_THRESHOLD -3
1294
1295#ifndef DEFAULT_MMAP_THRESHOLD
1296#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1297#endif
1298
1299/*
1300 M_MMAP_MAX is the maximum number of requests to simultaneously
 1301 service using mmap. This parameter exists because some systems
 1302 have a limited number of internal tables for
1303 use by mmap, and using more than a few of them may degrade
1304 performance.
1305
1306 The default is set to a value that serves only as a safeguard.
1307 Setting to 0 disables use of mmap for servicing large requests. If
1308 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1309 to non-zero values in mallopt will fail.
1310*/
1311
1312#define M_MMAP_MAX -4
1313
1314#ifndef DEFAULT_MMAP_MAX
1315#define DEFAULT_MMAP_MAX (65536)
1316#endif
1317
1318#ifdef __cplusplus
1319}; /* end of extern "C" */
1320#endif
1321
1322/*
1323 ========================================================================
1324 To make a fully customizable malloc.h header file, cut everything
1325 above this line, put into file malloc.h, edit to suit, and #include it
1326 on the next line, as well as in programs that use this malloc.
1327 ========================================================================
1328*/
1329
1330/* #include "malloc.h" */
1331
1332/* --------------------- public wrappers ---------------------- */
1333
1334#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1335
1336/* DL_STATIC used to make functions (deep down) consistent
1337 * with prototypes (otherwise the prototypes are static
1338 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1339 * The gcc compiler doesn't care, but the HP-UX compiler does.
1340 */
1341#define DL_STATIC static
1342
1343/* Declare all routines as internal */
1344#if __STD_C
1345static Void_t* mALLOc(size_t) __attribute_malloc__;
1346static void fREe(Void_t*);
1347static Void_t* rEALLOc(Void_t*, size_t) __attribute_malloc__;
1348static Void_t* mEMALIGn(size_t, size_t) __attribute_malloc__;
1349static int posix_mEMALIGn(Void_t**, size_t, size_t);
1350static Void_t* vALLOc(size_t) __attribute_malloc__;
1351static Void_t* pVALLOc(size_t) __attribute_malloc__;
1352static Void_t* cALLOc(size_t, size_t) __attribute_malloc__;
1353static int mTRIm(size_t);
1354static size_t mUSABLe(Void_t*);
 1355 static void mSTATs(void);
1356static int mALLOPt(int, int);
1357static struct mallinfo mALLINFo(void);
1358#else
1359static Void_t* mALLOc();
1360static void fREe();
1361static Void_t* rEALLOc();
1362static Void_t* mEMALIGn();
1363static int posix_mEMALIGn();
1364static Void_t* vALLOc();
1365static Void_t* pVALLOc();
1366static Void_t* cALLOc();
1367static int mTRIm();
1368static size_t mUSABLe();
1369static void mSTATs();
1370static int mALLOPt();
1371static struct mallinfo mALLINFo();
1372#endif
1373
1374/*
1375 MALLOC_PREACTION and MALLOC_POSTACTION should be
1376 defined to return 0 on success, and nonzero on failure.
1377 The return value of MALLOC_POSTACTION is currently ignored
1378 in wrapper functions since there is no reasonable default
1379 action to take on failure.
1380*/
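/* Illustrative substitution (not part of the original source): with the
   wrappers enabled but USE_MALLOC_LOCK left undefined, the hooks can be
   used for simple instrumentation instead of locking. The counter name is
   hypothetical; both macros still return 0 (success) as required above.

       static unsigned long dnmalloc_wrapper_calls = 0;
       # define MALLOC_PREACTION  (++dnmalloc_wrapper_calls, 0)
       # define MALLOC_POSTACTION (0)
*/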
1381
1382
1383#ifdef USE_MALLOC_LOCK
1384
1385# ifdef WIN32
1386
1387static int mALLOC_MUTEx;
1388#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1389#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1390int dnmalloc_pthread_init(void) { return 0; }
1391
1392# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1393
1394# if defined(__NetBSD__)
1395#include <reentrant.h>
1396extern int __isthreaded;
1397static mutex_t thread_lock = MUTEX_INITIALIZER;
1398#define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock)
1399#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1400void _malloc_prefork(void) { _MALLOC_LOCK(); }
1401void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1402# endif
1403
1404# if defined(__OpenBSD__)
1405extern int __isthreaded;
1406void _thread_malloc_lock(void);
1407void _thread_malloc_unlock(void);
1408#define _MALLOC_LOCK() if (__isthreaded) _thread_malloc_lock()
1409#define _MALLOC_UNLOCK() if (__isthreaded) _thread_malloc_unlock()
1410# endif
1411
1412# if defined(__FreeBSD__)
1413extern int __isthreaded;
1414struct _spinlock {
1415 volatile long access_lock;
1416 volatile long lock_owner;
1417 volatile char *fname;
1418 volatile int lineno;
1419};
1420typedef struct _spinlock spinlock_t;
1421#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
1422void _spinlock(spinlock_t *);
1423void _spinunlock(spinlock_t *);
1424/* # include "/usr/src/lib/libc/include/spinlock.h" */
1425static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
1426spinlock_t *__malloc_lock = &thread_lock;
1427#define _MALLOC_LOCK() if (__isthreaded) _spinlock(&thread_lock)
1428#define _MALLOC_UNLOCK() if (__isthreaded) _spinunlock(&thread_lock)
1429# endif
1430
1431/* Common for all three *BSD
1432 */
1433static int malloc_active = 0;
1434static int dnmalloc_mutex_lock()
1435{
1436 _MALLOC_LOCK();
1437 if (!malloc_active)
1438 {
1439 ++malloc_active;
1440 return 0;
1441 }
1442 assert(malloc_active == 0);
1443 _MALLOC_UNLOCK();
1444 errno = EDEADLK;
1445 return 1;
1446}
1447static int dnmalloc_mutex_unlock()
1448{
1449 --malloc_active;
1450 _MALLOC_UNLOCK();
1451 return 0;
1452}
1453#define MALLOC_PREACTION dnmalloc_mutex_lock()
1454#define MALLOC_POSTACTION dnmalloc_mutex_unlock()
1455int dnmalloc_pthread_init(void) { return 0; }
1456
1457# else
1458
1459/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1460 *
1461 * Works fine on linux (no malloc in pthread_mutex_lock)
 1462 * Works on HP-UX if initialized after entering main()
1463 */
1464#include <pthread.h>
1465static int malloc_active = 0;
1466void dnmalloc_fork_prepare(void);
1467void dnmalloc_fork_parent(void);
1468void dnmalloc_fork_child(void);
1469
1470#if !defined(__linux__)
1471
1472static pthread_mutex_t mALLOC_MUTEx;
1473pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1474static int dnmalloc_use_mutex = 0;
1475static void dnmalloc_pthread_init_int(void)
1476{
1477 pthread_mutexattr_t mta;
1478 pthread_mutexattr_init(&mta);
1479 pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
1480 pthread_mutex_init(&(mALLOC_MUTEx), &mta);
1481 pthread_mutexattr_destroy(&mta);
1482 pthread_atfork(dnmalloc_fork_prepare,
1483 dnmalloc_fork_parent,
1484 dnmalloc_fork_child);
1485 dnmalloc_use_mutex = 1;
1486}
1487int dnmalloc_pthread_init(void)
1488{
1489 return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
1490}
1491
1492#else
1493
1494static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1495static int dnmalloc_use_mutex = 1;
1496int dnmalloc_pthread_init(void) {
1497 return pthread_atfork(dnmalloc_fork_prepare,
1498 dnmalloc_fork_parent,
1499 dnmalloc_fork_child);
1500}
1501#endif /* !defined(__linux__) */
1502
1503void dnmalloc_fork_prepare(void) {
1504 if (dnmalloc_use_mutex)
1505 pthread_mutex_lock(&mALLOC_MUTEx);
1506}
1507void dnmalloc_fork_parent(void) {
1508 if (dnmalloc_use_mutex)
1509 pthread_mutex_unlock(&mALLOC_MUTEx);
1510}
1511void dnmalloc_fork_child(void) {
1512 int rc = 0;
1513#ifdef __GLIBC__
1514 if (dnmalloc_use_mutex)
1515 {
1516 pthread_mutex_unlock (&mALLOC_MUTEx);
1517 pthread_mutex_destroy(&mALLOC_MUTEx);
1518 rc = pthread_mutex_init(&mALLOC_MUTEx, NULL);
1519 }
1520#else
1521 if (dnmalloc_use_mutex)
1522 rc = pthread_mutex_unlock(&mALLOC_MUTEx);
1523#endif
1524 if (rc != 0)
1525 {
1526 fputs("fork_child failed", stderr);
1527 _exit(EXIT_FAILURE);
1528 }
1529}
1530static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
1531{
1532 if (dnmalloc_use_mutex)
1533 {
1534 int rc = pthread_mutex_lock(mutex);
1535 if (rc == 0)
1536 {
1537 if (!malloc_active)
1538 {
1539 ++malloc_active;
1540 return 0;
1541 }
1542 assert(malloc_active == 0);
1543 (void) pthread_mutex_unlock(mutex);
1544 errno = EDEADLK;
1545 return 1;
1546 }
1547 return rc;
1548 }
1549 return 0;
1550}
1551static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
1552{
1553 if (dnmalloc_use_mutex)
1554 {
1555 --malloc_active;
1556 return pthread_mutex_unlock(mutex);
1557 }
1558 return 0;
1559}
1560# define MALLOC_PREACTION dnmalloc_mutex_lock(&mALLOC_MUTEx)
1561# define MALLOC_POSTACTION dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1562
1563# endif
1564
1565#else
1566
1567/* Substitute anything you like for these */
1568
1569# define MALLOC_PREACTION (0)
1570# define MALLOC_POSTACTION (0)
1571int dnmalloc_pthread_init(void) { return 0; }
1572
1573#endif /* USE_MALLOC_LOCK */
1574
1575Void_t* public_mALLOc(size_t bytes) {
1576 Void_t* m;
1577 if (MALLOC_PREACTION == 0) {
1578 m = mALLOc(bytes);
1579 (void) MALLOC_POSTACTION;
1580 return m;
1581 }
1582 return 0;
1583}
1584
1585void public_fREe(Void_t* m) {
1586 if (MALLOC_PREACTION == 0) {
1587 fREe(m);
1588 (void) MALLOC_POSTACTION;
1589 }
1590}
1591
1592Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1593 if (MALLOC_PREACTION == 0) {
1594 m = rEALLOc(m, bytes);
1595 (void) MALLOC_POSTACTION;
1596 return m;
1597 }
1598 return 0;
1599}
1600
1601Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1602 Void_t* m;
1603 if (MALLOC_PREACTION == 0) {
1604 m = mEMALIGn(alignment, bytes);
1605 (void) MALLOC_POSTACTION;
1606 return m;
1607 }
1608 return 0;
1609}
1610
1611int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1612 int m, ret;
1613 if ((ret = MALLOC_PREACTION) == 0) {
1614 m = posix_mEMALIGn(memptr, alignment, bytes);
1615 (void) MALLOC_POSTACTION;
1616 return m;
1617 }
1618 return ret;
1619}
1620
1621Void_t* public_vALLOc(size_t bytes) {
1622 Void_t* m;
1623 if (MALLOC_PREACTION == 0) {
1624 m = vALLOc(bytes);
1625 (void) MALLOC_POSTACTION;
1626 return m;
1627 }
1628 return 0;
1629}
1630
1631Void_t* public_pVALLOc(size_t bytes) {
1632 Void_t* m;
1633 if (MALLOC_PREACTION == 0) {
1634 m = pVALLOc(bytes);
1635 (void) MALLOC_POSTACTION;
1636 return m;
1637 }
1638 return 0;
1639}
1640
1641Void_t* public_cALLOc(size_t n, size_t elem_size) {
1642 Void_t* m;
1643 if (MALLOC_PREACTION == 0) {
1644 m = cALLOc(n, elem_size);
1645 (void) MALLOC_POSTACTION;
1646 return m;
1647 }
1648 return 0;
1649}
1650
1651int public_mTRIm(size_t s) {
1652 int result;
1653 if (MALLOC_PREACTION == 0) {
1654 result = mTRIm(s);
1655 (void) MALLOC_POSTACTION;
1656 return result;
1657 }
1658 return 0;
1659}
1660
1661size_t public_mUSABLe(Void_t* m) {
1662 size_t result;
1663 if (MALLOC_PREACTION == 0) {
1664 result = mUSABLe(m);
1665 (void) MALLOC_POSTACTION;
1666 return result;
1667 }
1668 return 0;
1669}
1670
1671void public_mSTATs() {
1672 if (MALLOC_PREACTION == 0) {
1673 mSTATs();
1674 (void) MALLOC_POSTACTION;
1675 }
1676}
1677
1678struct mallinfo public_mALLINFo() {
1679 struct mallinfo m;
1680 if (MALLOC_PREACTION == 0) {
1681 m = mALLINFo();
1682 (void) MALLOC_POSTACTION;
1683 return m;
1684 } else {
1685 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1686 return nm;
1687 }
1688}
1689
1690int public_mALLOPt(int p, int v) {
1691 int result;
1692 if (MALLOC_PREACTION == 0) {
1693 result = mALLOPt(p, v);
1694 (void) MALLOC_POSTACTION;
1695 return result;
1696 }
1697 return 0;
1698}
1699
1700#else
1701
1702int dnmalloc_pthread_init(void) { return 0; }
1703#define DL_STATIC
1704
1705#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1706
1707
1708
1709/* ------------- Optional versions of memcopy ---------------- */
1710
1711
1712#if USE_MEMCPY
1713
1714/*
1715 Note: memcpy is ONLY invoked with non-overlapping regions,
1716 so the (usually slower) memmove is not needed.
1717*/
1718
1719#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1720#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1721
1722#else /* !USE_MEMCPY */
1723
1724/* Use Duff's device for good zeroing/copying performance. */
1725
1726#define MALLOC_ZERO(charp, nbytes) \
1727do { \
1728 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1729 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1730 long mcn; \
1731 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1732 switch (mctmp) { \
1733 case 0: for(;;) { *mzp++ = 0; \
1734 case 7: *mzp++ = 0; \
1735 case 6: *mzp++ = 0; \
1736 case 5: *mzp++ = 0; \
1737 case 4: *mzp++ = 0; \
1738 case 3: *mzp++ = 0; \
1739 case 2: *mzp++ = 0; \
1740 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1741 } \
1742} while(0)
1743
1744#define MALLOC_COPY(dest,src,nbytes) \
1745do { \
1746 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1747 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1748 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1749 long mcn; \
1750 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1751 switch (mctmp) { \
1752 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1753 case 7: *mcdst++ = *mcsrc++; \
1754 case 6: *mcdst++ = *mcsrc++; \
1755 case 5: *mcdst++ = *mcsrc++; \
1756 case 4: *mcdst++ = *mcsrc++; \
1757 case 3: *mcdst++ = *mcsrc++; \
1758 case 2: *mcdst++ = *mcsrc++; \
1759 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1760 } \
1761} while(0)
1762
1763#endif
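
/*
  A minimal, standalone sketch of the same 8-way Duff's device unrolling used
  by MALLOC_ZERO/MALLOC_COPY above, written for a plain size_t array. It is
  illustrative only and not part of dnmalloc: the demo_zero_words name and the
  explicit zero-length guard are ours (the macros above are only ever used
  with sizes of at least a few words).
*/
#if 0 /* illustrative sketch -- not compiled */
#include <stddef.h>

static void demo_zero_words(size_t *p, size_t nwords)
{
  size_t tail;   /* words written by the first, partial pass        */
  long   passes; /* remaining full 8-word passes after that         */

  if (nwords == 0)
    return;
  tail   = nwords % 8;
  passes = (nwords < 8) ? 0 : (long)((nwords - 1) / 8);

  switch (tail) {                       /* jump into the unrolled loop */
  case 0: for (;;) { *p++ = 0;
  case 7:            *p++ = 0;
  case 6:            *p++ = 0;
  case 5:            *p++ = 0;
  case 4:            *p++ = 0;
  case 3:            *p++ = 0;
  case 2:            *p++ = 0;
  case 1:            *p++ = 0; if (passes <= 0) break; passes--; }
  }
}
#endif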
1764
1765/* ------------------ MMAP support ------------------ */
1766
1767
1768#if defined(HAVE_FCNTL_H)
1769#include <fcntl.h>
1770#endif
1771
1772#if defined(HAVE_SYS_MMAN_H)
1773#include <sys/mman.h>
1774#endif
1775
1776#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1777#define MAP_ANONYMOUS MAP_ANON
1778#endif
1779
1780/*
1781 Nearly all versions of mmap support MAP_ANONYMOUS,
1782 so the following is unlikely to be needed, but is
1783 supplied just in case.
1784*/
1785
1786#ifndef MAP_ANONYMOUS
1787
1788/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1789 */
1790void * anon_mmap (void *addr, size_t length, int prot, int flags)
1791{
1792 void * retval = NULL;
1793 int dev_zero_fd = -1; /* File descriptor for /dev/zero. */
1794
1795 dev_zero_fd = open("/dev/zero", O_RDWR);
1796 if (dev_zero_fd >= 0)
1797 {
1798	  retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
1799 /* closing the file descriptor does not unmap the region */
1800 close(dev_zero_fd);
1801 }
1802 return retval;
1803}
1804
1805#define MMAP(addr, size, prot, flags) \
1806 (anon_mmap((addr), (size), (prot), (flags)))
1807
1808
1809#else /* have MAP_ANONYMOUS */
1810
1811#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1812#define MAP_32BIT MAP_ADDR32
1813#endif
1814
1815#if defined(MAP_32BIT)
1816#define MMAP(addr, size, prot, flags) \
1817 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1818#elif defined(__sun)
1819/*
1820 * Hint an address within 32bit address space
1821 */
1822#define MMAP(addr, size, prot, flags) \
1823 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1824#else
1825/* *BSD */
1826#define MMAP(addr, size, prot, flags) \
1827 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1828#endif
1829
1830#endif /* have MAP_ANONYMOUS */
1831
1832
1833/*
1834 ----------------------- Chunk representations -----------------------
1835*/
1836
1837typedef void * mchunkptr;
1838
1839struct chunkinfo {
1840 INTERNAL_SIZE_T prev_size; /* Size of previous in bytes */
1841 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1842 INTERNAL_SIZE_T req; /* Original request size, for guard. */
1843 struct chunkinfo* hash_next; /* contains a pointer to the next chunk
1844 in the linked list if the hash
1845 value is the same as the chunk */
1846 struct chunkinfo* fd; /* double links -- used only if free. */
1847 struct chunkinfo* bk;
1848 mchunkptr chunk;
1849};
1850
1851typedef struct chunkinfo* chunkinfoptr;
1852
1853struct cireginfo {
1854 unsigned long position;
1855 unsigned long *freebitmap;
1856 struct cireginfo* next;
1857 struct chunkinfo *freelist;
1858 struct chunkinfo *begin;
1859 unsigned long freecounter;
1860};
1861
1862/*
1863 ---------- Size and alignment checks and conversions ----------
1864*/
1865
1866/* conversion from malloc headers to user pointers, and back */
1867#define chunk(p) (p->chunk)
1868
1869
1870#define chunk2mem(p) (chunk(p))
1871#define mem2chunk(mem) (hashtable_lookup(mem))
1872
1873/* The smallest possible chunk */
1874/* #define MIN_CHUNK_SIZE 16 */
1875#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
1876# define MIN_CHUNK_SIZE 32
1877#else
1878# define MIN_CHUNK_SIZE 16
1879#endif
1880
1881/* The smallest size we can malloc is an aligned minimal chunk */
1882
1883#define MINSIZE \
1884 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1885
1886/* Check if m has acceptable alignment */
1887
1888#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1889
1890#define GUARD_SIZE 4
1891
1892/*
1893 Check if a request is so large that it would wrap around zero when
1894 padded and aligned. To simplify some other code, the bound is made
1895 low enough so that adding MINSIZE will also not wrap around zero.
1896
1897 Make it 4*MINSIZE.
1898*/
1899
1900#define REQUEST_OUT_OF_RANGE(req) \
1901 ((CHUNK_SIZE_T)(req) >= \
1902 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))
1903
1904/* pad request bytes into a usable size -- internal version */
1905
1906#define request2size(req) \
1907 (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE) ? \
1908 ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1909 MINSIZE)
1910
1911/* Same, except also perform argument check */
1912
1913#define checked_request2size(req, sz) \
1914 if (!REQUEST_OUT_OF_RANGE(req)) { \
1915 (sz) = request2size(req); \
1916 assert((sz-req) >= GUARD_SIZE); \
1917 } else { \
1918 MALLOC_FAILURE_ACTION; \
1919 return 0; \
1920 }
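
/*
  Worked example for request2size(), assuming a 32-bit build where
  MALLOC_ALIGN_MASK is 7 (8-byte alignment), GUARD_SIZE is 4 and MINSIZE is 16
  (the formula is the one above; the concrete numbers depend on the platform):

     request2size(10) : (10 + 4 + 7) & ~7 = 16   (6 spare bytes >= GUARD_SIZE)
     request2size(13) : (13 + 4 + 7) & ~7 = 24
     request2size(1)  : 1 + 4 + 7 = 12 < MINSIZE, so the result is MINSIZE = 16

  checked_request2size() additionally rejects requests so large that this
  padding would wrap around zero.
*/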
1921
1922#if PARANOIA > 2
1923static char * guard_set_p;
1924static char * guard_set_q;
1925
1926#define guard_set(guard, P, request, sz) \
1927 assert((sz-request) >= GUARD_SIZE); \
1928 guard_set_p = (char*)(chunk(P)); \
1929 guard_set_p += request; \
1930 VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE); \
1931 guard_set_q = (char*)(guard); \
1932 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1933 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1934 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1935 *guard_set_p = *guard_set_q; \
1936 VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE); \
1937 (P)->req = request
1938
1939#define guard_check(guard, P) \
1940 VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1941 assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1942 VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
1943
1944#else
1945#define guard_set(guard, P, request, sz) ((void)0)
1946#define guard_check(guard, P) ((void)0)
1947#endif /* PARANOIA > 2 */
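
/*
  The guard_set()/guard_check() macros above implement a 4-byte random canary
  stored directly after the caller's requested bytes and verified again on
  free/realloc. The standalone sketch below shows the same idea with memcpy
  and memcmp; the demo_* names are ours and the Valgrind client requests and
  the (P)->req bookkeeping are omitted.
*/
#if 0 /* illustrative sketch -- not compiled */
#include <string.h>
#include <assert.h>

#define DEMO_GUARD_SIZE 4

/* Copy the canary into the slack bytes right after 'request' user bytes. */
static void demo_guard_set(char *mem, size_t request, const char *guard)
{
  memcpy(mem + request, guard, DEMO_GUARD_SIZE);
}

/* Abort (via assert) if the canary was overwritten before the block is freed. */
static void demo_guard_check(const char *mem, size_t request, const char *guard)
{
  assert(0 == memcmp(mem + request, guard, DEMO_GUARD_SIZE));
}
#endif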
1948
1949/* dnmalloc forward declarations */
1950static char * dnmalloc_arc4random(void);
1951static void dnmalloc_init (void);
1952static void malloc_mmap_state(void);
1953static void cireg_extend (void);
1954static chunkinfoptr cireg_getfree (void);
1955static void hashtable_add (chunkinfoptr ci);
1956static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1957static void hashtable_remove (mchunkptr p);
1958static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1959static chunkinfoptr hashtable_lookup (mchunkptr p);
1960static chunkinfoptr next_chunkinfo (chunkinfoptr ci);
1961static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1962
1963
1964
1965/*
1966 --------------- Physical chunk operations ---------------
1967*/
1968
1969
1970/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1971#define PREV_INUSE 0x1
1972
1973/* extract inuse bit of previous chunk */
1974#define prev_inuse(p) ((p)->size & PREV_INUSE)
1975
1976/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1977#define IS_MMAPPED 0x2
1978
1979/* check for mmap()'ed chunk */
1980#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1981
1982
1983/* size field is or'ed when the chunk is in use */
1984#define INUSE 0x4
1985
1986/* extract inuse bit of chunk */
1987#define inuse(p) ((p)->size & INUSE)
1988
1989/*
1990 Bits to mask off when extracting size
1991
1992 Note: IS_MMAPPED is intentionally not masked off from size field in
1993 macros for which mmapped chunks should never be seen. This should
1994 cause helpful core dumps to occur if it is tried by accident by
1995 people extending or adapting this malloc.
1996*/
1997#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
1998
1999/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
2000#define SIZE_NOMMAP (PREV_INUSE|INUSE)
2001
2002/* Get size, ignoring use bits */
2003#define chunksize(p) ((p)->size & ~(SIZE_BITS))
2004
2005/* Ptr to chunkinfo of next physical malloc_chunk. */
2006#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
2007
2008/* Treat space at ptr + offset as a chunk */
2009#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2010
2011/* set/clear chunk as being inuse without otherwise disturbing */
2012#define set_inuse(p) ((p)->size |= INUSE)
2013
2014#define clear_inuse(p) ((p)->size &= ~(INUSE))
2015
2016#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2017
2018#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2019
2020static void set_previnuse_next (chunkinfoptr p)
2021{
2022 chunkinfoptr q;
2023 q = next_chunkinfo (p);
2024 if (q)
2025 set_previnuse (q);
2026}
2027
2028#define set_all_inuse(p) \
2029set_inuse(p); \
2030set_previnuse_next(p);
2031
2032
2033/* Set size at head, without disturbing its use bit */
2034#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2035
2036/* Set size/use field */
2037#define set_head(p, s) ((p)->size = (s))
2038
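/*
  Worked example for the flag bits above: because chunk sizes are a multiple
  of the alignment (assumed to be the usual 8 bytes or more), the low three
  bits of the size field are free to hold flags. For a real chunk size of 0x68:

     p->size = 0x68 | PREV_INUSE | INUSE  ->  0x6D
     chunksize(p)  = 0x6D & ~SIZE_BITS    ->  0x68
     inuse(p)      = 0x6D & INUSE         ->  nonzero
     prev_inuse(p) = 0x6D & PREV_INUSE    ->  nonzero
*/
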
2039/*
2040 Bins
2041
2042 An array of bin headers for free chunks. Each bin is doubly
2043 linked. The bins are approximately proportionally (log) spaced.
2044    There are a lot of these bins (96). This may look excessive, but
2045 works very well in practice. Most bins hold sizes that are
2046 unusual as malloc request sizes, but are more usual for fragments
2047 and consolidated sets of chunks, which is what these bins hold, so
2048 they can be found quickly. All procedures maintain the invariant
2049 that no consolidated chunk physically borders another one, so each
2050    chunk in a list is known to be preceded and followed by either
2051 inuse chunks or the ends of memory.
2052
2053 Chunks in bins are kept in size order, with ties going to the
2054 approximately least recently used chunk. Ordering isn't needed
2055 for the small bins, which all contain the same-sized chunks, but
2056 facilitates best-fit allocation for larger chunks. These lists
2057 are just sequential. Keeping them in order almost never requires
2058 enough traversal to warrant using fancier ordered data
2059 structures.
2060
2061 Chunks of the same size are linked with the most
2062 recently freed at the front, and allocations are taken from the
2063 back. This results in LRU (FIFO) allocation order, which tends
2064 to give each chunk an equal opportunity to be consolidated with
2065 adjacent freed chunks, resulting in larger free chunks and less
2066 fragmentation.
2067
2068    To simplify use in double-linked lists, each bin header acts
2069    as a chunkinfo. This avoids special-casing for headers.
2070    In dnmalloc each bin header is a full struct chunkinfo kept
2071    inside the malloc_state; only its fd and bk fields are
2072    actually used for the bin's circular list.
2073 */
2074
2075typedef struct chunkinfo* mbinptr;
2076
2077/* addressing -- note that bin_at(0) does not exist */
2078#define bin_at(m, i) (&(m)->bins[i])
2079
2080/* analog of ++bin */
2081#define next_bin(b) (b+1)
2082
2083/* Reminders about list directionality within bins */
2084#define first(b) ((b)->fd)
2085#define last(b) ((b)->bk)
2086
2087/* Take a chunk off a bin list */
2088#define unlink(P, BK, FD) { \
2089 FD = P->fd; \
2090 BK = P->bk; \
2091 FD->bk = BK; \
2092 BK->fd = FD; \
2093}
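
/*
  The unlink() macro above splices a chunkinfo out of its circular, doubly
  linked bin list in O(1). A minimal sketch of the same splice on a generic
  node type (the demo_* names are ours):
*/
#if 0 /* illustrative sketch -- not compiled */
struct demo_node { struct demo_node *fd, *bk; };

static void demo_unlink(struct demo_node *p)
{
  struct demo_node *fd = p->fd;   /* FD = P->fd  */
  struct demo_node *bk = p->bk;   /* BK = P->bk  */
  fd->bk = bk;                    /* FD->bk = BK */
  bk->fd = fd;                    /* BK->fd = FD */
}
#endif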
2094
2095/*
2096 Indexing
2097
2098    Bins for sizes below MIN_LARGE_SIZE (256 bytes) contain chunks of
2099    all the same size, spaced 8 bytes apart. Larger bins are
2100    approximately logarithmically spaced:
2101
2102     32 small bins of width 8
2103     4 large bins for each power of two of chunk size,
2104       covering sizes from 256 bytes up to 16 MB
2105       (NBINS - NSMALLBINS = 64 large bins in total)
2106     1 bin for whatever is left (the last bin, NBINS-1)
2107
2108
2109    The large bins top out well above the mmap threshold because we
2110    expect to service really large requests via mmap.
2111*/
2112
2113#define NBINS 96
2114#define NSMALLBINS 32
2115#define SMALLBIN_WIDTH 8
2116#define MIN_LARGE_SIZE 256
2117
2118#define in_smallbin_range(sz) \
2119 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2120
2121#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2122
2123/*
2124 Compute index for size. We expect this to be inlined when
2125 compiled with optimization, else not, which works out well.
2126*/
2127static int largebin_index(size_t sz) {
2128
2129 unsigned long xx = sz >> SMALLBIN_WIDTH;
2130
2131 if (xx < 0x10000)
2132 {
2133      unsigned int  m;           /* bit position of highest set bit of xx */
2134
2135 /* On intel, use BSRL instruction to find highest bit */
2136#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)
2137
2138 unsigned int x = (unsigned int) xx;
2139
2140 __asm__("bsrl %1,%0\n\t"
2141 : "=r" (m)
2142 : "rm" (x));
2143
2144#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)
2145
2146 __asm__("bsrq %1,%0\n\t"
2147 : "=r" (m)
2148 : "rm" (xx));
2149
2150#else
2151
2152 /* Taken from Bit Twiddling Hacks
2153 * http://graphics.stanford.edu/~seander/bithacks.html
2154 * public domain
2155 */
2156 unsigned int v = (unsigned int) xx;
2157 register unsigned int shift;
2158
2159 m = (v > 0xFFFF) << 4; v >>= m;
2160 shift = (v > 0xFF ) << 3; v >>= shift; m |= shift;
2161 shift = (v > 0xF ) << 2; v >>= shift; m |= shift;
2162 shift = (v > 0x3 ) << 1; v >>= shift; m |= shift;
2163 m |= (v >> 1);
2164
2165#endif
2166
2167 /* Use next 2 bits to create finer-granularity bins */
2168 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2169 }
2170 else
2171 {
2172 return NBINS-1;
2173 }
2174}
2175
2176#define bin_index(sz) \
2177 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
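
/*
  Worked example for bin_index(), following the definitions above
  (NSMALLBINS 32, SMALLBIN_WIDTH 8, MIN_LARGE_SIZE 256):

     sz = 128  : small bin range, smallbin_index = 128 >> 3            -> 16
     sz = 1024 : largebin_index: xx = 1024 >> 8 = 4, highest set bit m = 2,
                 index = NSMALLBINS + (m << 2) + ((1024 >> (m + 6)) & 3)
                       = 32 + 8 + 0                                    -> 40
*/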
2178
2179/*
2180 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2181 first bin that is maintained in sorted order. This must
2182 be the smallest size corresponding to a given bin.
2183
2184 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2185 best fit guarantees to sometimes speed up malloc by increasing value.
2186 Doing this means that malloc may choose a chunk that is
2187 non-best-fitting by up to the width of the bin.
2188
2189 Some useful cutoff values:
2190 512 - all bins sorted
2191 2560 - leaves bins <= 64 bytes wide unsorted
2192 12288 - leaves bins <= 512 bytes wide unsorted
2193 65536 - leaves bins <= 4096 bytes wide unsorted
2194 262144 - leaves bins <= 32768 bytes wide unsorted
2195 -1 - no bins sorted (not recommended!)
2196*/
2197
2198/* #define FIRST_SORTED_BIN_SIZE 65536 */
2199
2200#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2201
2202
2203/*
2204 Unsorted chunks
2205
2206 All remainders from chunk splits, as well as all returned chunks,
2207 are first placed in the "unsorted" bin. They are then placed
2208 in regular bins after malloc gives them ONE chance to be used before
2209 binning. So, basically, the unsorted_chunks list acts as a queue,
2210 with chunks being placed on it in free (and malloc_consolidate),
2211 and taken off (to be either used or placed in bins) in malloc.
2212*/
2213
2214/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2215#define unsorted_chunks(M) (bin_at(M, 1))
2216
2217/*
2218 Top
2219
2220 The top-most available chunk (i.e., the one bordering the end of
2221 available memory) is treated specially. It is never included in
2222 any bin, is used only if no other chunk is available, and is
2223 released back to the system if it is very large (see
2224 M_TRIM_THRESHOLD). Because top initially
2225 points to its own bin with initial zero size, thus forcing
2226 extension on the first malloc request, we avoid having any special
2227 code in malloc to check whether it even exists yet. But we still
2228 need to do so when getting memory from system, so we make
2229 initial_top treat the bin as a legal but unusable chunk during the
2230 interval between initialization and the first call to
2231 sYSMALLOc. (This is somewhat delicate, since it relies on
2232 the 2 preceding words to be zero during this interval as well.)
2233*/
2234
2235/* Conveniently, the unsorted bin can be used as dummy top on first call */
2236#define initial_top(M) (unsorted_chunks(M))
2237
2238/*
2239 Binmap
2240
2241 To help compensate for the large number of bins, a one-level index
2242 structure is used for bin-by-bin searching. `binmap' is a
2243 bitvector recording whether bins are definitely empty so they can
2244    be skipped over during traversals. The bits are NOT always
2245 cleared as soon as bins are empty, but instead only
2246 when they are noticed to be empty during traversal in malloc.
2247*/
2248
2249/* Conservatively use 32 bits per map word, even if on 64bit system */
2250#define BINMAPSHIFT 5
2251#define BITSPERMAP (1U << BINMAPSHIFT)
2252#define BINMAPSIZE (NBINS / BITSPERMAP)
2253
2254#define idx2block(i) ((i) >> BINMAPSHIFT)
2255#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2256
2257#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2258#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2259#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
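
/*
  Worked example for the binmap macros: for bin i = 40,
  idx2block(40) = 40 >> 5 = 1 and idx2bit(40) = 1 << (40 & 31) = 1 << 8,
  so mark_bin(m, 40) sets bit 8 of m->binmap[1], get_binmap() tests that bit,
  and unmark_bin() clears it once the bin is noticed to be empty during a scan.
*/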
2260
2261/*
2262 Fastbins
2263
2264 An array of lists holding recently freed small chunks. Fastbins
2265 are not doubly linked. It is faster to single-link them, and
2266 since chunks are never removed from the middles of these lists,
2267 double linking is not necessary. Also, unlike regular bins, they
2268 are not even processed in FIFO order (they use faster LIFO) since
2269 ordering doesn't much matter in the transient contexts in which
2270 fastbins are normally used.
2271
2272 Chunks in fastbins keep their inuse bit set, so they cannot
2273 be consolidated with other free chunks. malloc_consolidate
2274 releases all chunks in fastbins and consolidates them with
2275 other free chunks.
2276*/
2277
2278typedef struct chunkinfo* mfastbinptr;
2279
2280/* offset 2 to use otherwise unindexable first 2 bins */
2281#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2282
2283/* The maximum fastbin request size we support */
2284#define MAX_FAST_SIZE 80
2285
2286#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
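
/*
  Worked example for the fastbin macros, assuming 8-byte alignment (GUARD_SIZE
  is 4 as defined above): a 16-byte chunk lands in fastbin_index(16) =
  (16 >> 3) - 2 = 0 and a 32-byte chunk in index 2. The largest supported
  request, MAX_FAST_SIZE = 80, pads to request2size(80) = (80 + 4 + 7) & ~7
  = 88, so fastbin_index(88) = 9 and therefore NFASTBINS = 10 under these
  assumptions.
*/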
2287
2288/*
2289 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2290 that triggers automatic consolidation of possibly-surrounding
2291 fastbin chunks. This is a heuristic, so the exact value should not
2292 matter too much. It is defined at half the default trim threshold as a
2293 compromise heuristic to only attempt consolidation if it is likely
2294 to lead to trimming. However, it is not dynamically tunable, since
2295    consolidation reduces fragmentation surrounding large chunks even
2296 if trimming is not used.
2297*/
2298
2299#define FASTBIN_CONSOLIDATION_THRESHOLD \
2300 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2301
2302/*
2303 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2304 they are used as flags.
2305*/
2306
2307/*
2308 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2309 freed chunks at all. It is set true when entering a chunk into any
2310 bin.
2311*/
2312
2313#define ANYCHUNKS_BIT (1U)
2314
2315#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2316#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2317#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2318
2319/*
2320 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2321 some fastbin chunks. It is set true on entering a chunk into any
2322 fastbin, and cleared only in malloc_consolidate.
2323*/
2324
2325#define FASTCHUNKS_BIT (2U)
2326
2327#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2328#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2329#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2330
2331/*
2332 Set value of max_fast.
2333 Use impossibly small value if 0.
2334*/
2335
2336#define set_max_fast(M, s) \
2337 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2338 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2339
2340#define get_max_fast(M) \
2341 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
2342
2343
2344/*
2345 morecore_properties is a status word holding dynamically discovered
2346 or controlled properties of the morecore function
2347*/
2348
2349#define MORECORE_CONTIGUOUS_BIT (1U)
2350
2351#define contiguous(M) \
2352 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2353#define noncontiguous(M) \
2354 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2355#define set_contiguous(M) \
2356 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2357#define set_noncontiguous(M) \
2358 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2359
2360#define MORECORE_32BIT_BIT (2U)
2361
2362#define morecore32bit(M) \
2363 (((M)->morecore_properties & MORECORE_32BIT_BIT))
2364#define nonmorecore32bit(M) \
2365 (((M)->morecore_properties & MORECORE_32BIT_BIT) == 0)
2366#define set_morecore32bit(M) \
2367 ((M)->morecore_properties |= MORECORE_32BIT_BIT)
2368#define set_nonmorecore32bit(M) \
2369 ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2370
2371
2372
2373/* ----------------- dnmalloc -------------------- */
2374
2375/* size of pages */
2376#define PGSIZE malloc_getpagesize
2377/* pointer size */
2378#define PTRSIZE sizeof(long)
2379
2380
2381
2382/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2383 address space: the hashtable has granularity of 16*8, set it to something
2384 closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2385 page)
2386*/
2387
2388/* The maximum heap size that dnmalloc can operate with
2389 * represented in hex to avoid annoying gcc warning
2390 *
2391 * Avoid integer overflow, cover complete 32bit address
2392 * space for portability. With deferred allocation, the
2393 * hashtable size is a non-issue.
2394 */
2395#define HEAPMAXSIZE_HALF 0x80000000UL
2396
2397/* How many elements are stored in the linked list */
2398#define LINKEDLSTELS 8
2399
2400/* Minimum size of a chunk */
2401
2402#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2403# define MINCHUNKSIZE 32
2404#else
2405# define MINCHUNKSIZE 16
2406#endif
2407
2408
2409	/* The number of hashtable entries for each page:
2410	Page size divided by the number of elements in the linked lists
2411divided by the minimum chunk size
2412*/
2413#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2414
2415	/* The number of hashtable entries needed to manage the memory:
2416	Maximum heap size divided by page size, multiplied by the number
2417	of chunkinfos per page
2418*/
2419#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2420
2421/* Initial size of the map for the hashtable
2422	Number of entries multiplied by pointer size
2423*/
2424#define HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
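
/*
  Worked example of the sizing arithmetic above, assuming a 4096-byte page
  and a 32-bit build (MINCHUNKSIZE 16, PTRSIZE 4), with LINKEDLSTELS 8:

     CHUNKINFOPAGE = 4096 / 8 / 16                  = 32
     AMOUNTHASH    = (0x80000000 / 4096) * 32 * 2   = 33554432 entries
     HASHTABLESIZE = 33554432 * 4                   = 128 MB

  Only address space is reserved up front; with MAP_NORESERVE / deferred
  allocation the pages are not committed until touched, which is why the
  comment above calls the hashtable size a non-issue.
*/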
2425
2426/* Amount of free chunks that the system should allocate at the start */
2427#define NUMBER_FREE_CHUNKS 32768
2428
2429/* Initial size of the chunk info region,
2430also used when growing the region */
2431#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
2432
2433/* Start address of the heap */
2434char *startheap;
2435
2436/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2437chunkinfoptr *hashtable;
2438
2439/* Current chunkinfo region */
2440struct cireginfo *currciinfo = 0;
2441struct cireginfo *firstciinfo = 0;
2442
2443unsigned long totalcictr = 0;
2444
2445
2446/* Initialize the area for chunkinfos and the hashtable and protect
2447 * it with non-writable pages
2448 */
2449static void
2450dnmalloc_init ()
2451{
2452 void *hashtb;
2453 int mprot;
2454 int flags = MAP_PRIVATE;
2455
2456 /* Allocate the malloc_state struct */
2457 malloc_mmap_state();
2458
2459	  /* Use MAP_NORESERVE if available (Solaris, HP-UX); most other
2460	   * systems use deferred allocation anyway.
2461 */
2462#ifdef MAP_NORESERVE
2463 flags |= MAP_NORESERVE;
2464#endif
2465
2466 /* Always start at 0, hashtable covers whole 32bit address space
2467 */
2468#define STARTHEAP_IS_ZERO
2469 startheap = 0;
2470
2471 /* Map space for the hashtable */
2472#if PARANOIA > 1
2473 hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
2474#else
2475 hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
2476#endif
2477
2478#ifdef NDEBUG
2479 if (hashtb == MAP_FAILED) {
2480 fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
2481 abort ();
2482 }
2483#else
2484 assert(hashtb != MAP_FAILED);
2485#endif
2486
2487	  /* Guard page before the hashtable (non-writable) */
2488 mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
2489#ifdef NDEBUG
2490 if (mprot == -1) {
2491 fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
2492 strerror (errno));
2493 abort ();
2494 }
2495#else
2496 assert(mprot != -1);
2497#endif
2498
2499 /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
2500 hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);
2501
2502	  /* Guard page after the hashtable (non-writable) */
2503#if PARANOIA > 1
2504 mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
2505#ifdef NDEBUG
2506 if (mprot == -1) {
2507 fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
2508 strerror (errno));
2509 abort ();
2510 }
2511#else
2512 assert(mprot != -1);
2513#endif
2514#endif
2515}
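
/*
  dnmalloc_init() above (and malloc_mmap_state() further down) both use the
  same pattern: map the needed space plus two extra pages and turn the first
  and last page into PROT_NONE guard pages, so stray writes fault instead of
  silently corrupting allocator metadata. A standalone sketch of that pattern
  (the demo_* name is ours; 'size' is assumed to be a multiple of the page
  size, and MAP_ANONYMOUS is assumed to be available):
*/
#if 0 /* illustrative sketch -- not compiled */
#include <sys/mman.h>
#include <stddef.h>

static void *demo_map_with_guards(size_t size, size_t pagesize)
{
  char *base = (char *) mmap(0, size + 2 * pagesize, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    return NULL;
  /* guard page in front ... */
  if (mprotect(base, pagesize, PROT_NONE) == -1)
    return NULL;
  /* ... and guard page behind the usable area */
  if (mprotect(base + pagesize + size, pagesize, PROT_NONE) == -1)
    return NULL;
  return base + pagesize;  /* caller gets the writable area between the guards */
}
#endif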
2516
2517
2518
2519	/* Extend the available chunkinfo space by mapping a new region and linking it into the list of regions */
2520static void
2521cireg_extend ()
2522{
2523 void *newcireg;
2524 int mprot;
2525 struct cireginfo *tempciinfo = 0;
2526
2527#if PARANOIA > 1
2528 newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
2529#else
2530 newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
2531#endif
2532
2533#ifdef NDEBUG
2534 if (newcireg == MAP_FAILED)
2535 {
2536 fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
2537 strerror (errno));
2538 abort ();
2539 }
2540#else
2541 assert(newcireg != MAP_FAILED);
2542#endif
2543 mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
2544#ifdef NDEBUG
2545 if (mprot == -1) {
2546 fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
2547 strerror (errno));
2548 abort ();
2549 }
2550#else
2551 assert(mprot != -1);
2552#endif
2553 newcireg = ((char*)newcireg)+PGSIZE;
2554
2555#if PARANOIA > 1
2556 mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
2557#ifdef NDEBUG
2558 if (mprot == -1) {
2559 fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
2560 strerror (errno));
2561 abort ();
2562 }
2563#else
2564 assert(mprot != -1);
2565#endif
2566#endif
2567
2568 tempciinfo = currciinfo;
2569 currciinfo = (struct cireginfo *) newcireg;
2570 if (tempciinfo)
2571 tempciinfo->next = currciinfo;
2572 currciinfo->position = 1;
2573 currciinfo->freecounter = NUMBER_FREE_CHUNKS;
2574 if (!firstciinfo)
2575 firstciinfo = currciinfo;
2576 totalcictr++;
2577 VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
2578}
2579
2580
2581/* Get a free chunkinfo */
2582static chunkinfoptr
2583cireg_getfree ()
2584{
2585 chunkinfoptr freeci;
2586 chunkinfoptr freelst = 0;
2587 struct cireginfo *newciinfo = firstciinfo;
2588
2589 if (newciinfo) {
2590 freelst = newciinfo->freelist;
2591
2592 if (!freelst && newciinfo->next) {
2593 do {
2594 newciinfo = newciinfo->next;
2595 freelst = newciinfo->freelist;
2596 } while (!freelst && newciinfo->next);
2597 }
2598 }
2599
2600 /* Check if there are any free chunkinfos on the list of free chunkinfos */
2601 if (freelst)
2602 {
2603 freeci = freelst;
2604 newciinfo->freecounter--;
2605 newciinfo->freelist = freelst->fd;
2606
2607 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2608 sizeof(struct chunkinfo));
2609
2610 freeci->prev_size = 0;
2611 freeci->size = 0;
2612 freeci->req = 0;
2613 freeci->hash_next = NULL;
2614 freeci->fd = NULL;
2615 freeci->bk = NULL;
2616 freeci->chunk = NULL;
2617 return (freeci);
2618 }
2619 else
2620 {
2621	      /* No free chunkinfos, check if the chunkinfo region still has room
2622 * for a chunkinfo. If not, extend the region.
2623 */
2624 if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2625 cireg_extend ();
2626 /* Get a chunkinfo from the chunkinfo region */
2627 freeci = (chunkinfoptr) currciinfo + currciinfo->position;
2628 currciinfo->freecounter--;
2629 currciinfo->position++;
2630
2631 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2632 sizeof(struct chunkinfo));
2633
2634 return (freeci);
2635 }
2636}
2637
2638static void freeciregion(struct cireginfo *freeme) {
2639 /* free the chunkinfo region */
2640 struct cireginfo *newciinfo = firstciinfo;
2641 struct cireginfo *prevciinfo = firstciinfo;
2642 void *unmapme;
2643
2644 while (newciinfo && newciinfo != freeme) {
2645 prevciinfo = newciinfo;
2646 newciinfo = newciinfo->next;
2647 }
2648 assert(freeme == newciinfo); /* rw */
2649 assert(newciinfo != NULL); /* rw */
2650 if (newciinfo)
2651 prevciinfo->next = newciinfo->next;
2652 unmapme = (void *) ((char*)freeme - PGSIZE);
2653 VALGRIND_DESTROY_MEMPOOL((char*)freeme);
2654#if PARANOIA > 1
2655 munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2656#else
2657 munmap(unmapme, CIREGSIZE+PGSIZE);
2658#endif
2659}
2660
2661
2662static void freecilst_add(chunkinfoptr p) {
2663
2664 struct cireginfo *newciinfo;
2665
2666 newciinfo = currciinfo;
2667  if (((chunkinfoptr) newciinfo < p) && (p < ((chunkinfoptr) newciinfo + NUMBER_FREE_CHUNKS))) { /* bound in chunkinfo units */
2668 p->fd = newciinfo->freelist;
2669 newciinfo->freelist = p;
2670 newciinfo->freecounter++;
2671 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2672 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2673 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2674 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2675 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2676 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
2677 } else {
2678 newciinfo = firstciinfo;
2679 if (newciinfo) {
2680 do {
2681 	if (((chunkinfoptr) newciinfo < p) && (p < ((chunkinfoptr) newciinfo + NUMBER_FREE_CHUNKS))) { /* bound in chunkinfo units */
2682 p->fd = newciinfo->freelist;
2683 newciinfo->freelist = p;
2684 newciinfo->freecounter++;
2685 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2686 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2687 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2688 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2689 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2690 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
2691 if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2692 freeciregion(newciinfo);
2693 break;
2694 }
2695 newciinfo = newciinfo->next;
2696 } while (newciinfo);
2697 }
2698 }
2699}
2700
2701/* Calculate the hash table entry for a chunk */
2702#ifdef STARTHEAP_IS_ZERO
2703#define hash(p) (((unsigned long) p) >> 7)
2704#else
2705#define hash(p) (((unsigned long) p - (unsigned long) startheap) >> 7)
2706#endif
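
/*
  Worked example for hash(): with startheap == 0 every bucket covers a
  128-byte (1 << 7) slice of the address space. For instance, the addresses
  0x08048000 and 0x08048070 both hash to 0x08048000 >> 7 = 0x100900 and are
  chained via hash_next in the same bucket, while 0x08048080 falls into the
  next bucket, 0x100901. Since chunks are at least MINCHUNKSIZE bytes apart,
  no more than 128/MINCHUNKSIZE chunkinfos (8 on 32-bit) can share a bucket,
  which is the LINKEDLSTELS figure used in the sizing above.
*/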
2707
2708static void
2709hashtable_add (chunkinfoptr ci)
2710{
2711 chunkinfoptr temp, next;
2712 unsigned long hashval;
2713 mchunkptr cic = chunk (ci);
2714
2715 hashval = hash (cic);
2716
2717 if (hashval < AMOUNTHASH) {
2718
2719 temp = hashtable[hashval];
2720
2721#ifdef DNMALLOC_DEBUG
2722 fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
2723#endif
2724
2725 /* If no pointer to a chunk info list is stored at this location
2726 * in the hashtable or if the chunk's address is smaller than the
2727 * one present, add the chunk to the front of the linked list
2728 */
2729 if (temp == 0 || chunk (temp) > cic)
2730 {
2731 ci->hash_next = temp;
2732 hashtable[hashval] = ci;
2733 if (!temp) /* more likely case */
2734 goto out;
2735 temp->prev_size = chunksize(ci);
2736 return;
2737 }
2738 else
2739 {
2740 /* We must place the chunk in the linked list for this hashentry
2741 * Loop to end of list or to a position where temp's chunk's address
2742 * is larger than the new chunkinfo's chunk's address
2743 */
2744 if (!temp->hash_next || (chunk (temp->hash_next) > cic))
2745 {
2746 ci->hash_next = temp->hash_next;
2747 temp->hash_next = ci;
2748 }
2749 else
2750 {
2751 while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
2752 {
2753 temp = temp->hash_next;
2754 }
2755 /* Place in linked list if not already there */
2756 if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
2757 {
2758 ci->hash_next = temp->hash_next;
2759 temp->hash_next = ci;
2760 }
2761 }
2762 }
2763 }
2764 else {
2765#ifdef DNMALLOC_CHECKS
2766 if (hashval >= AMOUNTHASH) {
2767 fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
2768 abort();
2769 }
2770#else
2771 assert(hashval < AMOUNTHASH);
2772#endif
2773 }
2774
2775 out:
2776 next = next_chunkinfo(ci);
2777 if (!next)
2778 return;
2779 next->prev_size = chunksize(ci);
2780}
2781
2782static void
2783hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2784{
2785 chunkinfoptr next;
2786
2787#ifdef DNMALLOC_DEBUG
2788 fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert),
2789	  (unsigned long)hash(chunk(ci_insert)));
2790#endif
2791
2792 if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2793 hashtable_add(ci_insert);
2794 }
2795 else {
2796
2797 ci_insert->hash_next = ci_orig->hash_next;
2798 ci_orig->hash_next = ci_insert;
2799
2800 /* added for prevsize */
2801 if (!(ci_insert->hash_next))
2802 next = next_chunkinfo(ci_insert);
2803 else
2804 next = ci_insert->hash_next;
2805
2806 if (!next)
2807 {
2808 ci_insert->prev_size = chunksize(ci_orig);
2809 }
2810 else
2811 {
2812 next->prev_size = chunksize(ci_insert);
2813 ci_insert->prev_size = chunksize(ci_orig);
2814 }
2815 }
2816}
2817
2818static void
2819hashtable_remove (mchunkptr p)
2820{
2821 chunkinfoptr prevtemp, temp;
2822 unsigned long hashval;
2823
2824 hashval = hash (p);
2825#ifdef DNMALLOC_DEBUG
2826 fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
2827#endif
2828 assert(hashval < AMOUNTHASH); /* rw */
2829 prevtemp = temp = hashtable[hashval];
2830  if (temp && chunk (temp) == p) {
2831 hashtable[hashval] = temp->hash_next;
2832 }
2833 else
2834 {
2835 if (temp && chunk (temp) != p) {
2836 do
2837 {
2838 prevtemp = temp;
2839 temp = temp->hash_next;
2840 } while (temp && chunk (temp) != p);
2841 }
2842#ifdef DNMALLOC_CHECKS
2843 if (!temp) {
2844 fprintf (stderr,
2845 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2846 p, hashval);
2847 abort();
2848 }
2849#else
2850 assert(temp != NULL);
2851#endif
2852 prevtemp->hash_next = temp->hash_next;
2853 }
2854}
2855
2856/* mmapped chunks are multiples of pagesize, no hash_nexts,
2857 * just remove from the hashtable
2858 */
2859#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2860
2861static void
2862hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
2863{
2864 unsigned long hashval;
2865 chunkinfoptr next;
2866
2867#ifdef DNMALLOC_DEBUG
2868 fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
2869#endif
2870
2871 if (ci_orig->hash_next != ci_todelete) {
2872 hashval = hash(chunk(ci_todelete));
2873 assert(hashval < AMOUNTHASH); /* rw */
2874#ifdef DNMALLOC_CHECKS
2875 if (hashtable[hashval] != ci_todelete ) {
2876 fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
2877 }
2878#else
2879 assert(hashtable[hashval] == ci_todelete);
2880#endif
2881 hashtable[hashval] = ci_todelete->hash_next;
2882 }
2883
2884 else {
2885 ci_orig->hash_next = ci_todelete->hash_next;
2886 if (!ci_orig->hash_next) {
2887 next = next_chunkinfo(ci_orig);
2888 } else {
2889 next = ci_orig->hash_next;
2890 }
2891 if (next)
2892 next->prev_size = chunksize(ci_orig);
2893
2894 }
2895}
2896
2897
2898static chunkinfoptr
2899hashtable_lookup (mchunkptr p)
2900{
2901 chunkinfoptr ci;
2902 unsigned long hashval;
2903
2904 /* if we were called wrongly
2905 * if ((char *) p < startheap) return 0;
2906 */
2907 if ((char *) p >= startheap)
2908 {
2909 hashval = hash (p);
2910 assert(hashval < AMOUNTHASH); /* rw */
2911 ci = hashtable[hashval];
2912 if (ci && chunk (ci) == p)
2913 return ci;
2914
2915 if (ci) {
2916 do {
2917 ci = ci->hash_next;
2918 } while (ci && chunk (ci) != p);
2919 }
2920#ifdef DNMALLOC_CHECKS
2921 /* This should never occur but if it does, we'd like to know */
2922 if (!ci) {
2923 fprintf (stderr,
2924 "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2925 p, hashval);
2926 abort();
2927 }
2928#else
2929 assert(ci != NULL);
2930#endif
2931 return ci;
2932 }
2933 return 0;
2934}
2935
2936
2937
2938/*
2939 ----------- Internal state representation and initialization -----------
2940*/
2941
2942struct malloc_state {
2943
2944 /* The maximum chunk size to be eligible for fastbin */
2945 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2946
2947 /* Fastbins */
2948 mfastbinptr fastbins[NFASTBINS];
2949
2950 /* Base of the topmost chunk -- not otherwise kept in a bin */
2951 chunkinfoptr top;
2952
2953 /* The remainder from the most recent split of a small request */
2954 chunkinfoptr last_remainder;
2955
2956 /* Normal bins */
2957 struct chunkinfo bins[NBINS];
2958
2959 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2960 unsigned int binmap[BINMAPSIZE+1];
2961
2962 /* Tunable parameters */
2963 CHUNK_SIZE_T trim_threshold;
2964 INTERNAL_SIZE_T top_pad;
2965 INTERNAL_SIZE_T mmap_threshold;
2966
2967 /* Memory map support */
2968 int n_mmaps;
2969 int n_mmaps_max;
2970 int max_n_mmaps;
2971
2972 /* Cache malloc_getpagesize */
2973 unsigned int pagesize;
2974
2975 /* Canary */
2976 char guard_stored[GUARD_SIZE];
2977
2978 /* Track properties of MORECORE */
2979 unsigned int morecore_properties;
2980
2981 /* Statistics */
2982 INTERNAL_SIZE_T mmapped_mem;
2983 INTERNAL_SIZE_T sbrked_mem;
2984 INTERNAL_SIZE_T max_sbrked_mem;
2985 INTERNAL_SIZE_T max_mmapped_mem;
2986 INTERNAL_SIZE_T max_total_mem;
2987};
2988
2989typedef struct malloc_state *mstate;
2990
2991/*
2992 There is exactly one instance of this struct in this malloc.
2993 If you are adapting this malloc in a way that does NOT use a static
2994 malloc_state, you MUST explicitly zero-fill it before using. This
2995 malloc relies on the property that malloc_state is initialized to
2996 all zeroes (as is true of C statics).
2997*/
2998
2999static struct malloc_state * av_ = NULL; /* never directly referenced */
3000
3001/*
3002 All uses of av_ are via get_malloc_state().
3003 At most one "call" to get_malloc_state is made per invocation of
3004 the public versions of malloc and free, but other routines
3005  that in turn invoke malloc and/or free may call more than once.
3006 Also, it is called in check* routines if DEBUG is set.
3007*/
3008
3009#define get_malloc_state() (av_)
3010
3011/*
3012 Initialize a malloc_state struct.
3013
3014  This is called only from within malloc_consolidate, which needs to
3015 be called in the same contexts anyway. It is never called directly
3016 outside of malloc_consolidate because some optimizing compilers try
3017 to inline it at all call points, which turns out not to be an
3018 optimization at all. (Inlining it in malloc_consolidate is fine though.)
3019*/
3020
3021#if __STD_C
3022static void malloc_mmap_state(void)
3023#else
3024static void malloc_mmap_state()
3025#endif
3026{
3027 int mprot;
3028 unsigned long pagesize = malloc_getpagesize;
3029 size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3030
3031 void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3032
3033
3034#ifdef NDEBUG
3035 if (foo == MAP_FAILED) {
3036 fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3037 abort ();
3038 }
3039#else
3040 assert(foo != MAP_FAILED);
3041#endif
3042
3043 mprot = mprotect(foo, pagesize, PROT_NONE);
3044#ifdef NDEBUG
3045 if (mprot == -1) {
3046 fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3047 strerror (errno));
3048 abort ();
3049 }
3050#else
3051 assert(mprot != -1);
3052#endif
3053
3054 av_ = (struct malloc_state *) ((char*)foo + pagesize);
3055
3056 MALLOC_ZERO(av_, sizeof(struct malloc_state));
3057
3058 mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3059#ifdef NDEBUG
3060 if (mprot == -1) {
3061 fprintf (stderr,
3062 "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3063 strerror (errno));
3064 abort ();
3065 }
3066#else
3067 assert(mprot != -1);
3068#endif
3069}
3070
3071#if __STD_C
3072static void malloc_init_state(mstate av)
3073#else
3074static void malloc_init_state(av) mstate av;
3075#endif
3076{
3077 int i;
3078 mbinptr bin;
3079
3080 void * morecore_test = MORECORE(0);
3081 unsigned long hashval;
3082
3083 /* Test morecore function
3084 */
3085 set_morecore32bit(av);
3086
3087 if (morecore_test == MORECORE_FAILURE)
3088 {
3089 set_nonmorecore32bit(av);
3090 }
3091 else
3092 {
3093 /* On 64bit systems, the heap may be located above the
3094 * 32bit address space. Since mmap() probably still can be
3095 * convinced to map within 32bit, we don't use sbrk().
3096 */
3097 hashval = hash (morecore_test);
3098 if (hashval >= AMOUNTHASH)
3099 {
3100 set_nonmorecore32bit(av);
3101 }
3102 }
3103
3104
3105 /* Establish circular links for normal bins */
3106 for (i = 1; i < NBINS; ++i) {
3107 bin = bin_at(av,i);
3108 bin->fd = bin->bk = bin;
3109 }
3110
3111 av->top_pad = DEFAULT_TOP_PAD;
3112 av->n_mmaps_max = DEFAULT_MMAP_MAX;
3113 av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3114 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
3115
3116#if MORECORE_CONTIGUOUS
3117 set_contiguous(av);
3118#else
3119 set_noncontiguous(av);
3120#endif
3121
3122 set_max_fast(av, DEFAULT_MXFAST);
3123
3124 av->top = cireg_getfree ();
3125 av->top->chunk = (mchunkptr) startheap;
3126 av->top->size = 0;
3127 set_previnuse(av->top);
3128 clear_inuse(av->top);
3129 hashtable[0] = av->top;
3130 av->pagesize = malloc_getpagesize;
3131
3132 memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);
3133
3134}
3135
3136/*
3137 Other internal utilities operating on mstates
3138*/
3139
3140#if __STD_C
3141static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
3142static int sYSTRIm(size_t, mstate);
3143static void malloc_consolidate(mstate);
3144#else
3145static Void_t* sYSMALLOc();
3146static int sYSTRIm();
3147static void malloc_consolidate();
3148#endif
3149
3150/* dnmalloc functions */
3151/* needs mstate so moved here */
3152
3153static chunkinfoptr
3154next_chunkinfo (chunkinfoptr ci)
3155{
3156 mchunkptr nextp;
3157 unsigned long hashval;
3158 chunkinfoptr cinfonextp;
3159 mstate av = get_malloc_state();
3160
3161  /* If ci is not the last element in its hash_next linked list, we can
3162     just return the next chunkinfo from the list (see the else branch
3163     below); otherwise look it up via the hashtable. */
3164 if (!ci->hash_next)
3165 {
3166 /* ci is the last element, find the next chunkinfo by
3167 * looking up the chunkinfo for the chunk that is after p's chunk
3168 */
3169 nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));
3170
3171 if (!(nextp == av->top->chunk))
3172 {
3173 hashval = hash (nextp);
3174 /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
3175 cinfonextp = hashtable[hashval];
3176 if (cinfonextp && chunk (cinfonextp) == nextp)
3177 return cinfonextp;
3178
3179#ifdef DNMALLOC_CHECKS_EXTRA
3180 /* This seems bogus; a chunkinfo may legally have no nextp if
3181 * it's the last one allocated (?)
3182 */
3183 else {
3184 if (cinfonextp)
3185 fprintf (stderr,
3186 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3187 chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
3188 else
3189 fprintf (stderr,
3190 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3191 chunk(ci), hashval, "null", "null", nextp);
3192 }
3193#endif
3194
3195 return NULL;
3196 }
3197 else
3198 {
3199 return av->top;
3200 }
3201
3202 }
3203 else
3204 {
3205 return (ci->hash_next);
3206 }
3207}
3208
3209static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3210 mchunkptr nextp;
3211 if (oldp->hash_next == newp)
3212 return 1;
3213 nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3214 if (nextp == chunk(newp))
3215 return 1;
3216 return 0;
3217}
3218
3219
3220
3221/* Get the chunkinfo of the physically previous chunk */
3222/* Since we disposed of prev_size, we need this function to find the previous */
3223
3224static chunkinfoptr
3225prev_chunkinfo (chunkinfoptr ci)
3226{
3227 unsigned int i;
3228 chunkinfoptr prev;
3229 mchunkptr prevchunk = 0;
3230 /* chunkinfoptr temp; */
3231
3232 /* Get the hashtable location of the chunkinfo */
3233 i = hash (chunk (ci));
3234 assert(i < AMOUNTHASH); /* rw */
3235
3236 /* Get the first element of the linked list of chunkinfo's that contains p */
3237 prev = hashtable[i];
3238
3239 if (ci == prev) {
3240 prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3241 i = hash(prevchunk);
3242 assert(i < AMOUNTHASH); /* rw */
3243 /* Loop over the linked list until we reach the last element */
3244 for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3245 } else {
3246 /* p is not the first element in the linked list, we can just
3247 loop over the list and return the previous
3248 */
3249 for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3250 }
3251
3252 return prev;
3253}
3254
3255
3256/*
3257 Debugging support
3258 Dnmalloc broke dlmallocs debugging functions, should fix them some
3259 time in the future, for now leave them undefined.
3260*/
3261
3262#define check_chunk(P)
3263#define check_free_chunk(P)
3264#define check_inuse_chunk(P)
3265#define check_remalloced_chunk(P,N)
3266#define check_malloced_chunk(P,N)
3267#define check_malloc_state()
3268
3269
3270/* ----------- Routines dealing with system allocation -------------- */
3271
3272/*
3273 sysmalloc handles malloc cases requiring more memory from the system.
3274 On entry, it is assumed that av->top does not have enough
3275 space to service request for nb bytes, thus requiring that av->top
3276 be extended or replaced.
3277*/
3278
3279#if __STD_C
3280static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3281#else
3282static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3283#endif
3284{
3285 chunkinfoptr old_top; /* incoming value of av->top */
3286 INTERNAL_SIZE_T old_size; /* its size */
3287 char* old_end; /* its end address */
3288
3289 long size; /* arg to first MORECORE or mmap call */
3290 char* brk; /* return value from MORECORE */
3291
3292 long correction; /* arg to 2nd MORECORE call */
3293 char* snd_brk; /* 2nd return val */
3294
3295 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3296 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3297 char* aligned_brk; /* aligned offset into brk */
3298
3299 chunkinfoptr p; /* the allocated/returned chunk */
3300 chunkinfoptr remainder; /* remainder from allocation */
3301 chunkinfoptr fencepost; /* fencepost */
3302 CHUNK_SIZE_T remainder_size; /* its size */
3303
3304 CHUNK_SIZE_T sum; /* for updating stats */
3305
3306 size_t pagemask = av->pagesize - 1;
3307
3308#ifdef DNMALLOC_DEBUG
3309 fprintf(stderr, "Enter sysmalloc\n");
3310#endif
3311 /*
3312 If there is space available in fastbins, consolidate and retry
3313 malloc from scratch rather than getting memory from system. This
3314 can occur only if nb is in smallbin range so we didn't consolidate
3315 upon entry to malloc. It is much easier to handle this case here
3316 than in malloc proper.
3317 */
3318
3319
3320 if (have_fastchunks(av)) {
3321 Void_t * retval;
3322 assert(in_smallbin_range(nb));
3323 malloc_consolidate(av);
3324#ifdef DNMALLOC_DEBUG
3325 fprintf(stderr, "Return sysmalloc have_fastchunks\n");
3326#endif
3327 retval = mALLOc(nb - MALLOC_ALIGN_MASK);
3328 VALGRIND_FREELIKE_BLOCK(retval, 0);
3329 return retval;
3330 }
3331
3332
3333 /*
3334 If have mmap, and the request size meets the mmap threshold, and
3335 the system supports mmap, and there are few enough currently
3336 allocated mmapped regions, try to directly map this request
3337 rather than expanding top.
3338 */
3339
3340 if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3341 (av->n_mmaps < av->n_mmaps_max))) {
3342
3343 char* mm; /* return value from mmap call*/
3344
3345 /*
3346 Round up size to nearest page. For mmapped chunks, the overhead
3347 is one SIZE_SZ unit larger than for normal chunks, because there
3348 is no following chunk whose prev_size field could be used.
3349 */
3350 size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3351
3352 /* Don't try if size wraps around 0 */
3353 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3354
3355
3356 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3357
3358 if (mm != (char*)(MORECORE_FAILURE)) {
3359
3360 VALGRIND_MAKE_MEM_NOACCESS(mm,size);
3361
3362 /*
3363 The offset to the start of the mmapped region is stored
3364 in the prev_size field of the chunk. This allows us to adjust
3365 returned start address to meet alignment requirements here
3366 and in memalign(), and still be able to compute proper
3367 address argument for later munmap in free() and realloc().
3368 */
3369
3370 front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
3371 p = cireg_getfree();
3372
3373 if (front_misalign > 0) {
3374 correction = MALLOC_ALIGNMENT - front_misalign;
3375 p->chunk = (mchunkptr)(mm + correction);
3376 p->hash_next = (chunkinfoptr) correction;
3377 set_head(p, (size - correction) |INUSE|IS_MMAPPED);
3378 }
3379 else {
3380 p->chunk = (mchunkptr)mm;
3381 p->hash_next = 0;
3382 set_head(p, size|INUSE|IS_MMAPPED);
3383 }
3384 hashtable_add(p);
3385 /* update statistics */
3386
3387 if (++av->n_mmaps > av->max_n_mmaps)
3388 av->max_n_mmaps = av->n_mmaps;
3389
3390 sum = av->mmapped_mem += size;
3391 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3392 av->max_mmapped_mem = sum;
3393 sum += av->sbrked_mem;
3394 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3395 av->max_total_mem = sum;
3396
3397 check_chunk(p);
3398
3399#ifdef DNMALLOC_DEBUG
3400 fprintf(stderr, "Return mmapped (%lu, total %lu)\n",
3401 size, (unsigned long)/* size_t */av->max_total_mem );
3402#endif
3403 return chunk(p);
3404 }
3405 }
3406 }
3407
3408 /* Record incoming configuration of top */
3409
3410 old_top = av->top;
3411 old_size = chunksize(old_top);
3412 old_end = (char*)(chunk_at_offset(chunk(old_top), old_size));
3413
3414 brk = snd_brk = (char*)(MORECORE_FAILURE);
3415
3416 /*
3417 If not the first time through, we require old_size to be
3418 at least MINSIZE and to have prev_inuse set.
3419 */
3420
3421 /* assert((old_top == initial_top(av) && old_size == 0) ||
3422 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3423 prev_inuse(old_top))); */
3424
3425 /* Precondition: not enough current space to satisfy nb request */
3426 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3427
3428 /* Precondition: all fastbins are consolidated */
3429 assert(!have_fastchunks(av));
3430
3431 /* Request enough space for nb + pad + overhead */
3432 size = nb + av->top_pad + MINSIZE;
3433
3434 /*
3435 If contiguous, we can subtract out existing space that we hope to
3436 combine with new space. We add it back later only if
3437 we don't actually get contiguous space.
3438 */
3439 if (contiguous(av))
3440 size -= old_size;
3441
3442 /*
3443 Round to a multiple of page size.
3444 If MORECORE is not contiguous, this ensures that we only call it
3445 with whole-page arguments. And if MORECORE is contiguous and
3446 this is not first time through, this preserves page-alignment of
3447 previous calls. Otherwise, we correct to page-align below.
3448 */
3449
3450 size = (size + pagemask) & ~pagemask;
3451
3452 /*
3453 Don't try to call MORECORE if argument is so big as to appear
3454 negative. Note that since mmap takes size_t arg, it may succeed
3455 below even if we cannot call MORECORE.
3456 */
3457 if (size > 0 && morecore32bit(av))
3458 brk = (char*)(MORECORE(size));
3459
3460 /*
3461 If have mmap, try using it as a backup when MORECORE fails or
3462 cannot be used. This is worth doing on systems that have "holes" in
3463 address space, so sbrk cannot extend to give contiguous space, but
3464 space is available elsewhere. Note that we ignore mmap max count
3465 and threshold limits, since the space will not be used as a
3466 segregated mmap region.
3467 */
3468 if (brk != (char*)(MORECORE_FAILURE)) {
3469 av->sbrked_mem += size;
3470 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3471 }
3472
3473 else {
3474
3475#ifdef DNMALLOC_DEBUG
3476 fprintf(stderr, "Morecore failure in sysmalloc\n");
3477#endif
3478
3479 /* Cannot merge with old top, so add its size back in */
3480 if (contiguous(av))
3481 size = (size + old_size + pagemask) & ~pagemask;
3482
3483 /* If we are relying on mmap as backup, then use larger units */
3484 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3485 size = MMAP_AS_MORECORE_SIZE;
3486
3487 /* Don't try if size wraps around 0 */
3488 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3489
3490#ifdef DNMALLOC_DEBUG
3491 fprintf(stderr, "Try mmap in sysmalloc\n");
3492#endif
3493 brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3494
3495 if (brk != (char*)(MORECORE_FAILURE)) {
3496
3497 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3498
3499 av->mmapped_mem += size;
3500#ifdef DNMALLOC_DEBUG
3501 fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
3502#endif
3503
3504 /* We do not need, and cannot use, another sbrk call to find end */
3505 snd_brk = brk + size;
3506
3507 /*
3508 Record that we no longer have a contiguous sbrk region.
3509 After the first time mmap is used as backup, we do not
3510 ever rely on contiguous space since this could incorrectly
3511 bridge regions.
3512 */
3513 set_noncontiguous(av);
3514 }
3515 }
3516 }
3517
3518 if (brk != (char*)(MORECORE_FAILURE)) {
3519#ifdef DNMALLOC_DEBUG
3520 fprintf(stderr, "Success path %lu allocated, sbrked %lu\n",
3521 size, (unsigned long)av->sbrked_mem);
3522#endif
3523 /* av->sbrked_mem += size; moved up */
3524
3525 /*
3526 If MORECORE extends previous space, we can likewise extend top size.
3527 */
3528
3529 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3530 set_head(old_top, (size + old_size) | PREV_INUSE);
3531#ifdef DNMALLOC_DEBUG
3532 fprintf(stderr, "Previous space extended\n");
3533#endif
3534 }
3535
3536 /*
3537 Otherwise, make adjustments:
3538
3539 * If the first time through or noncontiguous, we need to call sbrk
3540 just to find out where the end of memory lies.
3541
3542 * We need to ensure that all returned chunks from malloc will meet
3543 MALLOC_ALIGNMENT
3544
3545 * If there was an intervening foreign sbrk, we need to adjust sbrk
3546 request size to account for the fact that we will not be able to
3547 combine new space with existing space in old_top.
3548
3549 * Almost all systems internally allocate whole pages at a time, in
3550 which case we might as well use the whole last page of request.
3551 So we allocate enough more memory to hit a page boundary now,
3552 which in turn causes future contiguous calls to page-align.
3553 */
3554
3555 else {
3556 front_misalign = 0;
3557 end_misalign = 0;
3558 correction = 0;
3559 aligned_brk = brk;
3560
3561 /*
3562 If MORECORE returns an address lower than we have seen before,
3563 we know it isn't really contiguous. This and some subsequent
3564 checks help cope with non-conforming MORECORE functions and
3565 the presence of "foreign" calls to MORECORE from outside of
3566 malloc or by other threads. We cannot guarantee to detect
3567 these in all cases, but cope with the ones we do detect.
3568 */
3569 if (contiguous(av) && old_size != 0 && brk < old_end) {
3570 set_noncontiguous(av);
3571 }
3572
3573 /* handle contiguous cases */
3574 if (contiguous(av)) {
3575
3576#ifdef DNMALLOC_DEBUG
3577 fprintf(stderr, "Handle contiguous cases\n");
3578#endif
3579 /*
3580 We can tolerate forward non-contiguities here (usually due
3581 to foreign calls) but treat them as part of our space for
3582 stats reporting.
3583 */
3584 if (old_size != 0)
3585 av->sbrked_mem += brk - old_end;
3586
3587 /* Guarantee alignment of first new chunk made from this space */
3588
3589 front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
3590 if (front_misalign > 0) {
3591
3592 /*
3593 Skip over some bytes to arrive at an aligned position.
3594 We don't need to specially mark these wasted front bytes.
3595 They will never be accessed anyway because
3596 prev_inuse of av->top (and any chunk created from its start)
3597 is always true after initialization.
3598 */
3599
3600 correction = MALLOC_ALIGNMENT - front_misalign;
3601 aligned_brk += correction;
3602 }
3603
3604 /*
3605 If this isn't adjacent to existing space, then we will not
3606 be able to merge with old_top space, so must add to 2nd request.
3607 */
3608
3609 correction += old_size;
3610
3611 /* Extend the end address to hit a page boundary */
3612 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3613 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3614
3615 assert(correction >= 0);
3616 snd_brk = (char*)(MORECORE(correction));
3617
3618 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3619 /*
3620 If we can't allocate the correction, try to at least find out the
3621 current brk. It might be enough to proceed without failing.
3622 */
3623 correction = 0;
3624 snd_brk = (char*)(MORECORE(0));
3625 }
3626 else if (snd_brk < brk) {
3627 /*
3628 If the second call gives noncontiguous space even though
3629 it says it won't, the only course of action is to ignore
3630 results of second call, and conservatively estimate where
3631 the first call left us. Also set noncontiguous, so this
3632 won't happen again, leaving at most one hole.
3633
3634 Note that this check is intrinsically incomplete. Because
3635 MORECORE is allowed to give more space than we ask for,
3636 there is no reliable way to detect a noncontiguity
3637 producing a forward gap for the second call.
3638 */
3639 snd_brk = brk + size;
3640 correction = 0;
3641 set_noncontiguous(av);
3642 }
3643 else {
3644 VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
3645 }
3646
3647 }
3648
3649 /* handle non-contiguous cases */
3650 else {
3651
3652#ifdef DNMALLOC_DEBUG
3653 fprintf(stderr, "Handle non-contiguous cases\n");
3654#endif
3655
3656 /* MORECORE/mmap must correctly align */
3657 assert(aligned_OK(brk));
3658
3659 /* Find out current end of memory */
3660 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3661 snd_brk = (char*)(MORECORE(0));
3662 av->sbrked_mem += snd_brk - brk - size;
3663 }
3664#ifdef DNMALLOC_DEBUG
3665 fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
3666#endif
3667 }
3668
3669 /* Adjust top based on results of second sbrk.
3670 *
3671 * If mmap() has been used as backup for failed morecore(),
3672 * we end up in this branch as well.
3673 */
3674 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3675#ifdef DNMALLOC_DEBUG
3676 fprintf(stderr, "Adjust top, correction %lu\n", (unsigned long)correction);
3677#endif
3678 /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
3679 av->top = cireg_getfree();
3680 av->top->chunk = (mchunkptr)aligned_brk;
3681 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3682#ifdef DNMALLOC_DEBUG
3683 fprintf(stderr, "Adjust top, top %p size %lu\n",
3684 av->top, (unsigned long)chunksize(av->top));
3685#endif
3686 hashtable_add(av->top);
3687 av->sbrked_mem += correction;
3688
3689 /*
3690 If not the first time through, we either have a
3691 gap due to foreign sbrk or a non-contiguous region. Insert a
3692 double fencepost at old_top to prevent consolidation with space
3693 we don't own. These fenceposts are artificial chunks that are
3694 marked as inuse. Original dlmalloc had two of these but too
3695 small to use. To ensure that the linked lists contain a maximum
3696 of 8 elements we only use 1. Inuse is determined by the
3697 current rather than the next chunk anyway.
3698 */
3699
3700 if (old_size != 0) {
3701#ifdef DNMALLOC_DEBUG
3702 fprintf(stderr, "Shrink old_top to insert fenceposts\n");
3703#endif
3704 /*
3705 Shrink old_top to insert fenceposts, keeping size a
3706 multiple of MALLOC_ALIGNMENT. We know there is at least
3707 enough space in old_top to do this.
3708 */
3709#ifdef DNMALLOC_DEBUG
3710 fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n",
3711 old_top, (unsigned long)old_size);
3712#endif
3713 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3714 set_head(old_top, old_size | PREV_INUSE);
3715#ifdef DNMALLOC_DEBUG
3716 fprintf(stderr, "Adjust top, old_size after %lu\n",
3717 (unsigned long)old_size);
3718#endif
3719
3720 /*
3721 Note that the following assignments completely overwrite
3722 old_top when old_size was previously MINSIZE. This is
3723 intentional. We need the fencepost, even if old_top otherwise gets
3724 lost.
3725 */
3726 /* dnmalloc, we need the fencepost to be 16 bytes, however since
3727 it's marked inuse it will never be coalesced
3728 */
3729 fencepost = cireg_getfree();
3730 fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top),
3731 old_size);
3732 fencepost->size = 16|INUSE|PREV_INUSE;
3733 hashtable_add(fencepost);
3734 /*
3735 If possible, release the rest, suppressing trimming.
3736 */
3737 if (old_size >= MINSIZE) {
3738 INTERNAL_SIZE_T tt = av->trim_threshold;
3739#ifdef DNMALLOC_DEBUG
3740 fprintf(stderr, "Release\n");
3741#endif
3742 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3743 set_head(old_top, old_size | PREV_INUSE | INUSE);
3744 guard_set(av->guard_stored, old_top, 0, old_size);
3745 VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
3746 fREe(chunk(old_top));
3747 av->trim_threshold = tt;
3748#ifdef DNMALLOC_DEBUG
3749 fprintf(stderr, "Release done\n");
3750#endif
3751 }
3752
3753#ifdef DNMALLOC_DEBUG
3754 fprintf(stderr, "Adjust top, size %lu\n",
3755 (unsigned long)chunksize(av->top));
3756#endif
3757
3758 } /* fenceposts */
3759 } /* adjust top */
3760 } /* not extended previous region */
3761
3762 /* Update statistics */ /* FIXME check this */
3763 sum = av->sbrked_mem;
3764 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3765 av->max_sbrked_mem = sum;
3766
3767 sum += av->mmapped_mem;
3768 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3769 av->max_total_mem = sum;
3770
3771 check_malloc_state();
3772
3773 /* finally, do the allocation */
3774
3775 p = av->top;
3776 size = chunksize(p);
3777
3778#ifdef DNMALLOC_DEBUG
3779 fprintf(stderr, "Size: %lu nb+MINSIZE: %lu\n",
3780 (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
3781#endif
3782
3783 /* check that one of the above allocation paths succeeded */
3784 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3785 remainder_size = size - nb;
3786 remainder = cireg_getfree();
3787 remainder->chunk = chunk_at_offset(chunk(p), nb);
3788 av->top = remainder;
3789 set_head(p, nb | PREV_INUSE | INUSE);
3790 set_head(remainder, remainder_size | PREV_INUSE);
3791 hashtable_insert (p, av->top);
3792 check_malloced_chunk(p, nb);
3793#ifdef DNMALLOC_DEBUG
3794 fprintf(stderr, "Return any (total %lu)\n",
3795 (unsigned long)/* size_t */av->max_total_mem );
3796#endif
3797 return chunk(p);
3798 }
3799
3800 }
3801
3802#ifdef DNMALLOC_DEBUG
3803 fprintf(stderr, "Return failed (total %lu)\n",
3804 (unsigned long)/* size_t */av->max_total_mem );
3805#endif
3806
3807 /* catch all failure paths */
3808 MALLOC_FAILURE_ACTION;
3809 return 0;
3810}
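
/*
  The page rounding above, size = (size + pagemask) & ~pagemask, is the
  usual mask trick for rounding a request up to a multiple of a
  power-of-two page size. The fragment below is a small stand-alone
  illustration of that arithmetic (kept under #if 0 so it is never
  compiled; the names round_up_to_page, request_size and page_size are
  hypothetical and not dnmalloc symbols).
*/
#if 0
#include <assert.h>
#include <stddef.h>

static size_t round_up_to_page(size_t request_size, size_t page_size)
{
  size_t pagemask = page_size - 1; /* valid because page_size is a power of two */
  return (request_size + pagemask) & ~pagemask;
}

int main(void)
{
  assert(round_up_to_page(1, 4096)    == 4096);
  assert(round_up_to_page(4096, 4096) == 4096); /* already page aligned */
  assert(round_up_to_page(4097, 4096) == 8192); /* rounds up, never down */
  return 0;
}
#endif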
3811
3812
3813
3814
3815/*
3816 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3817 to the system (via negative arguments to sbrk) if there is unused
3818 memory at the `high' end of the malloc pool. It is called
3819 automatically by free() when top space exceeds the trim
3820 threshold. It is also called by the public malloc_trim routine. It
3821 returns 1 if it actually released any memory, else 0.
3822*/
3823
3824#if __STD_C
3825static int sYSTRIm(size_t pad, mstate av)
3826#else
3827static int sYSTRIm(pad, av) size_t pad; mstate av;
3828#endif
3829{
3830 long top_size; /* Amount of top-most memory */
3831 long extra; /* Amount to release */
3832 long released; /* Amount actually released */
3833 char* current_brk; /* address returned by pre-check sbrk call */
3834 char* new_brk; /* address returned by post-check sbrk call */
3835 size_t pagesz;
3836
3837 pagesz = av->pagesize;
3838 top_size = chunksize(av->top);
3839
3840 /* Release in pagesize units, keeping at least one page */
3841 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3842
3843 if (extra > 0) {
3844
3845 /*
3846 Only proceed if end of memory is where we last set it.
3847 This avoids problems if there were foreign sbrk calls.
3848 */
3849 current_brk = (char*)(MORECORE(0));
3850 if (current_brk == (char*)(chunk(av->top)) + top_size) {
3851
3852 /*
3853 Attempt to release memory. We ignore MORECORE return value,
3854 and instead call again to find out where new end of memory is.
3855 This avoids problems if first call releases less than we asked,
3856 or if failure somehow altered brk value. (We could still
3857 encounter problems if it altered brk in some very bad way,
3858 but the only thing we can do is adjust anyway, which will cause
3859 some downstream failure.)
3860 */
3861
3862 MORECORE(-extra);
3863 new_brk = (char*)(MORECORE(0));
3864
3865 if (new_brk != (char*)MORECORE_FAILURE) {
3866 released = (long)(current_brk - new_brk);
3867
3868 if (released != 0) {
3869 /* Success. Adjust top. */
3870 av->sbrked_mem -= released;
3871 set_head(av->top, (top_size - released) | PREV_INUSE);
3872 check_malloc_state();
3873 return 1;
3874 }
3875 }
3876 }
3877 }
3878 return 0;
3879}
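
/*
  A worked example of the trim computation in sYSTRIm above (kept under
  #if 0, never compiled; the values 16 for MINSIZE and 4096 for the page
  size are assumptions made only for this example). The released amount
  is always a whole number of pages and always leaves more than
  pad + MINSIZE bytes in the top chunk.
*/
#if 0
#include <assert.h>

int main(void)
{
  long top_size = 100000; /* current size of the top chunk          */
  long pad      = 0;      /* padding the caller wants to keep       */
  long minsize  = 16;     /* stand-in for MINSIZE                   */
  long pagesz   = 4096;   /* stand-in for av->pagesize              */

  long extra = ((top_size - pad - minsize + (pagesz-1)) / pagesz - 1) * pagesz;

  assert(extra == 98304);                    /* 24 whole pages released  */
  assert(extra % pagesz == 0);               /* always a page multiple   */
  assert(top_size - extra > pad + minsize);  /* top keeps a usable tail  */
  return 0;
}
#endif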
3880
3881/*
3882 ------------------------------ malloc ------------------------------
3883*/
3884
3885
3886#if __STD_C
3887DL_STATIC Void_t* mALLOc(size_t bytes)
3888#else
3889DL_STATIC Void_t* mALLOc(bytes) size_t bytes;
3890#endif
3891{
3892 mstate av = get_malloc_state();
3893
3894 INTERNAL_SIZE_T nb; /* normalized request size */
3895 unsigned int idx; /* associated bin index */
3896 mbinptr bin; /* associated bin */
3897 mfastbinptr* fb; /* associated fastbin */
3898
3899 chunkinfoptr victim; /* inspected/selected chunk */
3900 INTERNAL_SIZE_T size; /* its size */
3901 int victim_index; /* its bin index */
3902
3903 chunkinfoptr remainder; /* remainder from a split */
3904 CHUNK_SIZE_T remainder_size; /* its size */
3905
3906 unsigned int block; /* bit map traverser */
3907 unsigned int bit; /* bit map traverser */
3908 unsigned int map; /* current word of binmap */
3909
3910 chunkinfoptr fwd; /* misc temp for linking */
3911 chunkinfoptr bck; /* misc temp for linking */
3912
3913 Void_t* retval;
3914
3915 /* chunkinfoptr next; */
3916
3917
3918 /*
3919 Convert request size to internal form by adding SIZE_SZ bytes
3920 overhead plus possibly more to obtain necessary alignment and/or
3921 to obtain a size of at least MINSIZE, the smallest allocatable
3922 size. Also, checked_request2size traps (returning 0) request sizes
3923 that are so large that they wrap around zero when padded and
3924 aligned.
3925 */
3926#if defined(SH_CUTEST)
3927 extern int malloc_count;
3928 ++malloc_count;
3929#endif
3930
3931 checked_request2size(bytes, nb);
3932
3933 /*
3934 Bypass search if no frees yet
3935 */
3936 if (av && have_anychunks(av)) {
3937 goto av_initialized;
3938 }
3939 else {
3940 if (!av || av->max_fast == 0) { /* initialization check */
3941 malloc_consolidate(av);
3942 av = get_malloc_state();
3943 }
3944 goto use_top;
3945 }
3946
3947 av_initialized:
3948
3949 /*
3950 If the size qualifies as a fastbin, first check corresponding bin.
3951 */
3952 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3953 fb = &(av->fastbins[(fastbin_index(nb))]);
3954 if ( (victim = *fb) != 0) {
3955 *fb = victim->fd;
3956 check_remalloced_chunk(victim, nb);
3957 guard_set(av->guard_stored, victim, bytes, nb);
3958 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
3959 return chunk(victim);
3960 }
3961 }
3962
3963 /*
3964 If a small request, check regular bin. Since these "smallbins"
3965 hold one size each, no searching within bins is necessary.
3966 (For a large request, we need to wait until unsorted chunks are
3967 processed to find best fit. But for small ones, fits are exact
3968 anyway, so we can check now, which is faster.)
3969 */
3970
3971 if (in_smallbin_range(nb)) {
3972 idx = smallbin_index(nb);
3973 bin = bin_at(av,idx);
3974
3975 if ((victim = last(bin)) != bin) {
3976 bck = victim->bk;
3977 bin->bk = bck;
3978 bck->fd = bin;
3979
3980 set_all_inuse(victim);
3981
3982 check_malloced_chunk(victim, nb);
3983 guard_set(av->guard_stored, victim, bytes, nb);
3984 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
3985 return chunk(victim);
3986 }
3987 }
3988
3989 /*
3990 If this is a large request, consolidate fastbins before continuing.
3991 While it might look excessive to kill all fastbins before
3992 even seeing if there is space available, this avoids
3993 fragmentation problems normally associated with fastbins.
3994 Also, in practice, programs tend to have runs of either small or
3995 large requests, but less often mixtures, so consolidation is not
3996 invoked all that often in most programs. And the programs in which
3997 it is called frequently would otherwise tend to fragment.
3998 */
3999
4000 else {
4001 idx = largebin_index(nb);
4002 if (have_fastchunks(av))
4003 malloc_consolidate(av);
4004 }
4005
4006 /*
4007 Process recently freed or remaindered chunks, taking one only if
4008 it is an exact fit, or, if this is a small request, the chunk is the remainder from
4009 the most recent non-exact fit. Place other traversed chunks in
4010 bins. Note that this step is the only place in any routine where
4011 chunks are placed in bins.
4012 */
4013
4014 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4015 bck = victim->bk;
4016 size = chunksize(victim);
4017
4018 /*
4019 If a small request, try to use last remainder if it is the
4020 only chunk in unsorted bin. This helps promote locality for
4021 runs of consecutive small requests. This is the only
4022 exception to best-fit, and applies only when there is
4023 no exact fit for a small chunk.
4024 */
4025
4026 if (UNLIKELY(in_smallbin_range(nb) &&
4027 bck == unsorted_chunks(av) &&
4028 victim == av->last_remainder &&
4029 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4030
4031 /* split and reattach remainder */
4032 remainder_size = size - nb;
4033 remainder = cireg_getfree();
4034 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4035 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4036 av->last_remainder = remainder;
4037 remainder->bk = remainder->fd = unsorted_chunks(av);
4038
4039 set_head(victim, nb | PREV_INUSE|INUSE);
4040 set_head(remainder, remainder_size | PREV_INUSE);
4041 hashtable_insert(victim, remainder);
4042
4043 check_malloced_chunk(victim, nb);
4044 guard_set(av->guard_stored, victim, bytes, nb);
4045 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4046 return chunk(victim);
4047 }
4048
4049 /* remove from unsorted list */
4050 unsorted_chunks(av)->bk = bck;
4051 bck->fd = unsorted_chunks(av);
4052
4053 /* Take now instead of binning if exact fit */
4054
4055 if (UNLIKELY(size == nb)) {
4056 set_all_inuse(victim);
4057 check_malloced_chunk(victim, nb);
4058 guard_set(av->guard_stored, victim, bytes, nb);
4059 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4060 return chunk(victim);
4061 }
4062
4063 /* place chunk in bin */
4064
4065 if (in_smallbin_range(size)) {
4066
4067 victim_index = smallbin_index(size);
4068 bck = bin_at(av, victim_index);
4069 fwd = bck->fd;
4070 }
4071 else {
4072 victim_index = largebin_index(size);
4073 bck = bin_at(av, victim_index);
4074 fwd = bck->fd;
4075
4076 if (UNLIKELY(fwd != bck)) {
4077 /* if smaller than smallest, place first */
4078 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4079 fwd = bck;
4080 bck = bck->bk;
4081 }
4082 else if ((CHUNK_SIZE_T)(size) >=
4083 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4084
4085 /* maintain large bins in sorted order */
4086 size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4087 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
4088 fwd = fwd->fd;
4089 bck = fwd->bk;
4090 }
4091 }
4092 }
4093
4094 mark_bin(av, victim_index);
4095 victim->bk = bck;
4096 victim->fd = fwd;
4097 fwd->bk = victim;
4098 bck->fd = victim;
4099 }
4100
4101 /*
4102 If a large request, scan through the chunks of current bin to
4103 find one that fits. (This will be the smallest that fits unless
4104 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
4105 the only step where an unbounded number of chunks might be
4106 scanned without doing anything useful with them. However the
4107 lists tend to be short.
4108 */
4109
4110 if (!in_smallbin_range(nb)) {
4111 bin = bin_at(av, idx);
4112
4113 victim = last(bin);
4114
4115 if (UNLIKELY(victim != bin)) {
4116
4117 do {
4118 size = chunksize(victim);
4119
4120 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4121 remainder_size = size - nb;
4122 unlink(victim, bck, fwd);
4123
4124 /* Split */
4125 if (remainder_size >= MINSIZE) {
4126 remainder = cireg_getfree();
4127 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4128 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4129 remainder->bk = remainder->fd = unsorted_chunks(av);
4130 set_head(victim, nb | PREV_INUSE | INUSE);
4131 set_head(remainder, remainder_size | PREV_INUSE);
4132 hashtable_insert(victim, remainder);
4133 check_malloced_chunk(victim, nb);
4134 guard_set(av->guard_stored, victim, bytes, nb);
4135 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4136 return chunk(victim);
4137 }
4138 /* Exhaust */
4139 else {
4140 set_all_inuse(victim);
4141 check_malloced_chunk(victim, nb);
4142 guard_set(av->guard_stored, victim, bytes, nb);
4143 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4144 return chunk(victim);
4145 }
4146 }
4147 victim = victim->bk;
4148 } while(victim != bin);
4149 }
4150 }
4151
4152 /*
4153 Search for a chunk by scanning bins, starting with next largest
4154 bin. This search is strictly by best-fit; i.e., the smallest
4155 (with ties going to approximately the least recently used) chunk
4156 that fits is selected.
4157
4158 The bitmap avoids needing to check that most blocks are nonempty.
4159 */
4160
4161
4162 ++idx;
4163 bin = bin_at(av,idx);
4164 block = idx2block(idx);
4165 map = av->binmap[block];
4166 bit = idx2bit(idx);
4167
4168 for (;;) {
4169
4170 /* Skip rest of block if there are no more set bits in this block. */
4171 if (bit > map || bit == 0) {
4172 do {
4173 if (++block >= BINMAPSIZE) /* out of bins */
4174 goto use_top;
4175 } while ( (map = av->binmap[block]) == 0);
4176
4177 bin = bin_at(av, (block << BINMAPSHIFT));
4178 bit = 1;
4179 }
4180
4181 /* Advance to bin with set bit. There must be one. */
4182 while ((bit & map) == 0) {
4183 bin = next_bin(bin);
4184 bit <<= 1;
4185 assert(bit != 0);
4186 }
4187
4188 /* Inspect the bin. It is likely to be non-empty */
4189 victim = last(bin);
4190
4191 /* If a false alarm (empty bin), clear the bit. */
4192 if (victim == bin) {
4193 av->binmap[block] = map &= ~bit; /* Write through */
4194 bin = next_bin(bin);
4195 bit <<= 1;
4196 }
4197
4198 else {
4199 size = chunksize(victim);
4200
4201 /* We know the first chunk in this bin is big enough to use. */
4202 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4203
4204 remainder_size = size - nb;
4205
4206 /* unlink */
4207 bck = victim->bk;
4208 bin->bk = bck;
4209 bck->fd = bin;
4210
4211 /* Split */
4212 if (remainder_size >= MINSIZE) {
4213 remainder = cireg_getfree();
4214 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4215
4216 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4217 remainder->bk = remainder->fd = unsorted_chunks(av);
4218 /* advertise as last remainder */
4219 if (in_smallbin_range(nb))
4220 av->last_remainder = remainder;
4221
4222 set_head(victim, nb | PREV_INUSE | INUSE);
4223 set_head(remainder, remainder_size | PREV_INUSE);
4224 hashtable_insert(victim, remainder);
4225 check_malloced_chunk(victim, nb);
4226 guard_set(av->guard_stored, victim, bytes, nb);
4227 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4228 return chunk(victim);
4229 }
4230 /* Exhaust */
4231 else {
4232 set_all_inuse(victim);
4233 check_malloced_chunk(victim, nb);
4234 guard_set(av->guard_stored, victim, bytes, nb);
4235 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4236 return chunk(victim);
4237 }
4238
4239 }
4240 }
4241
4242 use_top:
4243
4244
4245 /*
4246 If large enough, split off the chunk bordering the end of memory
4247 (held in av->top). Note that this is in accord with the best-fit
4248 search rule. In effect, av->top is treated as larger (and thus
4249 less well fitting) than any other available chunk since it can
4250 be extended to be as large as necessary (up to system
4251 limitations).
4252
4253 We require that av->top always exists (i.e., has size >=
4254 MINSIZE) after initialization, so if it would otherwise be
4255 exhausted by the current request, it is replenished. (The main
4256 reason for ensuring it exists is that we may need MINSIZE space
4257 to put in fenceposts in sysmalloc.)
4258 */
4259
4260 victim = av->top;
4261 size = chunksize(victim);
4262
4263 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
4264 remainder = cireg_getfree();
4265 remainder_size = size - nb;
4266 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4267 av->top = remainder;
4268 set_head(victim, nb | PREV_INUSE | INUSE);
4269 set_head(remainder, remainder_size | PREV_INUSE);
4270 hashtable_insert(victim, remainder);
4271 check_malloced_chunk(victim, nb);
4272 guard_set(av->guard_stored, victim, bytes, nb);
4273 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4274 return chunk(victim);
4275 }
4276
4277 /*
4278 If no space in top, relay to handle system-dependent cases
4279 */
4280 retval = sYSMALLOc(nb, av);
4281 if (retval) {
4282 victim = mem2chunk(retval);
4283 guard_set(av->guard_stored, victim, bytes, nb);
4284 }
4285 VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
4286 return retval;
4287}
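
/*
  The bin scan in mALLOc above uses a bitmap (av->binmap) with one bit
  per bin, packed into unsigned int words, so whole groups of empty bins
  can be skipped at once. The sketch below is a generic, much simplified
  illustration of that idea, not the dnmalloc implementation itself:
  all names (next_marked_bin, NWORDS, WORD_BITS) are hypothetical, and
  the block is kept under #if 0 so it is never compiled.
*/
#if 0
#include <assert.h>

#define WORD_BITS 32
#define NWORDS    4   /* 128 toy "bins" */

/* Return the index of the first marked bin at or after 'start', or -1. */
static int next_marked_bin(const unsigned int map[NWORDS], int start)
{
  int block, i;
  for (block = start / WORD_BITS; block < NWORDS; block++) {
    unsigned int word = map[block];
    if (word == 0)
      continue;       /* an empty word rules out 32 bins in one test */
    for (i = (block == start / WORD_BITS) ? start % WORD_BITS : 0;
         i < WORD_BITS; i++) {
      if (word & (1u << i))
        return block * WORD_BITS + i;
    }
  }
  return -1;          /* no candidate bin left: the caller would use top */
}

int main(void)
{
  unsigned int map[NWORDS] = { 0, 0, 0x00000010u, 0 }; /* only bin 68 marked */
  assert(next_marked_bin(map, 0)  == 68);
  assert(next_marked_bin(map, 68) == 68);
  assert(next_marked_bin(map, 69) == -1);
  return 0;
}
#endif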
4288
4289/*
4290 ------------------------------ free ------------------------------
4291*/
4292
4293#if __STD_C
4294DL_STATIC void fREe(Void_t* mem)
4295#else
4296DL_STATIC void fREe(mem) Void_t* mem;
4297#endif
4298{
4299 mstate av = get_malloc_state();
4300
4301 chunkinfoptr p; /* chunk corresponding to mem */
4302 INTERNAL_SIZE_T size; /* its size */
4303 mfastbinptr* fb; /* associated fastbin */
4304 chunkinfoptr prevchunk; /* previous physical chunk */
4305 chunkinfoptr nextchunk; /* next contiguous chunk */
4306 INTERNAL_SIZE_T nextsize; /* its size */
4307 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4308 chunkinfoptr bck; /* misc temp for linking */
4309 chunkinfoptr fwd; /* misc temp for linking */
4310 chunkinfoptr next;
4311#if defined(SH_CUTEST)
4312 extern int malloc_count;
4313 --malloc_count;
4314#endif
4315
4316 /* free(0) has no effect */
4317 if (mem != 0) {
4318 p = hashtable_lookup(mem);
4319 /* check that memory is managed by us
4320 * and is inuse
4321 */
4322 if (UNLIKELY(!p || !inuse(p)))
4323 {
4324#ifdef DNMALLOC_CHECKS
4325 if (p) {
4326 fprintf(stderr, "Attempt to free memory not in use\n");
4327 abort();
4328 } else {
4329 fprintf(stderr, "Attempt to free memory not allocated\n");
4330 abort();
4331 }
4332#endif
4333 assert(p && inuse(p));
4334 return;
4335 }
4336
4337 VALGRIND_FREELIKE_BLOCK(mem, 0);
4338
4339 guard_check(av->guard_stored, p);
4340
4341 size = chunksize(p);
4342
4343 check_inuse_chunk(p);
4344
4345 /*
4346 If eligible, place chunk on a fastbin so it can be found
4347 and used quickly in malloc.
4348 */
4349
4350 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
4351
4352#if TRIM_FASTBINS
4353 /*
4354 If TRIM_FASTBINS set, don't place chunks
4355 bordering top into fastbins
4356 */
4357 && (chunk_at_offset(chunk(p), size) != av->top)
4358#endif
4359 ) {
4360
4361 set_fastchunks(av);
4362 fb = &(av->fastbins[fastbin_index(size)]);
4363 p->fd = *fb;
4364 *fb = p;
4365 }
4366
4367 /*
4368 Consolidate other non-mmapped chunks as they arrive.
4369 */
4370
4371 else if (!chunk_is_mmapped(p)) {
4372 set_anychunks(av);
4373
4374 nextchunk = next_chunkinfo(p);
4375 if (nextchunk)
4376 nextsize = chunksize(nextchunk);
4377 else
4378 nextsize = 0; /* gcc doesn't notice that it's only used if (nextchunk) */
4379
4380 /* consolidate backward */
4381 if (UNLIKELY(!prev_inuse(p))) {
4382 prevchunk = prev_chunkinfo(p);
4383 prevsize = chunksize(prevchunk);
4384#ifdef DNMALLOC_CHECKS
4385 if (inuse(prevchunk)) {
4386 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
4387 abort();
4388 }
4389#else
4390 assert(!inuse(prevchunk));
4391#endif
4392 size += prevsize;
4393 unlink(prevchunk, bck, fwd);
4394 set_head(p, size | PREV_INUSE);
4395 hashtable_skiprm(prevchunk,p);
4396 /* This chunk no longer exists in any form: release the chunkinfoptr
4397 */
4398 freecilst_add(p);
4399 p = prevchunk;
4400 }
4401
4402 if (nextchunk) {
4403 if (nextchunk != av->top) {
4404 /* get and clear inuse bit */
4405 clear_previnuse(nextchunk);
4406
4407 /* consolidate forward */
4408 if (!inuse(nextchunk)) {
4409 /* if mmap is used instead of sbrk, we may have a
4410 * chunk with !nextchunk->fd && !nextchunk->bk
4411 */
4412 if (nextchunk->fd && nextchunk->bk)
4413 {
4414 unlink(nextchunk, bck, fwd);
4415 size += nextsize;
4416 set_head(p, size | PREV_INUSE);
4417 hashtable_skiprm(p, nextchunk);
4418 freecilst_add (nextchunk);
4419 }
4420 }
4421
4422 set_head(p, size | PREV_INUSE);
4423 next = next_chunkinfo(p);
4424 if (next)
4425 next->prev_size = size;
4426
4427 /*
4428 Place the chunk in unsorted chunk list. Chunks are
4429 not placed into regular bins until after they have
4430 been given one chance to be used in malloc.
4431 */
4432
4433 bck = unsorted_chunks(av);
4434 fwd = bck->fd;
4435 p->bk = bck;
4436 p->fd = fwd;
4437 bck->fd = p;
4438 fwd->bk = p;
4439
4440 nextchunk = next_chunkinfo(p);
4441 if (nextchunk)
4442 nextchunk->prev_size = chunksize(p);
4443
4444 check_free_chunk(p);
4445 }
4446
4447 /*
4448 If the chunk borders the current high end of memory,
4449 consolidate into top
4450 */
4451
4452 else {
4453 size += nextsize;
4454 set_head(p, size | PREV_INUSE);
4455 hashtable_remove(chunk(av->top));
4456 freecilst_add(av->top);
4457 av->top = p;
4458 check_chunk(p);
4459 }
4460 } /* if (nextchunk) */
4461
4462 /*
4463 If freeing a large space, consolidate possibly-surrounding
4464 chunks. Then, if the total unused topmost memory exceeds trim
4465 threshold, ask malloc_trim to reduce top.
4466
4467 Unless max_fast is 0, we don't know if there are fastbins
4468 bordering top, so we cannot tell for sure whether threshold
4469 has been reached unless fastbins are consolidated. But we
4470 don't want to consolidate on each free. As a compromise,
4471 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4472 is reached.
4473 */
4474
4475 if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) {
4476 if (have_fastchunks(av))
4477 malloc_consolidate(av);
4478
4479#ifndef MORECORE_CANNOT_TRIM
4480 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
4481 (CHUNK_SIZE_T)(av->trim_threshold))
4482 {
4483 if (morecore32bit(av))
4484 {
4485#ifdef DNMALLOC_DEBUG
4486 fprintf(stderr, "Calling systrim from free()\n");
4487#endif
4488 sYSTRIm(av->top_pad, av);
4489#ifdef DNMALLOC_DEBUG
4490 fprintf(stderr, "Systrim done\n");
4491#endif
4492 }
4493 }
4494#endif
4495 }
4496
4497 }
4498 /*
4499 If the chunk was allocated via mmap, release via munmap()
4500 Note that if HAVE_MMAP is false but chunk_is_mmapped is
4501 true, then user must have overwritten memory. There's nothing
4502 we can do to catch this error unless DEBUG is set, in which case
4503 check_inuse_chunk (above) will have triggered error.
4504 */
4505
4506 else {
4507 int ret;
4508 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
4509 av->n_mmaps--;
4510 av->mmapped_mem -= (size + offset);
4511 ret = munmap((char*) chunk(p) - offset, size + offset);
4512 hashtable_remove_mmapped(chunk(p));
4513 freecilst_add(p);
4514 /* munmap returns non-zero on failure */
4515 assert(ret == 0);
4516 }
4517 }
4518}
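
/*
  For chunks small enough for a fastbin, fREe() above only pushes the
  chunk's metadata onto a singly linked list (p->fd = *fb; *fb = p) and
  the malloc fast path pops from the head. The fragment below is a
  minimal, generic sketch of that LIFO discipline with hypothetical toy
  types and names, kept under #if 0; it is not dnmalloc's own data
  structure.
*/
#if 0
#include <assert.h>
#include <stddef.h>

struct toy_chunk {
  struct toy_chunk *fd;   /* next chunk in the same fastbin */
};

static void fastbin_push(struct toy_chunk **fb, struct toy_chunk *p)
{
  p->fd = *fb;            /* freed chunk points at the old head */
  *fb = p;                /* and becomes the new head            */
}

static struct toy_chunk *fastbin_pop(struct toy_chunk **fb)
{
  struct toy_chunk *victim = *fb;
  if (victim != NULL)
    *fb = victim->fd;     /* unlink the head, as in the malloc fast path */
  return victim;
}

int main(void)
{
  struct toy_chunk a, b;
  struct toy_chunk *bin = NULL;

  fastbin_push(&bin, &a);
  fastbin_push(&bin, &b);
  assert(fastbin_pop(&bin) == &b);  /* most recently freed chunk is reused first */
  assert(fastbin_pop(&bin) == &a);
  assert(fastbin_pop(&bin) == NULL);
  return 0;
}
#endif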
4519
4520/*
4521 ------------------------- malloc_consolidate -------------------------
4522
4523 malloc_consolidate is a specialized version of free() that tears
4524 down chunks held in fastbins. Free itself cannot be used for this
4525 purpose since, among other things, it might place chunks back onto
4526 fastbins. So, instead, we need to use a minor variant of the same
4527 code.
4528
4529 Also, because this routine needs to be called the first time through
4530 malloc anyway, it turns out to be the perfect place to trigger
4531 initialization code.
4532*/
4533
4534#if __STD_C
4535static void malloc_consolidate(mstate av)
4536#else
4537static void malloc_consolidate(av) mstate av;
4538#endif
4539{
4540 mfastbinptr* fb; /* current fastbin being consolidated */
4541 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4542 chunkinfoptr p; /* current chunk being consolidated */
4543 chunkinfoptr nextp; /* next chunk to consolidate */
4544 chunkinfoptr prevp;
4545 chunkinfoptr unsorted_bin; /* bin header */
4546 chunkinfoptr first_unsorted; /* chunk to link to */
4547
4548 /* These have same use as in free() */
4549 chunkinfoptr nextchunk;
4550 INTERNAL_SIZE_T size;
4551 INTERNAL_SIZE_T nextsize;
4552 INTERNAL_SIZE_T prevsize;
4553 chunkinfoptr bck;
4554 chunkinfoptr fwd;
4555 chunkinfoptr next;
4556
4557 /*
4558 If max_fast is 0, we know that av hasn't
4559 yet been initialized, in which case do so below
4560 */
4561 if (av && av->max_fast != 0) {
4562
4563 clear_fastchunks(av);
4564
4565 unsorted_bin = unsorted_chunks(av);
4566
4567 /*
4568 Remove each chunk from fast bin and consolidate it, placing it
4569 then in unsorted bin. Among other reasons for doing this,
4570 placing in unsorted bin avoids needing to calculate actual bins
4571 until malloc is sure that chunks aren't immediately going to be
4572 reused anyway.
4573 */
4574
4575 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4576 fb = &(av->fastbins[0]);
4577 do {
4578 if ( UNLIKELY((p = *fb) != 0)) {
4579 *fb = 0;
4580 do {
4581 check_inuse_chunk(p);
4582 nextp = p->fd;
4583
4584 /*
4585 * Slightly streamlined version of consolidation code in free()
4586 */
4587
4588 size = chunksize(p);
4589 nextchunk = next_chunkinfo(p);
4590
4591 /* gcc doesn't notice that it's only used if (nextchunk) */
4592 if (nextchunk)
4593 nextsize = chunksize(nextchunk);
4594 else
4595 nextsize = 0;
4596
4597 if (!prev_inuse(p)) {
4598 prevp = prev_chunkinfo(p);
4599 prevsize = chunksize(prevp);
4600 size += prevsize;
4601#ifdef DNMALLOC_CHECKS
4602 if (inuse(prevp)) {
4603 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
4604 abort();
4605 }
4606#else
4607 assert(!inuse(prevp));
4608#endif
4609 unlink(prevp, bck, fwd);
4610 set_head(p, size | PREV_INUSE);
4611 hashtable_skiprm(prevp,p);
4612 freecilst_add(p);
4613 p=prevp;
4614 }
4615
4616 if (nextchunk) {
4617 if (nextchunk != av->top) {
4618
4619 clear_previnuse(nextchunk);
4620
4621 /* if mmap is used instead of sbrk, we may have a
4622 * chunk with !nextchunk->fd && !nextchunk->bk
4623 */
4624 if (!inuse(nextchunk)) {
4625 if( nextchunk->fd && nextchunk->bk) {
4626 size += nextsize;
4627 unlink(nextchunk, bck, fwd);
4628 set_head(p, size | PREV_INUSE);
4629 hashtable_skiprm(p,nextchunk);
4630 freecilst_add(nextchunk);
4631 }
4632 }
4633
4634 first_unsorted = unsorted_bin->fd;
4635 unsorted_bin->fd = p;
4636 first_unsorted->bk = p;
4637
4638 set_head(p, size | PREV_INUSE);
4639 p->bk = unsorted_bin;
4640 p->fd = first_unsorted;
4641 next = next_chunkinfo(p);
4642 if (next)
4643 next->prev_size = size;
4644
4645
4646 }
4647
4648 else if (nextchunk == av->top) {
4649 size += nextsize;
4650 set_head(p, size | PREV_INUSE);
4651 hashtable_remove(chunk(av->top));
4652 freecilst_add(av->top);
4653 av->top = p;
4654 }
4655 } /* if (nextchunk) */
4656
4657 } while ( (p = nextp) != 0);
4658
4659 }
4660 } while (fb++ != maxfb);
4661 }
4662 else {
4663 /* Initialize dnmalloc */
4664 dnmalloc_init();
4665 malloc_init_state(get_malloc_state());
4666 check_malloc_state();
4667 }
4668}
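
/*
  malloc_consolidate() above empties each fastbin by detaching the whole
  list up front (*fb = 0) and then walking the saved links, so the bin
  is immediately reusable while its old members are being merged. The
  sketch below shows only that traversal pattern with hypothetical toy
  names, kept under #if 0; the real routine additionally coalesces each
  chunk with its neighbours and parks it in the unsorted bin.
*/
#if 0
#include <assert.h>
#include <stddef.h>

struct toy_chunk { struct toy_chunk *fd; };

static int drain_fastbin(struct toy_chunk **fb)
{
  int n = 0;
  struct toy_chunk *p = *fb;
  *fb = NULL;                         /* bin is emptied before processing */
  while (p != NULL) {
    struct toy_chunk *nextp = p->fd;  /* save the link before touching p  */
    /* ... consolidation of p would happen here ... */
    p = nextp;
    n++;
  }
  return n;
}

int main(void)
{
  struct toy_chunk a, b, c;
  struct toy_chunk *bin;
  c.fd = NULL; b.fd = &c; a.fd = &b;
  bin = &a;
  assert(drain_fastbin(&bin) == 3);
  assert(bin == NULL);
  return 0;
}
#endif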
4669
4670/*
4671 ------------------------------ realloc ------------------------------
4672*/
4673
4674
4675#if __STD_C
4676DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4677#else
4678DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4679#endif
4680{
4681 mstate av = get_malloc_state();
4682
4683 INTERNAL_SIZE_T nb; /* padded request size */
4684
4685 chunkinfoptr oldp; /* chunk corresponding to oldmem */
4686 INTERNAL_SIZE_T oldsize; /* its size */
4687
4688 chunkinfoptr newp; /* chunk to return */
4689 INTERNAL_SIZE_T newsize; /* its size */
4690 Void_t* newmem; /* corresponding user mem */
4691
4692 chunkinfoptr next; /* next contiguous chunk after oldp */
4693
4694 chunkinfoptr remainder; /* extra space at end of newp */
4695 CHUNK_SIZE_T remainder_size; /* its size */
4696
4697 chunkinfoptr bck; /* misc temp for linking */
4698 chunkinfoptr fwd; /* misc temp for linking */
4699
4700 CHUNK_SIZE_T copysize; /* bytes to copy */
4701 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4702 INTERNAL_SIZE_T* s; /* copy source */
4703 INTERNAL_SIZE_T* d; /* copy destination */
4704
4705
4706#ifdef REALLOC_ZERO_BYTES_FREES
4707 if (UNLIKELY(bytes == 0)) {
4708 fREe(oldmem);
4709 return 0;
4710 }
4711#endif
4712
4713 if (UNLIKELY(!av || av->max_fast == 0)) {
4714 malloc_consolidate(av);
4715 av = get_malloc_state();
4716 }
4717
4718 /* realloc of null is supposed to be same as malloc */
4719 if (UNLIKELY(oldmem == 0))
4720 return mALLOc(bytes);
4721
4722 checked_request2size(bytes, nb);
4723
4724 oldp = hashtable_lookup(oldmem);
4725
4726 if (UNLIKELY(!oldp || !inuse(oldp))){
4727 /* attempt to either realloc memory not managed by us
4728 * or memory that is not in use
4729 */
4730#ifdef DNMALLOC_CHECKS
4731 if (oldp) {
4732 fprintf(stderr, "Attempt to realloc memory not in use\n");
4733 abort();
4734 } else {
4735 fprintf(stderr, "Attempt to realloc memory not allocated\n");
4736 abort();
4737 }
4738#endif
4739 assert(oldp && inuse(oldp));
4740 return 0;
4741 }
4742
4743 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4744 guard_check(av->guard_stored, oldp);
4745
4746 oldsize = chunksize(oldp);
4747
4748 check_inuse_chunk(oldp);
4749
4750 if (!chunk_is_mmapped(oldp)) {
4751
4752 if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
4753 /* already big enough; split below */
4754 newp = oldp;
4755 newsize = oldsize;
4756 }
4757
4758 else {
4759 next = next_chunkinfo(oldp);
4760 if (next)
4761 next->prev_size = oldsize;
4762 /* Try to expand forward into top */
4763 if (next && next == av->top &&
4764 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4765 (CHUNK_SIZE_T)(nb + MINSIZE)) {
4766 set_head_size(oldp, nb);
4767 hashtable_remove(chunk(av->top));
4768 av->top->chunk = chunk_at_offset(chunk(oldp), nb);
4769 set_head(av->top, (newsize - nb) | PREV_INUSE);
4770 /* av->top->chunk has been moved, so update its entry in the hashtable */
4771 hashtable_insert(oldp, av->top);
4772 guard_set(av->guard_stored, oldp, bytes, nb);
4773 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
4774 return chunk(oldp);
4775 }
4776
4777 /* Try to expand forward into next chunk; split off remainder below */
4778 else if (next && next != av->top &&
4779 !inuse(next) &&
4780 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4781 (CHUNK_SIZE_T)(nb)) {
4782 newp = oldp;
4783 unlink(next, bck, fwd);
4784 hashtable_remove(chunk(next));
4785 freecilst_add(next);
4786 next = next_chunkinfo(oldp);
4787 if (next)
4788 next->prev_size = newsize;
4789 }
4790
4791 /* allocate, copy, free */
4792 else {
4793
4794 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4795 if (newmem == 0)
4796 return 0; /* propagate failure */
4797
4798 newp = hashtable_lookup(newmem);
4799 newsize = chunksize(newp);
4800
4801
4802 /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
4803 /*
4804 Avoid copy if newp is next chunk after oldp.
4805 */
4806 if (UNLIKELY(is_next_chunk(oldp, newp))) {
4807 newsize += oldsize;
4808 set_head_size(oldp, newsize);
4809 hashtable_skiprm(oldp, newp);
4810 freecilst_add(newp);
4811 newp = oldp;
4812 }
4813 else {
4814 /*
4815 Unroll copy of <= 40 bytes (80 if 8byte sizes)
4816 We know that contents have an even number of
4817 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
4818 */
4819
4820 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);
4821
4822 copysize = oldsize;
4823 s = (INTERNAL_SIZE_T*)(oldmem);
4824 d = (INTERNAL_SIZE_T*)(newmem);
4825 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4826 assert(ncopies >= 2);
4827
4828 if (ncopies > 10)
4829 MALLOC_COPY(d, s, copysize);
4830
4831 else {
4832 *(d+0) = *(s+0);
4833 *(d+1) = *(s+1);
4834 if (ncopies > 2) {
4835 *(d+2) = *(s+2);
4836 *(d+3) = *(s+3);
4837 if (ncopies > 4) {
4838 *(d+4) = *(s+4);
4839 *(d+5) = *(s+5);
4840 if (ncopies > 6) {
4841 *(d+6) = *(s+6);
4842 *(d+7) = *(s+7);
4843 if (ncopies > 8) {
4844 *(d+8) = *(s+8);
4845 *(d+9) = *(s+9);
4846 }
4847 }
4848 }
4849 }
4850 }
4851
4852 fREe(oldmem);
4853 check_inuse_chunk(newp);
4854 guard_set(av->guard_stored, newp, bytes, nb);
4855 return chunk(newp);
4856 }
4857 }
4858 }
4859
4860 /* If possible, free extra space in old or extended chunk */
4861
4862 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4863
4864 remainder_size = newsize - nb;
4865
4866 if (remainder_size >= MINSIZE) { /* split remainder */
4867 remainder = cireg_getfree();
4868 remainder->chunk = chunk_at_offset(chunk(newp), nb);
4869 set_head_size(newp, nb);
4870 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
4871 remainder->prev_size = nb;
4872 hashtable_insert(newp, remainder);
4873 /* Mark remainder as inuse so free() won't complain */
4874 set_all_inuse(remainder);
4875 guard_set(av->guard_stored, remainder, 0, remainder_size);
4876 VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
4877 fREe(chunk(remainder));
4878 }
4879 else { /* not enough extra to split off */
4880 set_head_size(newp, newsize);
4881 set_all_inuse(newp);
4882 }
4883
4884 check_inuse_chunk(newp);
4885 guard_set(av->guard_stored, newp, bytes, nb);
4886 VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
4887 return chunk(newp);
4888 }
4889
4890 /*
4891 Handle mmap cases
4892 */
4893
4894 else {
4895
4896#if HAVE_MREMAP
4897 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
4898 size_t pagemask = av->pagesize - 1;
4899 char *cp;
4900 CHUNK_SIZE_T sum;
4901
4902
4903 /* Note the extra SIZE_SZ overhead */
4904 /* newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask; */
4905 newsize = (nb + offset + pagemask) & ~pagemask;
4906
4907 /* don't need to remap if still within same page */
4908 if (oldsize == newsize - offset)
4909 {
4910 guard_set(av->guard_stored, oldp, bytes, nb);
4911 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4912 VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
4913 return oldmem;
4914 }
4915
4916 cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);
4917
4918 if (cp != (char*)MORECORE_FAILURE) {
4919
4920 hashtable_remove_mmapped(chunk(oldp));
4921
4922 oldp->chunk = (mchunkptr)(cp + offset);
4923 set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);
4924
4925 hashtable_add(oldp);
4926
4927 assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
4928 assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));
4929
4930 /* update statistics */
4931 sum = av->mmapped_mem += newsize - oldsize;
4932 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4933 av->max_mmapped_mem = sum;
4934 sum += av->sbrked_mem;
4935 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4936 av->max_total_mem = sum;
4937
4938 guard_set(av->guard_stored, oldp, bytes, nb);
4939 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4940 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
4941 return chunk(oldp);
4942 }
4943#endif /* have MREMAP */
4944
4945 /* Note the extra SIZE_SZ overhead. */
4946 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
4947 newmem = oldmem; /* do nothing */
4948 else {
4949 /* Must alloc, copy, free. */
4950 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4951 if (newmem != 0) {
4952 MALLOC_COPY(newmem, oldmem, oldsize);
4953 fREe(oldmem);
4954 }
4955 }
4956 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
4957 guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
4958 return newmem;
4959 }
4960}
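
/*
  Seen from the caller, the routine above follows the usual realloc
  contract: realloc(NULL, n) behaves like malloc(n), and growing a block
  preserves its old contents (when REALLOC_ZERO_BYTES_FREES is defined,
  realloc(p, 0) additionally frees p). A minimal usage sketch, assuming
  the standard public names malloc/realloc/free are mapped onto these
  routines; kept under #if 0, never compiled.
*/
#if 0
#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  char *p = realloc(NULL, 16);      /* same as malloc(16) */
  assert(p != NULL);
  strcpy(p, "dnmalloc");

  p = realloc(p, 4096);             /* grow: old contents are preserved */
  assert(p != NULL);
  assert(strcmp(p, "dnmalloc") == 0);

  free(p);
  return 0;
}
#endif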
4961
4962/*
4963 ---------------------------posix_memalign ----------------------------
4964*/
4965
4966#if __STD_C
4967DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4968#else
4969DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4970#endif
4971{
4972 mstate av;
4973
4974 if (alignment % sizeof(void *) != 0)
4975 return EINVAL;
4976 if ((alignment & (alignment - 1)) != 0)
4977 return EINVAL;
4978
4979 av = get_malloc_state();
4980 if (!av || av->max_fast == 0) malloc_consolidate(av);
4981 *memptr = mEMALIGn(alignment, bytes);
4982
4983 return (*memptr != NULL ? 0 : ENOMEM);
4984}
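
/*
  posix_memalign, provided above, reports failure through its return
  value rather than through errno, and requires an alignment that is a
  power of two and a multiple of sizeof(void*). A short usage sketch
  (kept under #if 0; assumes the standard declaration in <stdlib.h>):
*/
#if 0
#include <assert.h>
#include <errno.h>
#include <stdlib.h>

int main(void)
{
  void *buf = NULL;
  int rc;

  rc = posix_memalign(&buf, 64, 1000);   /* 64-byte aligned block */
  assert(rc == 0);                       /* 0 on success, EINVAL/ENOMEM otherwise */
  assert(((unsigned long)buf % 64) == 0);
  free(buf);

  rc = posix_memalign(&buf, 3, 100);     /* 3 is neither a power of two
                                            nor a multiple of sizeof(void*) */
  assert(rc == EINVAL);
  return 0;
}
#endif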
4985
4986/*
4987 ------------------------------ memalign ------------------------------
4988*/
4989
4990#if __STD_C
4991DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
4992#else
4993DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
4994#endif
4995{
4996 INTERNAL_SIZE_T nb; /* padded request size */
4997 char* m; /* memory returned by malloc call */
4998 chunkinfoptr p; /* corresponding chunk */
4999 char* brk; /* alignment point within p */
5000 chunkinfoptr newp; /* chunk to return */
5001 INTERNAL_SIZE_T newsize; /* its size */
5002 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5003 chunkinfoptr remainder; /* spare room at end to split off */
5004 CHUNK_SIZE_T remainder_size; /* its size */
5005 INTERNAL_SIZE_T size;
5006 mstate av;
5007
5008 /* If the request needs less alignment than we provide anyway, just relay to malloc */
5009
5010 if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);
5011
5012 /* Otherwise, ensure that it is at least a minimum chunk size */
5013
5014 if (alignment < MINSIZE) alignment = MINSIZE;
5015
5016 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5017 if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
5018 size_t a = MALLOC_ALIGNMENT * 2;
5019 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
5020 alignment = a;
5021 }
5022
5023 checked_request2size(bytes, nb);
5024
5025 /*
5026 Strategy: find a spot within that chunk that meets the alignment
5027 request, and then possibly free the leading and trailing space.
5028 */
5029
5030
5031 /* Call malloc with worst case padding to hit alignment. */
5032
5033 m = (char*)(mALLOc(nb + alignment + MINSIZE));
5034
5035 if (m == 0) return 0; /* propagate failure */
5036
5037 av = get_malloc_state();
5038
5039 p = hashtable_lookup((mchunkptr) m);
5040
5041 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
5042
5043 /*
5044 Find an aligned spot inside chunk. Since we need to give back
5045 leading space in a chunk of at least MINSIZE, if the first
5046 calculation places us at a spot with less than MINSIZE leader,
5047 we can move to the next aligned spot -- we've allocated enough
5048 total room so that this is always possible.
5049 */
5050
5051 brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
5052 -((signed long) alignment)));
5053 if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
5054 brk += alignment;
5055
5056 newp = cireg_getfree();
5057 newp->chunk = (mchunkptr)brk;
5058 leadsize = brk - (char*)(chunk(p));
5059 newsize = chunksize(p) - leadsize;
5060
5061 /* For mmapped chunks, just adjust offset */
5062 if (UNLIKELY(chunk_is_mmapped(p))) {
5063 newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
5064 set_head(newp, newsize|IS_MMAPPED|INUSE);
5065 hashtable_remove_mmapped(chunk(p));
5066 freecilst_add(p);
5067 hashtable_add(newp);
5068 guard_set(av->guard_stored, newp, bytes, nb);
5069 return chunk(newp);
5070 }
5071
5072 /* Otherwise, give back leader, use the rest */
5073 set_head(newp, newsize | PREV_INUSE | INUSE);
5074 set_head_size(p, leadsize);
5075 set_all_inuse(newp);
5076 hashtable_add(newp); /* 20.05.2008 rw */
5077 guard_set(av->guard_stored, p, 0, leadsize);
5078 fREe(chunk(p));
5079 p = newp;
5080
5081 assert (newsize >= nb &&
5082 (((PTR_UINT)(chunk(p))) % alignment) == 0);
5083 }
5084
5085 /* Also give back spare room at the end */
5086 if (!chunk_is_mmapped(p)) {
5087 size = chunksize(p);
5088 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
5089 remainder = cireg_getfree();
5090 remainder_size = size - nb;
5091 remainder->chunk = chunk_at_offset(chunk(p), nb);
5092 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
5093 set_head_size(p, nb);
5094 hashtable_add(remainder); /* 20.05.2008 rw */
5095 guard_set(av->guard_stored, remainder, 0, remainder_size);
5096 fREe(chunk(remainder));
5097 }
5098 }
5099
5100 check_inuse_chunk(p);
5101 guard_set(av->guard_stored, p, bytes, nb);
5102 return chunk(p);
5103}
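
/*
  The "misaligned" branch above locates the first address at or after m
  that is a multiple of the (power-of-two) alignment by computing
  (m + alignment - 1) & -alignment. A tiny stand-alone illustration of
  that expression on plain integers (hypothetical name align_up, kept
  under #if 0 and never compiled):
*/
#if 0
#include <assert.h>

static unsigned long align_up(unsigned long addr, unsigned long alignment)
{
  /* alignment is a power of two, so -alignment is a mask that clears
     the low log2(alignment) bits */
  return (addr + alignment - 1) & -alignment;
}

int main(void)
{
  assert(align_up(0x1001, 0x10) == 0x1010);
  assert(align_up(0x1010, 0x10) == 0x1010);  /* already aligned: unchanged */
  assert(align_up(1, 4096) == 4096);
  return 0;
}
#endif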
5104
5105/*
5106 ------------------------------ calloc ------------------------------
5107*/
5108
5109#if __STD_C
5110DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5111#else
5112DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5113#endif
5114{
5115 chunkinfoptr p;
5116 CHUNK_SIZE_T clearsize;
5117 CHUNK_SIZE_T nclears;
5118 INTERNAL_SIZE_T* d;
5119 Void_t* mem;
5120
5121
5122 mem = mALLOc(n_elements * elem_size);
5123
5124 if (mem != 0) {
5125 p = hashtable_lookup(mem);
5126
5127 if (!chunk_is_mmapped(p))
5128 {
5129 /*
5130 Unroll clear of <= 40 bytes (80 if 8byte sizes)
5131 We know that contents have an even number of
5132 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5133 */
5134
5135 d = (INTERNAL_SIZE_T*)mem;
5136 clearsize = chunksize(p);
5137 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5138 assert(nclears >= 2);
5139
5140 if (nclears > 10) {
5141 MALLOC_ZERO(d, clearsize);
5142 }
5143
5144 else {
5145 *(d+0) = 0;
5146 *(d+1) = 0;
5147 if (nclears > 2) {
5148 *(d+2) = 0;
5149 *(d+3) = 0;
5150 if (nclears > 4) {
5151 *(d+4) = 0;
5152 *(d+5) = 0;
5153 if (nclears > 6) {
5154 *(d+6) = 0;
5155 *(d+7) = 0;
5156 if (nclears > 8) {
5157 *(d+8) = 0;
5158 *(d+9) = 0;
5159 }
5160 }
5161 }
5162 }
5163 }
5164 }
5165#if ! MMAP_CLEARS
5166 else
5167 {
5168 d = (INTERNAL_SIZE_T*)mem;
5169 clearsize = chunksize(p);
5170 MALLOC_ZERO(d, clearsize);
5171 }
5172#endif
5173 /* Set guard again, since we just cleared it
5174 */
5175 guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5176 }
5177
5178 return mem;
5179}
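
/*
  cALLOc above computes n_elements * elem_size and zeroes the block in
  INTERNAL_SIZE_T-sized words. Callers who cannot rule out very large
  element counts may prefer to guard the multiplication themselves; the
  wrapper below is a hypothetical sketch of such a check (not part of
  dnmalloc, kept under #if 0, and written against the standard calloc
  interface):
*/
#if 0
#include <stddef.h>
#include <stdlib.h>

/* Return NULL instead of an undersized block when n * size would wrap. */
static void *checked_calloc(size_t n, size_t size)
{
  if (size != 0 && n > (size_t)-1 / size)
    return NULL;                 /* n * size would overflow size_t */
  return calloc(n, size);
}
#endif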
5180
5181/*
5182 ------------------------------ valloc ------------------------------
5183*/
5184
5185#if __STD_C
5186DL_STATIC Void_t* vALLOc(size_t bytes)
5187#else
5188DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5189#endif
5190{
5191 /* Ensure initialization */
5192 mstate av = get_malloc_state();
5193 if (!av || av->max_fast == 0) {
5194 malloc_consolidate(av);
5195 av = get_malloc_state();
5196 }
5197 return mEMALIGn(av->pagesize, bytes);
5198}
5199
5200/*
5201 ------------------------------ pvalloc ------------------------------
5202*/
5203
5204
5205#if __STD_C
5206DL_STATIC Void_t* pVALLOc(size_t bytes)
5207#else
5208DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5209#endif
5210{
5211 mstate av = get_malloc_state();
5212 size_t pagesz;
5213
5214 /* Ensure initialization */
5215 if (!av || av->max_fast == 0) {
5216 malloc_consolidate(av);
5217 av = get_malloc_state();
5218 }
5219 pagesz = av->pagesize;
5220 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5221}
5222
5223
5224/*
5225 ------------------------------ malloc_trim ------------------------------
5226*/
5227
5228#if __STD_C
5229DL_STATIC int mTRIm(size_t pad)
5230#else
5231DL_STATIC int mTRIm(pad) size_t pad;
5232#endif
5233{
5234 mstate av = get_malloc_state();
5235 /* Ensure initialization/consolidation */
5236 malloc_consolidate(av);
5237 av = get_malloc_state();
5238#ifndef MORECORE_CANNOT_TRIM
5239 if (morecore32bit(av))
5240 return sYSTRIm(pad, av);
5241 else
5242 return 0;
5243#else
5244 return 0;
5245#endif
5246}
5247
5248
5249
5250/*
5251 ------------------------- malloc_usable_size -------------------------
5252*/
5253
5254#if __STD_C
5255DL_STATIC size_t mUSABLe(Void_t* mem)
5256#else
5257DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5258#endif
5259{
5260 chunkinfoptr p;
5261 if (mem != 0) {
5262 p = hashtable_lookup(mem);
5263 if (p && inuse(p)) return chunksize(p);
5264 }
5265 return 0;
5266}
5267
5268/*
5269 ------------------------------ mallinfo ------------------------------
5270*/
5271
5272DL_STATIC struct mallinfo mALLINFo()
5273{
5274 mstate av = get_malloc_state();
5275 struct mallinfo mi;
5276 unsigned int i;
5277 mbinptr b;
5278 chunkinfoptr p;
5279 INTERNAL_SIZE_T avail;
5280 INTERNAL_SIZE_T fastavail;
5281 int nblocks;
5282 int nfastblocks;
5283
5284 /* Ensure initialization */
5285 if (!av || av->top == 0) {
5286 malloc_consolidate(av);
5287 av = get_malloc_state();
5288 }
5289 check_malloc_state();
5290
5291 /* Account for top */
5292 avail = chunksize(av->top);
5293 nblocks = 1; /* top always exists */
5294
5295 /* traverse fastbins */
5296 nfastblocks = 0;
5297 fastavail = 0;
5298
5299 for (i = 0; i < NFASTBINS; ++i) {
5300 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5301 ++nfastblocks;
5302 fastavail += chunksize(p);
5303 }
5304 }
5305
5306 avail += fastavail;
5307
5308 /* traverse regular bins */
5309 for (i = 1; i < NBINS; ++i) {
5310 b = bin_at(av, i);
5311 for (p = last(b); p != b; p = p->bk) {
5312 ++nblocks;
5313 avail += chunksize(p);
5314 }
5315 }
5316
5317 mi.smblks = nfastblocks;
5318 mi.ordblks = nblocks;
5319 mi.fordblks = avail;
5320 mi.uordblks = av->sbrked_mem - avail;
5321 mi.arena = av->sbrked_mem;
5322 mi.hblks = av->n_mmaps;
5323 mi.hblkhd = av->mmapped_mem;
5324 mi.fsmblks = fastavail;
5325 mi.keepcost = chunksize(av->top);
5326 mi.usmblks = av->max_total_mem;
5327 return mi;
5328}
5329
5330/*
5331 ------------------------------ malloc_stats ------------------------------
5332*/
5333
5334DL_STATIC void mSTATs()
5335{
5336 struct mallinfo mi = mALLINFo();
5337
5338 fprintf(stderr, "hashtable = %10lu MB\n",
5339 (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5340 fprintf(stderr, "max system bytes = %10lu\n",
5341 (CHUNK_SIZE_T)(mi.usmblks));
5342 fprintf(stderr, "system bytes = %10lu (%10lu sbrked, %10lu mmapped)\n",
5343 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5344 (CHUNK_SIZE_T)(mi.arena),
5345 (CHUNK_SIZE_T)(mi.hblkhd));
5346 fprintf(stderr, "in use bytes = %10lu\n",
5347 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5348
5349}
5350
5351
5352/*
5353 ------------------------------ mallopt ------------------------------
5354*/
5355
5356#if __STD_C
5357DL_STATIC int mALLOPt(int param_number, int value)
5358#else
5359DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5360#endif
5361{
5362 mstate av = get_malloc_state();
5363 /* Ensure initialization/consolidation */
5364 malloc_consolidate(av);
5365 av = get_malloc_state();
5366
5367 switch(param_number) {
5368 case M_MXFAST:
5369 if (value >= 0 && value <= MAX_FAST_SIZE) {
5370 set_max_fast(av, value);
5371 return 1;
5372 }
5373 else
5374 return 0;
5375
5376 case M_TRIM_THRESHOLD:
5377 av->trim_threshold = value;
5378 return 1;
5379
5380 case M_TOP_PAD:
5381 av->top_pad = value;
5382 return 1;
5383
5384 case M_MMAP_THRESHOLD:
5385 av->mmap_threshold = value;
5386 return 1;
5387
5388 case M_MMAP_MAX:
5389 if (value != 0)
5390 return 0;
5391 av->n_mmaps_max = value;
5392 return 1;
5393
5394 default:
5395 return 0;
5396 }
5397}
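
/*
  A short usage sketch for the tuning interface above, assuming the
  public mallopt() name and the M_ constants are visible to the caller
  (kept under #if 0, never compiled). The return value is 1 when the
  parameter was accepted and 0 otherwise; M_MMAP_MAX only accepts 0
  here, since dnmalloc does not limit its mmap use.
*/
#if 0
#include <assert.h>

int main(void)
{
  assert(mallopt(M_TOP_PAD, 128 * 1024) == 1);        /* extra sbrk padding   */
  assert(mallopt(M_TRIM_THRESHOLD, 256 * 1024) == 1); /* trim top beyond 256k */
  assert(mallopt(M_MMAP_MAX, 10) == 0);               /* rejected: must be 0  */
  return 0;
}
#endif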
5398
5399
5400/* $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $ */
5401
5402/*
5403 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5404 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5405 *
5406 * Permission to use, copy, modify, and distribute this software for any
5407 * purpose with or without fee is hereby granted, provided that the above
5408 * copyright notice and this permission notice appear in all copies.
5409 *
5410 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5411 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5412 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5413 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5414 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5415 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5416 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5417 */
5418
5419/*
5420 * Arc4 random number generator for OpenBSD.
5421 *
5422 * This code is derived from section 17.1 of Applied Cryptography,
5423 * second edition, which describes a stream cipher allegedly
5424 * compatible with RSA Labs "RC4" cipher (the actual description of
5425 * which is a trade secret). The same algorithm is used as a stream
5426 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5427 *
5428 * Here the stream cipher has been modified always to include the time
5429 * when initializing the state. That makes it impossible to
5430 * regenerate the same random sequence twice, so this can't be used
5431 * for encryption, but will generate good random numbers.
5432 *
5433 * RC4 is a registered trademark of RSA Laboratories.
5434 */
5435
5436/* Moved u_int8_t -> unsigned char (portability)
5437 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5438 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5439 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5440 * This is arc4random(3) using urandom.
5441 */
5442
5443#include <fcntl.h>
5444#include <limits.h>
5445#include <stdlib.h>
5446#include <sys/param.h>
5447#include <sys/time.h>
5448
5449struct arc4_stream {
5450 unsigned char i;
5451 unsigned char j;
5452 unsigned char s[256];
5453};
5454
5455static int rs_initialized;
5456static struct arc4_stream rs;
5457static pid_t arc4_stir_pid;
5458static int arc4_count;
5459
5460static unsigned char arc4_getbyte(void);
5461
5462static void
5463arc4_init(void)
5464{
5465 int n;
5466
5467 for (n = 0; n < 256; n++)
5468 rs.s[n] = n;
5469 rs.i = 0;
5470 rs.j = 0;
5471}
5472
5473static inline void
5474arc4_addrandom(unsigned char *dat, int datlen)
5475{
5476 int n;
5477 unsigned char si;
5478
5479 rs.i--;
5480 for (n = 0; n < 256; n++) {
5481 rs.i = (rs.i + 1);
5482 si = rs.s[rs.i];
5483 rs.j = (rs.j + si + dat[n % datlen]);
5484 rs.s[rs.i] = rs.s[rs.j];
5485 rs.s[rs.j] = si;
5486 }
5487 rs.j = rs.i;
5488}
5489
5490#ifdef HAVE_SCHED_H
5491#include <sched.h>
5492#endif
5493
5494static void
5495arc4_stir(void)
5496{
5497 int i;
5498 struct {
5499 struct timeval tv1;
5500 struct timeval tv2;
5501 u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
5502 } rdat;
5503#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5504 size_t sz = 0;
5505 int fd;
5506#endif
5507
5508 gettimeofday(&rdat.tv1, NULL);
5509
5510
5511 if (!rs_initialized) {
5512 arc4_init();
5513 rs_initialized = 1;
5514 }
5515
5516#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5517
5518#ifdef HAVE_SCHED_YIELD
5519 /* Yield the processor to introduce some random delay. */
5520 (void) sched_yield();
5521#endif
5522
5523 /*
5524 * Reading /dev/urandom is skipped on *BSD (pthread problem in multithreaded code there).
5525 */
5526 fd = open("/dev/urandom", O_RDONLY);
5527 if (fd != -1) {
5528 sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
5529 close(fd);
5530 }
5531 if (sz > sizeof (rdat.rnd))
5532 sz = 0;
5533 #endif
5534
5535 arc4_stir_pid = getpid();
5536 gettimeofday(&rdat.tv2, NULL);
5537
5538 arc4_addrandom((void *)&rdat, sizeof(rdat));
5539
5540 /*
5541 * Discard early keystream, as per recommendations in:
5542 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
5543 */
5544 for (i = 0; i < 256; i++)
5545 (void)arc4_getbyte();
5546 arc4_count = 1600000;
5547}
5548
5549static unsigned char
5550arc4_getbyte(void)
5551{
5552 unsigned char si, sj;
5553
5554 rs.i = (rs.i + 1);
5555 si = rs.s[rs.i];
5556 rs.j = (rs.j + si);
5557 sj = rs.s[rs.j];
5558 rs.s[rs.i] = sj;
5559 rs.s[rs.j] = si;
5560 return (rs.s[(si + sj) & 0xff]);
5561}
5562
5563
5564 /* Changed to return char* */
5565static char *
5566dnmalloc_arc4random(void)
5567{
5568 static char val[4];
5569
5570 /* We only call this once, hence no need for locking. */
5571
5572 /* _ARC4_LOCK(); */
5573 arc4_count -= 4;
5574 if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5575 arc4_stir();
5576
5577 val[0] = (char) arc4_getbyte();
5578 val[1] = (char) arc4_getbyte();
5579 val[2] = (char) arc4_getbyte();
5580 val[3] = (char) arc4_getbyte();
5581
5582 arc4_stir();
5583 /* _ARC4_UNLOCK(); */
5584 return val;
5585}
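
/*
  dnmalloc_arc4random() above hands back a pointer to 4 freshly
  generated bytes in a static buffer. A minimal sketch of how a caller
  could copy that value out before the buffer is reused (hypothetical
  caller code under #if 0, not dnmalloc's own canary initialization
  path):
*/
#if 0
#include <string.h>

static unsigned int canary_seed;

static void init_canary_seed(void)
{
  char *rnd = dnmalloc_arc4random();               /* 4 random bytes, static storage */
  memcpy(&canary_seed, rnd, sizeof(canary_seed));  /* copy before the next call      */
}
#endif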
5586
5587#else
5588int dnmalloc_pthread_init() { return 0; }
5589#endif /* ! USE_SYSTEM_MALLOC */