source: trunk/src/dnmalloc.c@ 226

Last change on this file since 226 was 187, checked in by katerina, 16 years ago

Fix inconsistent chunksize on 64bit systems (ticket #125).

File size: 164.0 KB
RevLine 
[171]1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
[172]2 Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3 and Rainer Wichmann
4
[171]5 The authors can be contacted by:
6 Email: dnmalloc@fort-knox.org
7 Address:
8 Yves Younan
9 Celestijnenlaan 200A
10 B-3001 Heverlee
11 Belgium
12
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 2.1 of the License, or (at your option) any later version.
17
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26
27*/
28
[172]29/* Current version: dnmalloc 1.0 */
[171]30/* Includes arc4random from OpenBSD, which is under the BSD license */
31
32/* Versions:
33 0.1-0.5:
34 Proof of concept implementation by Hans Van den Eynden and Yves Younan
35 0.6-0.7:
36 Bug fixes by Yves Younan
37 0.8-1.0.beta4:
38 Reimplementation from scratch by Yves Younan
39 1.0.beta4:
40 Public release
41 1.0.beta5:
42 Prev_chunkinfo speeded up, was really slow because of the way we did lookups
43 A freechunkinfo region is now freed when it is completely empty and
44 not the current one
45
46 1.0 (Rainer Wichmann [support at la dash samhna dot org]):
47 ---------------------
48
49 Compiler warnings fixed
50 Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
51 (and what the standard says)
52 Removed unused code
53 Fix assert(aligned_OK(chunk(newp)));
54 -> assert(aligned_OK(chunk(oldp)));
55 Fix statistics in sYSMALLOc
56 Fix overwrite of av->top in sYSMALLOc
57 Provide own assert(), glibc assert() doesn't work (calls malloc)
58 Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
59 Remove cfree, independent_cmalloc, independent_comalloc (untested
60 public functions not covered by any standard)
61 Provide posix_memalign (that one is in the standard)
62 Move the malloc_state struct to mmapped memory protected by guard pages
63 Add arc4random function to initialize random canary on startup
64 Implement random canary at end of (re|m)alloced/memaligned buffer,
65 check at free/realloc
66 Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
67 Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
68
69 Profiling: Reorder branches in hashtable_add, next_chunkinfo,
70 prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
71 checked_request2size (gcc predicts if{} branch to be taken).
72 Use UNLIKELY macro (gcc __builtin_expect()) where branch
73 reordering would make the code awkward.
74
75 Portability: Hashtable always covers full 32bit address space to
76 avoid assumptions about memory layout.
77 Portability: Try hard to enforce mapping of mmapped memory into
78 32bit address space, even on 64bit systems.
79 Portability: Provide a dnmalloc_pthread_init() function, since
80 pthread locking on HP-UX only works if initialized
81 after the application has entered main().
82 Portability: On *BSD, pthread_mutex_lock is unusable since it
83 calls malloc, use spinlocks instead.
84 Portability: Dynamically detect whether the heap is within
85 32bit address range (e.g. on Linux x86_64, it isn't).
86 Don't use sbrk() if the heap is mapped to an address
87 outside the 32bit range, since this doesn't work with
88 the hashtable. New macro morecore32bit.
89
90 Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
91 FreeBSD/pthread, and Solaris 10 i386/pthread.
92 Fail on: OpenBSD/pthread (in _thread_machdep_save_float_state),
93 might be related to OpenBSD pthread internals (??).
 94 Non-threaded version (#undef USE_MALLOC_LOCK)
95 works on OpenBSD.
96
[172]97 further to 1.0:
98 Valgrind client requests inserted (#define USE_VALGRIND)
[173]99 Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
[172]100 Portability: minsize = 32 bit on 64bit architecture
101 Minor cleanups
[173]102 Fix: eliminate prototypes for memset, memcpy (they're in string.h)
[172]103
[171]104 There may be some bugs left in this version. please use with caution.
105*/
106
107
108
109/* Please read the following papers for documentation:
110
111 Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
112 Countermeasures against Current and Future Code Injection Attacks,
113 Proceedings of the Third IEEE International Information Assurance
114 Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
115 IEEE, IEEE Press.
116 http://www.fort-knox.org/younany_countermeasures.pdf
117
118 Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
119 Eynden. Security of Memory Allocators for C and C++. Technical Report
120 CW419, Departement Computerwetenschappen, Katholieke Universiteit
121 Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
122
123 */
124
125/* Compile:
126 gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
127 "Link":
128 Dynamic:
129 gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
130 Static:
131 ar -rv libdnmalloc.a dnmalloc-portable.o
132
133*/
134
135/*
136 dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
137 dlmalloc was released as public domain and contained the following license:
138
139 "This is a version (aka dlmalloc) of malloc/free/realloc written by
140 Doug Lea and released to the public domain. Use, modify, and
141 redistribute this code without permission or acknowledgement in any
142 way you wish. Send questions, comments, complaints, performance
143 data, etc to dl@cs.oswego.edu
144
145 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
146
147 Note: There may be an updated version of this malloc obtainable at
148 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
149 Check before installing!"
150
151*/
152
153/* The following preprocessor macros are tested,
154 * and hence should have #define directives:
155 *
156 * HAVE_CONFIG_H Define to #include "config.h" (autoconf-generated)
157 *
158 * HAVE_UNISTD_H Define to #include <unistd.h>
159 *
160 * HAVE_SYS_UIO_H Define to #include <sys/uio.h> (for writev)
161 * HAVE_WRITEV Define if the 'writev' function is available
162 *
163 * HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
164 *
165 * HAVE_MALLOC_H Define to #include <malloc.h> (for struct mallinfo)
166 *
167 * HAVE_FCNTL_H Define to #include <fcntl.h>
168 *
169 * HAVE_SYS_MMAN_H Define to #include <sys/mman.h>
170 * HAVE_MMAP Define if the 'mmap' function is available.
171 *
172 * HAVE_SCHED_H Define to #include <sched.h>
 * HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
174 */
175
176
177/*
178 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
179 compiler, or a C compiler sufficiently close to ANSI to get away
180 with it.
181*/
182
183#ifdef HAVE_CONFIG_H
184#include "config.h"
185#endif
186
[172]187#ifdef USE_VALGRIND
188#include <valgrind/memcheck.h>
189#else
190#define VALGRIND_FREELIKE_BLOCK(a,b) ((void)0)
191#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
192#define VALGRIND_CREATE_MEMPOOL(a,b,c) ((void)0)
193#define VALGRIND_MEMPOOL_ALLOC(a,b,c) ((void)0)
194#define VALGRIND_MEMPOOL_FREE(a,b) ((void)0)
195#define VALGRIND_DESTROY_MEMPOOL(a) ((void)0)
196#define VALGRIND_MAKE_MEM_DEFINED(a,b) ((void)0)
197#define VALGRIND_MAKE_MEM_UNDEFINED(a,b) ((void)0)
198#define VALGRIND_MAKE_MEM_NOACCESS(a,b) ((void)0)
199#endif
200
[171]201#if defined (__GNUC__) && __GNUC__ > 2
202# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
203# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
204# define __attribute_malloc__ __attribute__ ((__malloc__))
205#else
206# define LIKELY(x) (x)
207# define UNLIKELY(x) (x)
208# define __attribute_malloc__ /* Ignore */
209#endif
210
211/*
212 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
213 large blocks. This is currently only possible on Linux with
214 kernel versions newer than 1.3.77.
215*/
216
217#ifndef HAVE_MREMAP
218#ifdef linux
219#define HAVE_MREMAP 1
220#define _GNU_SOURCE
221#else
222#define HAVE_MREMAP 0
223#endif
224#endif /* HAVE_MREMAP */
225
226
227
228#ifndef __STD_C
/* __STD_C is nonzero when compiled by an ANSI-standard C compiler
 * (which predefines __STDC__) or a C++ compiler.  Note: the correct
 * predefined macro for C++ is __cplusplus (double leading underscore);
 * the historical dlmalloc test used the misspelled _cplusplus, which
 * no compiler defines. */
229#if defined(__STDC__) || defined(__cplusplus)
230#define __STD_C 1
231#else
232#define __STD_C 0
233#endif
234#endif /*__STD_C*/
235
236
237/*
238 Void_t* is the pointer type that malloc should say it returns
239*/
240
241#ifndef Void_t
242#if (__STD_C || defined(WIN32))
243#define Void_t void
244#else
245#define Void_t char
246#endif
247#endif /*Void_t*/
248
249#if __STD_C
250#include <stddef.h> /* for size_t */
251#else
252#include <sys/types.h>
253#endif
254
255#if !defined(USE_SYSTEM_MALLOC)
256
257#ifdef __cplusplus
258extern "C" {
259#endif
260
261/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
262
263#ifdef HAVE_UNISTD_H
264#include <unistd.h>
265#endif
266
267#ifdef HAVE_SYS_UIO_H
268#include <sys/uio.h>
269#endif
270
271#include <stdio.h> /* needed for malloc_stats */
272#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
273
274#include <string.h>
275#include <stdlib.h>
276
277#include <sys/resource.h>
278
279extern int errno;
280
281 /* 0: lazy,
282 * 1: medium (assertions compiled in),
283 * 2: high (guard pages at end of hash table and ciregions)
284 * 3: paranoid (guards at end of each allocated chunk, check at free)
285 */
286#ifndef PARANOIA
287#define PARANOIA 9
288#endif
289
290 /* Using assert() with multithreading will cause the code
291 * to deadlock since glibc __assert_fail will call malloc().
292 * We need our very own assert().
293 */
294typedef void assert_handler_tp(const char * error, const char *file, int line);
295
296#if PARANOIA > 0
297
298#ifdef NDEBUG
299#undef NDEBUG
300#endif
301
302static void default_assert_handler(const char *error,
303 const char *file, int line)
304{
305#ifdef HAVE_WRITEV
306 struct iovec iov[5];
307 char * i1 = "assertion failed (";
308 char * i3 = "): ";
309 char * i5 = "\n";
310
311 iov[0].iov_base = i1; iov[0].iov_len = strlen(i1);
312 iov[1].iov_base = (char*) file; iov[1].iov_len = strlen(file);
313 iov[2].iov_base = i3; iov[2].iov_len = strlen(i3);
314 iov[3].iov_base = (char*) error; iov[3].iov_len = strlen(error);
315 iov[4].iov_base = i5; iov[4].iov_len = strlen(i5);
316 writev(STDERR_FILENO, iov, 5);
317#else
318 fputs("assertion failed (", stderr);
319 fputs(file, stderr);
320 fputs("): ", stderr);
321 fputs(error, stderr);
322 fputc('\n', stderr);
323#endif
[172]324 (void) line;
[171]325 abort();
326}
327static assert_handler_tp *assert_handler = default_assert_handler;
328
[172]329
[171]330#define assert(x) \
331 do { \
332 if (UNLIKELY(!(x))) { \
333 assert_handler(#x, __FILE__, __LINE__); \
334 } \
335 } while (0)
336
337#else
338
339static assert_handler_tp *assert_handler = NULL;
340#define NDEBUG
341#define assert(x) ((void)0)
342
343#endif
344
345/* Install a replacement assertion-failure handler and return the one
 * previously installed (initially default_assert_handler, which writes
 * to stderr and aborts).  Parameter renamed from 'new': this code is
 * exposed to C++ via the extern "C" wrapper above, and 'new' is a C++
 * keyword. C callers are unaffected (parameter names are not part of
 * the interface). */
346assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *handler)
347{
348 assert_handler_tp *old = assert_handler;
349 assert_handler = handler;
350 return old;
351}
351
352
353#include <stdarg.h>
354
355 /* define for debugging */
356 /* #define DNMALLOC_DEBUG */
357
[172]358 /* Do some extra checks? if not, covered by assert()s */
[171]359 /* #define DNMALLOC_CHECKS */
360
361 /*
362 The unsigned integer type used for comparing any two chunk sizes.
363 This should be at least as wide as size_t, but should not be signed.
364 */
365
366#ifndef CHUNK_SIZE_T
367#define CHUNK_SIZE_T unsigned long
368#endif
369
370/*
371 The unsigned integer type used to hold addresses when they are
372 manipulated as integers. Except that it is not defined on all
373 systems, intptr_t would suffice.
374*/
375#ifndef PTR_UINT
376#define PTR_UINT unsigned long
377#endif
378
379
380/*
381 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
382 of chunk sizes.
383
384 The default version is the same as size_t.
385
386 While not strictly necessary, it is best to define this as an
387 unsigned type, even if size_t is a signed type. This may avoid some
388 artificial size limitations on some systems.
389
390 On a 64-bit machine, you may be able to reduce malloc overhead by
391 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
392 expense of not being able to handle more than 2^32 of malloced
393 space. If this limitation is acceptable, you are encouraged to set
394 this unless you are on a platform requiring 16byte alignments. In
395 this case the alignment requirements turn out to negate any
396 potential advantages of decreasing size_t word size.
397
398 Implementors: Beware of the possible combinations of:
399 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
400 and might be the same width as int or as long
401 - size_t might have different width and signedness as INTERNAL_SIZE_T
402 - int and long might be 32 or 64 bits, and might be the same width
403 To deal with this, most comparisons and difference computations
404 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
405 aware of the fact that casting an unsigned int to a wider long does
406 not sign-extend. (This also makes checking for negative numbers
407 awkward.) Some of these casts result in harmless compiler warnings
408 on some systems.
409*/
410
411#ifndef INTERNAL_SIZE_T
412#define INTERNAL_SIZE_T size_t
413#endif
414
415/* The corresponding word size */
416#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
417
418
419
420/*
421 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
422 It must be a power of two at least 2 * SIZE_SZ, even on machines
423 for which smaller alignments would suffice. It may be defined as
424 larger than this though. Note however that code and data structures
425 are optimized for the case of 8-byte alignment.
426*/
427
428
429#ifndef MALLOC_ALIGNMENT
430#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
431#endif
432
433/* The corresponding bit mask value */
434#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
435
436
437
438/*
439 REALLOC_ZERO_BYTES_FREES should be set if a call to
440 realloc with zero bytes should be the same as a call to free.
441 Some people think it should. Otherwise, since this malloc
442 returns a unique pointer for malloc(0), so does realloc(p, 0).
443*/
444
445#define REALLOC_ZERO_BYTES_FREES
446
447/*
448 TRIM_FASTBINS controls whether free() of a very small chunk can
449 immediately lead to trimming. Setting to true (1) can reduce memory
450 footprint, but will almost always slow down programs that use a lot
451 of small chunks.
452
453 Define this only if you are willing to give up some speed to more
454 aggressively reduce system-level memory footprint when releasing
455 memory in programs that use many small chunks. You can get
456 essentially the same effect by setting MXFAST to 0, but this can
457 lead to even greater slowdowns in programs using many small chunks.
458 TRIM_FASTBINS is an in-between compile-time option, that disables
459 only those chunks bordering topmost memory from being placed in
460 fastbins.
461*/
462
463#ifndef TRIM_FASTBINS
464#define TRIM_FASTBINS 0
465#endif
466
467
468/*
469 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
470 This is necessary when you only want to use this malloc in one part
471 of a program, using your regular system malloc elsewhere.
472*/
473
474/* #define USE_DL_PREFIX */
475
476
477/*
478 USE_MALLOC_LOCK causes wrapper functions to surround each
479 callable routine with pthread mutex lock/unlock.
480
481 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
482*/
483
484/* #define USE_MALLOC_LOCK */
485
486
487/*
488 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
489 actually a wrapper function that first calls MALLOC_PREACTION, then
490 calls the internal routine, and follows it with
491 MALLOC_POSTACTION. This is needed for locking, but you can also use
492 this, without USE_MALLOC_LOCK, for purposes of interception,
493 instrumentation, etc. It is a sad fact that using wrappers often
494 noticeably degrades performance of malloc-intensive programs.
495*/
496
497
498#ifdef USE_MALLOC_LOCK
499#define USE_PUBLIC_MALLOC_WRAPPERS
500#else
501/* #define USE_PUBLIC_MALLOC_WRAPPERS */
502#endif
503
504
505/*
506 Two-phase name translation.
507 All of the actual routines are given mangled names.
508 When wrappers are used, they become the public callable versions.
509 When DL_PREFIX is used, the callable names are prefixed.
510*/
511
512#ifndef USE_PUBLIC_MALLOC_WRAPPERS
513#define cALLOc public_cALLOc
514#define fREe public_fREe
515#define mALLOc public_mALLOc
516#define mEMALIGn public_mEMALIGn
517#define posix_mEMALIGn public_posix_mEMALIGn
518#define rEALLOc public_rEALLOc
519#define vALLOc public_vALLOc
520#define pVALLOc public_pVALLOc
521#define mALLINFo public_mALLINFo
522#define mALLOPt public_mALLOPt
523#define mTRIm public_mTRIm
524#define mSTATs public_mSTATs
525#define mUSABLe public_mUSABLe
526#endif
527
528#ifdef USE_DL_PREFIX
529#define public_cALLOc dlcalloc
530#define public_fREe dlfree
531#define public_mALLOc dlmalloc
532#define public_mEMALIGn dlmemalign
533#define public_posix_mEMALIGn dlposix_memalign
534#define public_rEALLOc dlrealloc
535#define public_vALLOc dlvalloc
536#define public_pVALLOc dlpvalloc
537#define public_mALLINFo dlmallinfo
538#define public_mALLOPt dlmallopt
539#define public_mTRIm dlmalloc_trim
540#define public_mSTATs dlmalloc_stats
541#define public_mUSABLe dlmalloc_usable_size
542#else /* USE_DL_PREFIX */
543#define public_cALLOc calloc
544#define public_fREe free
545#define public_mALLOc malloc
546#define public_mEMALIGn memalign
547#define public_posix_mEMALIGn posix_memalign
548#define public_rEALLOc realloc
549#define public_vALLOc valloc
550#define public_pVALLOc pvalloc
551#define public_mALLINFo mallinfo
552#define public_mALLOPt mallopt
553#define public_mTRIm malloc_trim
554#define public_mSTATs malloc_stats
555#define public_mUSABLe malloc_usable_size
556#endif /* USE_DL_PREFIX */
557
558
559/*
560 HAVE_MEMCPY should be defined if you are not otherwise using
561 ANSI STD C, but still have memcpy and memset in your C library
562 and want to use them in calloc and realloc. Otherwise simple
563 macro versions are defined below.
564
565 USE_MEMCPY should be defined as 1 if you actually want to
566 have memset and memcpy called. People report that the macro
567 versions are faster than libc versions on some systems.
568
569 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
570 (of <= 36 bytes) are manually unrolled in realloc and calloc.
571*/
572
573#ifndef HAVE_MEMCPY
574#define HAVE_MEMCPY
575#endif
576
577#ifndef USE_MEMCPY
578#ifdef HAVE_MEMCPY
579#define USE_MEMCPY 1
580#else
581#define USE_MEMCPY 0
582#endif
583#endif
584
585
586#if (__STD_C || defined(HAVE_MEMCPY))
587
588#ifdef WIN32
[173]589 /* On Win32 memset and memcpy are already declared in windows.h */
[171]590#else
591#if __STD_C
[173]592 /* Defined in string.h */
[171]593#else
594Void_t* memset();
595Void_t* memcpy();
596#endif
597#endif
598#endif
599
600/*
601 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
602 malloc fails to be able to return memory, either because memory is
603 exhausted or because of illegal arguments.
604
605 By default, sets errno if running on STD_C platform, else does nothing.
606*/
607
608#ifndef MALLOC_FAILURE_ACTION
609#if __STD_C
610#define MALLOC_FAILURE_ACTION \
611 errno = ENOMEM;
612
613#else
614#define MALLOC_FAILURE_ACTION
615#endif
616#endif
617
618/*
619 MORECORE-related declarations. By default, rely on sbrk
620*/
621
622
623#if !defined(HAVE_UNISTD_H)
624#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
625#if __STD_C
626extern Void_t* sbrk(ptrdiff_t);
627#else
628extern Void_t* sbrk();
629#endif
630#endif
631#endif
632
633/*
634 MORECORE_FAILURE is the value returned upon failure of MORECORE
635 as well as mmap. Since it cannot be an otherwise valid memory address,
636 and must reflect values of standard sys calls, you probably ought not
637 try to redefine it.
638*/
639
640#ifndef MORECORE_FAILURE
641#define MORECORE_FAILURE ((void*)(-1UL))
642#endif
643
644/*
645 MORECORE is the name of the routine to call to obtain more memory
646 from the system. See below for general guidance on writing
647 alternative MORECORE functions, as well as a version for WIN32 and a
648 sample version for pre-OSX macos.
649*/
650
651#ifndef MORECORE
652#define MORECORE sbrk
653#endif
654
655
656/*
657 If MORECORE_CONTIGUOUS is true, take advantage of fact that
658 consecutive calls to MORECORE with positive arguments always return
659 contiguous increasing addresses. This is true of unix sbrk. Even
660 if not defined, when regions happen to be contiguous, malloc will
661 permit allocations spanning regions obtained from different
662 calls. But defining this when applicable enables some stronger
663 consistency checks and space efficiencies.
664*/
665
666#ifndef MORECORE_CONTIGUOUS
667#define MORECORE_CONTIGUOUS 1
668#endif
669
670/*
671 Define MORECORE_CANNOT_TRIM if your version of MORECORE
672 cannot release space back to the system when given negative
673 arguments. This is generally necessary only if you are using
674 a hand-crafted MORECORE function that cannot handle negative arguments.
675*/
676
677/* #define MORECORE_CANNOT_TRIM */
678
679
680/*
681 This malloc requires mmap for heap management data. It is an error
682 if mmap is not available.
683
684 Additionally, mmap will be used to satisfy large requests.
685*/
686
687#ifndef HAVE_MMAP
[172]688# error HAVE_MMAP not defined, has your operating system mmap?
[171]689#endif
690
691/*
692 Standard unix mmap using /dev/zero clears memory so calloc doesn't
693 need to.
694*/
695
696#ifndef MMAP_CLEARS
697#define MMAP_CLEARS 1
698#endif
699
700
701/*
702 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
703 sbrk fails, and mmap is used as a backup (which is done only if
704 HAVE_MMAP). The value must be a multiple of page size. This
705 backup strategy generally applies only when systems have "holes" in
706 address space, so sbrk cannot perform contiguous expansion, but
707 there is still space available on system. On systems for which
708 this is known to be useful (i.e. most linux kernels), this occurs
709 only when programs allocate huge amounts of memory. Between this,
710 and the fact that mmap regions tend to be limited, the size should
711 be large, to avoid too many mmap calls and thus avoid running out
712 of kernel resources.
713*/
714
715#ifndef MMAP_AS_MORECORE_SIZE
716#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
717#endif
718
719
720/*
721 The system page size. To the extent possible, this malloc manages
722 memory from the system in page-size units. Note that this value is
723 cached during initialization into a field of malloc_state. So even
724 if malloc_getpagesize is a function, it is only called once.
725
726 The following mechanics for getpagesize were adapted from bsd/gnu
727 getpagesize.h. If none of the system-probes here apply, a value of
728 4096 is used, which should be OK: If they don't apply, then using
729 the actual value probably doesn't impact performance.
730*/
731
732
733#ifndef malloc_getpagesize
734
735# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
736# ifndef _SC_PAGE_SIZE
737# define _SC_PAGE_SIZE _SC_PAGESIZE
738# endif
739# endif
740
741# ifdef _SC_PAGE_SIZE
742# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
743# else
744# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
745 extern size_t getpagesize();
746# define malloc_getpagesize getpagesize()
747# else
748# ifdef WIN32 /* use supplied emulation of getpagesize */
749# define malloc_getpagesize getpagesize()
750# else
751# if defined(HAVE_SYS_PARAM_H)
752# include <sys/param.h>
753# endif
754# ifdef EXEC_PAGESIZE
755# define malloc_getpagesize EXEC_PAGESIZE
756# else
757# ifdef NBPG
758# ifndef CLSIZE
759# define malloc_getpagesize NBPG
760# else
761# define malloc_getpagesize (NBPG * CLSIZE)
762# endif
763# else
764# ifdef NBPC
765# define malloc_getpagesize NBPC
766# else
767# ifdef PAGESIZE
768# define malloc_getpagesize PAGESIZE
769# else /* just guess */
770# define malloc_getpagesize (4096)
771# endif
772# endif
773# endif
774# endif
775# endif
776# endif
777# endif
778#endif
779
780/*
781 This version of malloc supports the standard SVID/XPG mallinfo
782 routine that returns a struct containing usage properties and
783 statistics. It should work on any SVID/XPG compliant system that has
784 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
785 install such a thing yourself, cut out the preliminary declarations
786 as described above and below and save them in a malloc.h file. But
787 there's no compelling reason to bother to do this.)
788
789 The main declaration needed is the mallinfo struct that is returned
790 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
791 bunch of fields that are not even meaningful in this version of
792 malloc. These fields are instead filled by mallinfo() with
793 other numbers that might be of interest.
794
795 HAVE_MALLOC_H should be set if you have a
796 /usr/include/malloc.h file that includes a declaration of struct
797 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
798 version is declared below. These must be precisely the same for
799 mallinfo() to work. The original SVID version of this struct,
800 defined on most systems with mallinfo, declares all fields as
801 ints. But some others define as unsigned long. If your system
802 defines the fields using a type of different width than listed here,
803 you must #include your system version and #define
804 HAVE_MALLOC_H.
805*/
806
807/* #define HAVE_MALLOC_H */
808
809/* On *BSD, malloc.h is deprecated, and on some *BSD including
810 * it may actually raise an error.
811 */
812#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
813#include <malloc.h>
814#else
815
816/* SVID2/XPG mallinfo structure */
817
818struct mallinfo {
819 int arena; /* non-mmapped space allocated from system */
820 int ordblks; /* number of free chunks */
821 int smblks; /* number of fastbin blocks */
822 int hblks; /* number of mmapped regions */
823 int hblkhd; /* space in mmapped regions */
824 int usmblks; /* maximum total allocated space */
825 int fsmblks; /* space available in freed fastbin blocks */
826 int uordblks; /* total allocated space */
827 int fordblks; /* total free space */
828 int keepcost; /* top-most, releasable (via malloc_trim) space */
829};
830
831/*
832 SVID/XPG defines four standard parameter numbers for mallopt,
833 normally defined in malloc.h. Only one of these (M_MXFAST) is used
834 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
835 so setting them has no effect. But this malloc also supports other
836 options in mallopt described below.
837*/
838#endif
839
840
841/* ---------- description of public routines ------------ */
842
843/*
844 malloc(size_t n)
845 Returns a pointer to a newly allocated chunk of at least n bytes, or null
846 if no space is available. Additionally, on failure, errno is
847 set to ENOMEM on ANSI C systems.
848
849 If n is zero, malloc returns a minimum-sized chunk. (The minimum
850 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
851 systems.) On most systems, size_t is an unsigned type, so calls
852 with negative arguments are interpreted as requests for huge amounts
853 of space, which will often fail. The maximum supported value of n
854 differs across systems, but is in all cases less than the maximum
855 representable value of a size_t.
856*/
857#if __STD_C
858Void_t* public_mALLOc(size_t) __attribute_malloc__;
859#else
860Void_t* public_mALLOc();
861#endif
862
863/*
864 free(Void_t* p)
865 Releases the chunk of memory pointed to by p, that had been previously
866 allocated using malloc or a related routine such as realloc.
867 It has no effect if p is null. It can have arbitrary (i.e., bad!)
868 effects if p has already been freed.
869
870 Unless disabled (using mallopt), freeing very large spaces will
871 when possible, automatically trigger operations that give
872 back unused memory to the system, thus reducing program footprint.
873*/
874#if __STD_C
875void public_fREe(Void_t*);
876#else
877void public_fREe();
878#endif
879
880/*
881 calloc(size_t n_elements, size_t element_size);
882 Returns a pointer to n_elements * element_size bytes, with all locations
883 set to zero.
884*/
885#if __STD_C
886Void_t* public_cALLOc(size_t, size_t) __attribute_malloc__;
887#else
888Void_t* public_cALLOc();
889#endif
890
891/*
892 realloc(Void_t* p, size_t n)
893 Returns a pointer to a chunk of size n that contains the same data
894 as does chunk p up to the minimum of (n, p's size) bytes, or null
895 if no space is available.
896
897 The returned pointer may or may not be the same as p. The algorithm
898 prefers extending p when possible, otherwise it employs the
899 equivalent of a malloc-copy-free sequence.
900
901 If p is null, realloc is equivalent to malloc.
902
903 If space is not available, realloc returns null, errno is set (if on
904 ANSI) and p is NOT freed.
905
906 if n is for fewer bytes than already held by p, the newly unused
907 space is lopped off and freed if possible. Unless the #define
908 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
909 zero (re)allocates a minimum-sized chunk.
910
911 Large chunks that were internally obtained via mmap will always
912 be reallocated using malloc-copy-free sequences unless
913 the system supports MREMAP (currently only linux).
914
915 The old unix realloc convention of allowing the last-free'd chunk
916 to be used as an argument to realloc is not supported.
917*/
918#if __STD_C
919Void_t* public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
920#else
921Void_t* public_rEALLOc();
922#endif
923
924/*
925 memalign(size_t alignment, size_t n);
926 Returns a pointer to a newly allocated chunk of n bytes, aligned
927 in accord with the alignment argument.
928
929 The alignment argument should be a power of two. If the argument is
930 not a power of two, the nearest greater power is used.
931 8-byte alignment is guaranteed by normal malloc calls, so don't
932 bother calling memalign with an argument of 8 or less.
933
934 Overreliance on memalign is a sure way to fragment space.
935*/
936#if __STD_C
937Void_t* public_mEMALIGn(size_t, size_t) __attribute_malloc__;
938#else
939Void_t* public_mEMALIGn();
940#endif
941
942/*
943 posix_memalign(void** memptr, size_t alignment, size_t n);
944 Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
945 in accord with the alignment argument. Returns 0 on success, otherwise
946 an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
947
948 The alignment must be a power of two, and a multiple of sizeof(void *).
949*/
950#if __STD_C
951int public_posix_mEMALIGn(Void_t**, size_t, size_t);
952#else
953int public_posix_mEMALIGn();
954#endif
955
956/*
957 valloc(size_t n);
958 Equivalent to memalign(pagesize, n), where pagesize is the page
959 size of the system. If the pagesize is unknown, 4096 is used.
960*/
961#if __STD_C
962Void_t* public_vALLOc(size_t) __attribute_malloc__;
963#else
964Void_t* public_vALLOc();
965#endif
966
967
968
969/*
970 mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
972 (parameter-number, parameter-value) pair. mallopt then sets the
973 corresponding parameter to the argument value if it can (i.e., so
974 long as the value is meaningful), and returns 1 if successful else
975 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
976 normally defined in malloc.h. Only one of these (M_MXFAST) is used
977 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
978 so setting them has no effect. But this malloc also supports four
979 other options in mallopt. See below for details. Briefly, supported
980 parameters are as follows (listed defaults are for "typical"
981 configurations).
982
983 Symbol param # default allowed param values
984 M_MXFAST 1 64 0-80 (0 disables fastbins)
985 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
986 M_TOP_PAD -2 0 any
987 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
988 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
989*/
990#if __STD_C
991int public_mALLOPt(int, int);
992#else
993int public_mALLOPt();
994#endif
995
996
997/*
998 mallinfo()
999 Returns (by copy) a struct containing various summary statistics:
1000
1001 arena: current total non-mmapped bytes allocated from system
1002 ordblks: the number of free chunks
1003 smblks: the number of fastbin blocks (i.e., small chunks that
      have been freed but not yet reused or consolidated)
1005 hblks: current number of mmapped regions
1006 hblkhd: total bytes held in mmapped regions
1007 usmblks: the maximum total allocated space. This will be greater
1008 than current total if trimming has occurred.
1009 fsmblks: total bytes held in fastbin blocks
1010 uordblks: current total allocated space (normal or mmapped)
1011 fordblks: total free space
1012 keepcost: the maximum number of bytes that could ideally be released
1013 back to system via malloc_trim. ("ideally" means that
1014 it ignores page restrictions etc.)
1015
1016 Because these fields are ints, but internal bookkeeping may
1017 be kept as longs, the reported values may wrap around zero and
1018 thus be inaccurate.
1019*/
1020#if __STD_C
1021struct mallinfo public_mALLINFo(void);
1022#else
1023struct mallinfo public_mALLINFo();
1024#endif
1025
1026/*
1027 pvalloc(size_t n);
1028 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1029 round up n to nearest pagesize.
1030 */
1031#if __STD_C
1032Void_t* public_pVALLOc(size_t) __attribute_malloc__;
1033#else
1034Void_t* public_pVALLOc();
1035#endif
1036
1037/*
1038 malloc_trim(size_t pad);
1039
1040 If possible, gives memory back to the system (via negative
1041 arguments to sbrk) if there is unused memory at the `high' end of
1042 the malloc pool. You can call this after freeing large blocks of
1043 memory to potentially reduce the system-level memory requirements
1044 of a program. However, it cannot guarantee to reduce memory. Under
1045 some allocation patterns, some large free blocks of memory will be
1046 locked between two used chunks, so they cannot be given back to
1047 the system.
1048
1049 The `pad' argument to malloc_trim represents the amount of free
1050 trailing space to leave untrimmed. If this argument is zero,
1051 only the minimum amount of memory to maintain internal data
1052 structures will be left (one page or less). Non-zero arguments
1053 can be supplied to maintain enough trailing space to service
1054 future expected allocations without having to re-obtain memory
1055 from the system.
1056
1057 Malloc_trim returns 1 if it actually released any memory, else 0.
1058 On systems that do not support "negative sbrks", it will always
  return 0.
1060*/
1061#if __STD_C
1062int public_mTRIm(size_t);
1063#else
1064int public_mTRIm();
1065#endif
1066
1067/*
1068 malloc_usable_size(Void_t* p);
1069
1070 Returns the number of bytes you can actually use in
1071 an allocated chunk, which may be more than you requested (although
1072 often not) due to alignment and minimum size constraints.
1073 You can use this many bytes without worrying about
1074 overwriting other allocated objects. This is not a particularly great
1075 programming practice. malloc_usable_size can be more useful in
1076 debugging and assertions, for example:
1077
1078 p = malloc(n);
1079 assert(malloc_usable_size(p) >= 256);
1080
1081*/
1082#if __STD_C
1083size_t public_mUSABLe(Void_t*);
1084#else
1085size_t public_mUSABLe();
1086#endif
1087
1088/*
1089 malloc_stats();
1090 Prints on stderr the amount of space obtained from the system (both
1091 via sbrk and mmap), the maximum amount (which may be more than
1092 current if malloc_trim and/or munmap got called), and the current
1093 number of bytes allocated via malloc (or realloc, etc) but not yet
1094 freed. Note that this is the number of bytes allocated, not the
1095 number requested. It will be larger than the number requested
1096 because of alignment and bookkeeping overhead. Because it includes
1097 alignment wastage as being in use, this figure may be greater than
1098 zero even when no user-level chunks are allocated.
1099
1100 The reported current and maximum system memory can be inaccurate if
1101 a program makes other calls to system memory allocation functions
1102 (normally sbrk) outside of malloc.
1103
1104 malloc_stats prints only the most commonly interesting statistics.
1105 More information can be obtained by calling mallinfo.
1106
1107*/
1108#if __STD_C
1109void public_mSTATs();
1110#else
1111void public_mSTATs();
1112#endif
1113
1114/* mallopt tuning options */
1115
1116/*
1117 M_MXFAST is the maximum request size used for "fastbins", special bins
1118 that hold returned chunks without consolidating their spaces. This
1119 enables future requests for chunks of the same size to be handled
1120 very quickly, but can increase fragmentation, and thus increase the
1121 overall memory footprint of a program.
1122
1123 This malloc manages fastbins very conservatively yet still
1124 efficiently, so fragmentation is rarely a problem for values less
1125 than or equal to the default. The maximum supported value of MXFAST
1126 is 80. You wouldn't want it any higher than this anyway. Fastbins
1127 are designed especially for use with many small structs, objects or
1128 strings -- the default handles structs/objects/arrays with sizes up
1129 to 16 4byte fields, or small strings representing words, tokens,
1130 etc. Using fastbins for larger objects normally worsens
1131 fragmentation without improving speed.
1132
1133 M_MXFAST is set in REQUEST size units. It is internally used in
1134 chunksize units, which adds padding and alignment. You can reduce
1135 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1136 algorithm to be a closer approximation of fifo-best-fit in all cases,
1137 not just for larger requests, but will generally cause it to be
1138 slower.
1139*/
1140
1141
1142/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1143#ifndef M_MXFAST
1144#define M_MXFAST 1
1145#endif
1146
1147#ifndef DEFAULT_MXFAST
1148#define DEFAULT_MXFAST 64
1149#endif
1150
1151
1152/*
1153 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1154 to keep before releasing via malloc_trim in free().
1155
1156 Automatic trimming is mainly useful in long-lived programs.
1157 Because trimming via sbrk can be slow on some systems, and can
1158 sometimes be wasteful (in cases where programs immediately
1159 afterward allocate more large chunks) the value should be high
1160 enough so that your overall system performance would improve by
1161 releasing this much memory.
1162
1163 The trim threshold and the mmap control parameters (see below)
1164 can be traded off with one another. Trimming and mmapping are
1165 two different ways of releasing unused memory back to the
1166 system. Between these two, it is often possible to keep
1167 system-level demands of a long-lived program down to a bare
1168 minimum. For example, in one test suite of sessions measuring
1169 the XF86 X server on Linux, using a trim threshold of 128K and a
1170 mmap threshold of 192K led to near-minimal long term resource
1171 consumption.
1172
1173 If you are using this malloc in a long-lived program, it should
1174 pay to experiment with these values. As a rough guide, you
1175 might set to a value close to the average size of a process
1176 (program) running on your system. Releasing this much memory
1177 would allow such a process to run in memory. Generally, it's
  worth it to tune for trimming rather than memory mapping when a
1179 program undergoes phases where several large chunks are
1180 allocated and released in ways that can reuse each other's
1181 storage, perhaps mixed with phases where there are no such
1182 chunks at all. And in well-behaved long-lived programs,
1183 controlling release of large blocks via trimming versus mapping
1184 is usually faster.
1185
1186 However, in most programs, these parameters serve mainly as
1187 protection against the system-level effects of carrying around
1188 massive amounts of unneeded memory. Since frequent calls to
1189 sbrk, mmap, and munmap otherwise degrade performance, the default
1190 parameters are set to relatively high values that serve only as
1191 safeguards.
1192
1193 The trim value must be greater than page size to have any useful
1194 effect. To disable trimming completely, you can set to
1195 (unsigned long)(-1)
1196
1197 Trim settings interact with fastbin (MXFAST) settings: Unless
1198 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1199 freeing a chunk with size less than or equal to MXFAST. Trimming is
1200 instead delayed until subsequent freeing of larger chunks. However,
1201 you can still force an attempted trim by calling malloc_trim.
1202
1203 Also, trimming is not generally possible in cases where
1204 the main arena is obtained via mmap.
1205
1206 Note that the trick some people use of mallocing a huge space and
1207 then freeing it at program startup, in an attempt to reserve system
1208 memory, doesn't have the intended effect under automatic trimming,
1209 since that memory will immediately be returned to the system.
1210*/
1211
1212#define M_TRIM_THRESHOLD -1
1213
1214#ifndef DEFAULT_TRIM_THRESHOLD
1215#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1216#endif
1217
1218/*
1219 M_TOP_PAD is the amount of extra `padding' space to allocate or
1220 retain whenever sbrk is called. It is used in two ways internally:
1221
1222 * When sbrk is called to extend the top of the arena to satisfy
1223 a new malloc request, this much padding is added to the sbrk
1224 request.
1225
1226 * When malloc_trim is called automatically from free(),
1227 it is used as the `pad' argument.
1228
1229 In both cases, the actual amount of padding is rounded
1230 so that the end of the arena is always a system page boundary.
1231
1232 The main reason for using padding is to avoid calling sbrk so
1233 often. Having even a small pad greatly reduces the likelihood
1234 that nearly every malloc request during program start-up (or
1235 after trimming) will invoke sbrk, which needlessly wastes
1236 time.
1237
1238 Automatic rounding-up to page-size units is normally sufficient
1239 to avoid measurable overhead, so the default is 0. However, in
1240 systems where sbrk is relatively slow, it can pay to increase
1241 this value, at the expense of carrying around more memory than
1242 the program needs.
1243*/
1244
1245#define M_TOP_PAD -2
1246
1247#ifndef DEFAULT_TOP_PAD
1248#define DEFAULT_TOP_PAD (0)
1249#endif
1250
1251/*
1252 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1253 to service a request. Requests of at least this size that cannot
1254 be allocated using already-existing space will be serviced via mmap.
1255 (If enough normal freed space already exists it is used instead.)
1256
1257 Using mmap segregates relatively large chunks of memory so that
1258 they can be individually obtained and released from the host
1259 system. A request serviced through mmap is never reused by any
1260 other request (at least not directly; the system may just so
1261 happen to remap successive requests to the same locations).
1262
1263 Segregating space in this way has the benefits that:
1264
1265 1. Mmapped space can ALWAYS be individually released back
1266 to the system, which helps keep the system level memory
1267 demands of a long-lived program low.
1268 2. Mapped memory can never become `locked' between
1269 other chunks, as can happen with normally allocated chunks, which
1270 means that even trimming via malloc_trim would not release them.
1271 3. On some systems with "holes" in address spaces, mmap can obtain
1272 memory that sbrk cannot.
1273
1274 However, it has the disadvantages that:
1275
1276 1. The space cannot be reclaimed, consolidated, and then
1277 used to service later requests, as happens with normal chunks.
1278 2. It can lead to more wastage because of mmap page alignment
1279 requirements
1280 3. It causes malloc performance to be more dependent on host
1281 system memory management support routines which may vary in
1282 implementation quality and may impose arbitrary
1283 limitations. Generally, servicing a request via normal
1284 malloc steps is faster than going through a system's mmap.
1285
1286 The advantages of mmap nearly always outweigh disadvantages for
1287 "large" chunks, but the value of "large" varies across systems. The
1288 default is an empirically derived value that works well in most
1289 systems.
1290*/
1291
1292#define M_MMAP_THRESHOLD -3
1293
1294#ifndef DEFAULT_MMAP_THRESHOLD
1295#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1296#endif
1297
1298/*
1299 M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap. This parameter exists because some systems
  have a limited number of internal tables for use by mmap, and
  using more than a few of them may degrade performance.
1304
1305 The default is set to a value that serves only as a safeguard.
1306 Setting to 0 disables use of mmap for servicing large requests. If
1307 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1308 to non-zero values in mallopt will fail.
1309*/
1310
1311#define M_MMAP_MAX -4
1312
1313#ifndef DEFAULT_MMAP_MAX
1314#define DEFAULT_MMAP_MAX (65536)
1315#endif
1316
1317#ifdef __cplusplus
1318}; /* end of extern "C" */
1319#endif
1320
1321/*
1322 ========================================================================
1323 To make a fully customizable malloc.h header file, cut everything
1324 above this line, put into file malloc.h, edit to suit, and #include it
1325 on the next line, as well as in programs that use this malloc.
1326 ========================================================================
1327*/
1328
1329/* #include "malloc.h" */
1330
1331/* --------------------- public wrappers ---------------------- */
1332
1333#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1334
1335/* DL_STATIC used to make functions (deep down) consistent
1336 * with prototypes (otherwise the prototypes are static
1337 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1338 * The gcc compiler doesn't care, but the HP-UX compiler does.
1339 */
1340#define DL_STATIC static
1341
1342/* Declare all routines as internal */
1343#if __STD_C
1344static Void_t* mALLOc(size_t) __attribute_malloc__;
1345static void fREe(Void_t*);
1346static Void_t* rEALLOc(Void_t*, size_t) __attribute_malloc__;
1347static Void_t* mEMALIGn(size_t, size_t) __attribute_malloc__;
1348static int posix_mEMALIGn(Void_t**, size_t, size_t);
1349static Void_t* vALLOc(size_t) __attribute_malloc__;
1350static Void_t* pVALLOc(size_t) __attribute_malloc__;
1351static Void_t* cALLOc(size_t, size_t) __attribute_malloc__;
1352static int mTRIm(size_t);
1353static size_t mUSABLe(Void_t*);
1354static void mSTATs();
1355static int mALLOPt(int, int);
1356static struct mallinfo mALLINFo(void);
1357#else
1358static Void_t* mALLOc();
1359static void fREe();
1360static Void_t* rEALLOc();
1361static Void_t* mEMALIGn();
1362static int posix_mEMALIGn();
1363static Void_t* vALLOc();
1364static Void_t* pVALLOc();
1365static Void_t* cALLOc();
1366static int mTRIm();
1367static size_t mUSABLe();
1368static void mSTATs();
1369static int mALLOPt();
1370static struct mallinfo mALLINFo();
1371#endif
1372
1373/*
1374 MALLOC_PREACTION and MALLOC_POSTACTION should be
1375 defined to return 0 on success, and nonzero on failure.
1376 The return value of MALLOC_POSTACTION is currently ignored
1377 in wrapper functions since there is no reasonable default
1378 action to take on failure.
1379*/
1380
1381
1382#ifdef USE_MALLOC_LOCK
1383
1384# ifdef WIN32
1385
1386static int mALLOC_MUTEx;
1387#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1388#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1389int dnmalloc_pthread_init(void) { return 0; }
1390
1391# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1392
1393# if defined(__NetBSD__)
1394#include <reentrant.h>
1395extern int __isthreaded;
1396static mutex_t thread_lock = MUTEX_INITIALIZER;
1397#define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock)
1398#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1399void _malloc_prefork(void) { _MALLOC_LOCK(); }
1400void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1401# endif
1402
1403# if defined(__OpenBSD__)
1404extern int __isthreaded;
1405void _thread_malloc_lock(void);
1406void _thread_malloc_unlock(void);
1407#define _MALLOC_LOCK() if (__isthreaded) _thread_malloc_lock()
1408#define _MALLOC_UNLOCK() if (__isthreaded) _thread_malloc_unlock()
1409# endif
1410
1411# if defined(__FreeBSD__)
1412extern int __isthreaded;
1413struct _spinlock {
1414 volatile long access_lock;
1415 volatile long lock_owner;
1416 volatile char *fname;
1417 volatile int lineno;
1418};
1419typedef struct _spinlock spinlock_t;
1420#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
1421void _spinlock(spinlock_t *);
1422void _spinunlock(spinlock_t *);
1423/* # include "/usr/src/lib/libc/include/spinlock.h" */
1424static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
1425spinlock_t *__malloc_lock = &thread_lock;
1426#define _MALLOC_LOCK() if (__isthreaded) _spinlock(&thread_lock)
1427#define _MALLOC_UNLOCK() if (__isthreaded) _spinunlock(&thread_lock)
1428# endif
1429
1430/* Common for all three *BSD
1431 */
/* Nonzero while a thread is inside the allocator.  Used to detect
   re-entry, which would otherwise nest allocator calls under one
   lock acquisition. */
static int malloc_active = 0;

/* Acquire the *BSD malloc spinlock.  Returns 0 on success.  If the
   allocator is already active after the lock is taken (re-entrant
   call), release the lock again and fail with EDEADLK instead of
   proceeding; the assert() aborts in debug builds in that case. */
static int dnmalloc_mutex_lock()
{
  _MALLOC_LOCK();
  if (!malloc_active)
    {
      ++malloc_active;
      return 0;
    }
  /* Re-entry detected: deliberately failing assertion in debug builds. */
  assert(malloc_active == 0);
  _MALLOC_UNLOCK();
  errno = EDEADLK;
  return 1;
}
/* Release the *BSD malloc spinlock and clear the re-entry flag.
   Always returns 0. */
static int dnmalloc_mutex_unlock()
{
  --malloc_active;
  _MALLOC_UNLOCK();
  return 0;
}
1452#define MALLOC_PREACTION dnmalloc_mutex_lock()
1453#define MALLOC_POSTACTION dnmalloc_mutex_unlock()
1454int dnmalloc_pthread_init(void) { return 0; }
1455
1456# else
1457
1458/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1459 *
1460 * Works fine on linux (no malloc in pthread_mutex_lock)
1461 * Works with on HP-UX if initialized after entering main()
1462 */
1463#include <pthread.h>
1464static int malloc_active = 0;
1465void dnmalloc_fork_prepare(void);
1466void dnmalloc_fork_parent(void);
1467void dnmalloc_fork_child(void);
1468
1469#if !defined(__linux__)
1470
1471static pthread_mutex_t mALLOC_MUTEx;
1472pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1473static int dnmalloc_use_mutex = 0;
/* One-time mutex setup (non-linux pthread platforms): create a
   RECURSIVE mutex so that a nested allocator call in the same thread
   does not self-deadlock, register fork handlers to keep the mutex
   consistent across fork(), and only then enable mutex use. */
static void dnmalloc_pthread_init_int(void)
{
  pthread_mutexattr_t mta;
  pthread_mutexattr_init(&mta);
  pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&(mALLOC_MUTEx), &mta);
  pthread_mutexattr_destroy(&mta);
  pthread_atfork(dnmalloc_fork_prepare,
		 dnmalloc_fork_parent,
		 dnmalloc_fork_child);
  dnmalloc_use_mutex = 1;
}
/* Public init hook: run dnmalloc_pthread_init_int() exactly once,
   no matter how many threads call this.  Returns the pthread_once
   result (0 on success). */
int dnmalloc_pthread_init(void)
{
  return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
}
1490
1491#else
1492
/* On linux the mutex can be statically initialized and is usable
   immediately, so only the fork handlers need registering here. */
static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
static int dnmalloc_use_mutex = 1;
int dnmalloc_pthread_init(void) {
  /* Returns the pthread_atfork result (0 on success). */
  return pthread_atfork(dnmalloc_fork_prepare,
			dnmalloc_fork_parent,
			dnmalloc_fork_child);
}
1500#endif /* !defined(__linux__) */
1501
/* pthread_atfork "prepare" handler: take the allocator mutex before
   fork() so the child never inherits it in a half-locked state. */
void dnmalloc_fork_prepare(void) {
  if (dnmalloc_use_mutex)
    pthread_mutex_lock(&mALLOC_MUTEx);
}
/* pthread_atfork "parent" handler: drop the mutex taken in
   dnmalloc_fork_prepare() once fork() has returned in the parent. */
void dnmalloc_fork_parent(void) {
  if (dnmalloc_use_mutex)
    pthread_mutex_unlock(&mALLOC_MUTEx);
}
/* pthread_atfork "child" handler.  Under glibc the mutex is rebuilt
   from scratch (unlock, destroy, re-init) so the single-threaded
   child starts with a clean unlocked mutex; elsewhere it is simply
   unlocked.  If re-initialization/unlock fails, the child aborts --
   continuing with a broken allocator lock would be unsafe. */
void dnmalloc_fork_child(void) {
  int rc = 0;
#ifdef __GLIBC__
  if (dnmalloc_use_mutex)
    {
      pthread_mutex_unlock (&mALLOC_MUTEx);
      pthread_mutex_destroy(&mALLOC_MUTEx);
      rc = pthread_mutex_init(&mALLOC_MUTEx, NULL);
    }
#else
  if (dnmalloc_use_mutex)
    rc = pthread_mutex_unlock(&mALLOC_MUTEx);
#endif
  if (rc != 0)
    {
      fputs("fork_child failed", stderr);
      _exit(EXIT_FAILURE);
    }
}
/* Acquire the allocator mutex and mark the allocator active.
   Returns 0 on success, the pthread error code if locking fails.
   Because the mutex is recursive (see dnmalloc_pthread_init_int),
   a re-entrant call in the same thread would succeed in locking;
   the malloc_active flag catches that case and fails the call with
   EDEADLK after releasing the mutex.  The assert() aborts in debug
   builds on re-entry. */
static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
{
  if (dnmalloc_use_mutex)
    {
      int rc = pthread_mutex_lock(mutex);
      if (rc == 0)
	{
	  if (!malloc_active)
	    {
	      ++malloc_active;
	      return 0;
	    }
	  /* Re-entry: deliberately failing assertion in debug builds. */
	  assert(malloc_active == 0);
	  (void) pthread_mutex_unlock(mutex);
	  errno = EDEADLK;
	  return 1;
	}
      return rc;
    }
  return 0;
}
/* Release the allocator mutex and clear the active flag.
   Returns the pthread_mutex_unlock result, or 0 when the mutex
   machinery is not (yet) enabled. */
static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
{
  if (dnmalloc_use_mutex)
    {
      --malloc_active;
      return pthread_mutex_unlock(mutex);
    }
  return 0;
}
1559# define MALLOC_PREACTION dnmalloc_mutex_lock(&mALLOC_MUTEx)
1560# define MALLOC_POSTACTION dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1561
1562# endif
1563
1564#else
1565
1566/* Substitute anything you like for these */
1567
1568# define MALLOC_PREACTION (0)
1569# define MALLOC_POSTACTION (0)
1570int dnmalloc_pthread_init(void) { return 0; }
1571
1572#endif /* USE_MALLOC_LOCK */
1573
1574Void_t* public_mALLOc(size_t bytes) {
1575 Void_t* m;
1576 if (MALLOC_PREACTION == 0) {
1577 m = mALLOc(bytes);
1578 (void) MALLOC_POSTACTION;
1579 return m;
1580 }
1581 return 0;
1582}
1583
1584void public_fREe(Void_t* m) {
1585 if (MALLOC_PREACTION == 0) {
1586 fREe(m);
1587 (void) MALLOC_POSTACTION;
1588 }
1589}
1590
1591Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1592 if (MALLOC_PREACTION == 0) {
1593 m = rEALLOc(m, bytes);
1594 (void) MALLOC_POSTACTION;
1595 return m;
1596 }
1597 return 0;
1598}
1599
1600Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1601 Void_t* m;
1602 if (MALLOC_PREACTION == 0) {
1603 m = mEMALIGn(alignment, bytes);
1604 (void) MALLOC_POSTACTION;
1605 return m;
1606 }
1607 return 0;
1608}
1609
1610int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1611 int m, ret;
1612 if ((ret = MALLOC_PREACTION) == 0) {
1613 m = posix_mEMALIGn(memptr, alignment, bytes);
1614 (void) MALLOC_POSTACTION;
1615 return m;
1616 }
1617 return ret;
1618}
1619
1620Void_t* public_vALLOc(size_t bytes) {
1621 Void_t* m;
1622 if (MALLOC_PREACTION == 0) {
1623 m = vALLOc(bytes);
1624 (void) MALLOC_POSTACTION;
1625 return m;
1626 }
1627 return 0;
1628}
1629
1630Void_t* public_pVALLOc(size_t bytes) {
1631 Void_t* m;
1632 if (MALLOC_PREACTION == 0) {
1633 m = pVALLOc(bytes);
1634 (void) MALLOC_POSTACTION;
1635 return m;
1636 }
1637 return 0;
1638}
1639
1640Void_t* public_cALLOc(size_t n, size_t elem_size) {
1641 Void_t* m;
1642 if (MALLOC_PREACTION == 0) {
1643 m = cALLOc(n, elem_size);
1644 (void) MALLOC_POSTACTION;
1645 return m;
1646 }
1647 return 0;
1648}
1649
1650int public_mTRIm(size_t s) {
1651 int result;
1652 if (MALLOC_PREACTION == 0) {
1653 result = mTRIm(s);
1654 (void) MALLOC_POSTACTION;
1655 return result;
1656 }
1657 return 0;
1658}
1659
1660size_t public_mUSABLe(Void_t* m) {
1661 size_t result;
1662 if (MALLOC_PREACTION == 0) {
1663 result = mUSABLe(m);
1664 (void) MALLOC_POSTACTION;
1665 return result;
1666 }
1667 return 0;
1668}
1669
1670void public_mSTATs() {
1671 if (MALLOC_PREACTION == 0) {
1672 mSTATs();
1673 (void) MALLOC_POSTACTION;
1674 }
1675}
1676
1677struct mallinfo public_mALLINFo() {
1678 struct mallinfo m;
1679 if (MALLOC_PREACTION == 0) {
1680 m = mALLINFo();
1681 (void) MALLOC_POSTACTION;
1682 return m;
1683 } else {
1684 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1685 return nm;
1686 }
1687}
1688
1689int public_mALLOPt(int p, int v) {
1690 int result;
1691 if (MALLOC_PREACTION == 0) {
1692 result = mALLOPt(p, v);
1693 (void) MALLOC_POSTACTION;
1694 return result;
1695 }
1696 return 0;
1697}
1698
1699#else
1700
1701int dnmalloc_pthread_init(void) { return 0; }
1702#define DL_STATIC
1703
1704#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1705
1706
1707
1708/* ------------- Optional versions of memcopy ---------------- */
1709
1710
1711#if USE_MEMCPY
1712
1713/*
1714 Note: memcpy is ONLY invoked with non-overlapping regions,
1715 so the (usually slower) memmove is not needed.
1716*/
1717
1718#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1719#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1720
1721#else /* !USE_MEMCPY */
1722
1723/* Use Duff's device for good zeroing/copying performance. */
1724
/* Word-at-a-time zeroing, unrolled eight-fold via Duff's device.
   NOTE(review): assumes nbytes is a nonzero multiple of
   sizeof(INTERNAL_SIZE_T) -- confirm at call sites. */
#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
  long mcn;                                                                   \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)
1742
/* Forward word-at-a-time copy, unrolled eight-fold via Duff's
   device.  Regions must not overlap (memcpy semantics -- see the
   note above: MALLOC_COPY is only invoked with non-overlapping
   regions).  NOTE(review): assumes nbytes is a nonzero multiple of
   sizeof(INTERNAL_SIZE_T) -- confirm at call sites. */
#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
  long mcn;                                                                   \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)
1761
1762#endif
1763
1764/* ------------------ MMAP support ------------------ */
1765
1766
1767#if defined(HAVE_FCNTL_H)
1768#include <fcntl.h>
1769#endif
1770
1771#if defined(HAVE_SYS_MMAN_H)
1772#include <sys/mman.h>
1773#endif
1774
1775#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1776#define MAP_ANONYMOUS MAP_ANON
1777#endif
1778
1779/*
1780 Nearly all versions of mmap support MAP_ANONYMOUS,
1781 so the following is unlikely to be needed, but is
1782 supplied just in case.
1783*/
1784
1785#ifndef MAP_ANONYMOUS
1786
1787/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1788 */
/* Anonymous-mmap substitute for systems that lack MAP_ANONYMOUS:
   map 'length' bytes backed by a freshly opened /dev/zero.
   Returns the mapped address on success, NULL if /dev/zero cannot
   be opened, or MAP_FAILED if mmap itself fails -- callers must
   check for both failure values.
   Fix: the body referenced the undeclared name 'size' instead of
   the 'length' parameter, which could not compile on any platform
   taking this branch. */
void * anon_mmap (void *addr, size_t length, int prot, int flags)
{
  void * retval = NULL;
  int dev_zero_fd = -1; /* File descriptor for /dev/zero. */

  dev_zero_fd = open("/dev/zero", O_RDWR);
  if (dev_zero_fd >= 0)
    {
      retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
      /* closing the file descriptor does not unmap the region */
      close(dev_zero_fd);
    }
  return retval;
}
1803
1804#define MMAP(addr, size, prot, flags) \
1805 (anon_mmap((addr), (size), (prot), (flags)))
1806
1807
1808#else /* have MAP_ANONYMOUS */
1809
1810#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1811#define MAP_32BIT MAP_ADDR32
1812#endif
1813
1814#if defined(MAP_32BIT)
1815#define MMAP(addr, size, prot, flags) \
1816 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1817#elif defined(__sun)
1818/*
1819 * Hint an address within 32bit address space
1820 */
1821#define MMAP(addr, size, prot, flags) \
1822 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1823#else
1824/* *BSD */
1825#define MMAP(addr, size, prot, flags) \
1826 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1827#endif
1828
1829#endif /* have MAP_ANONYMOUS */
1830
1831
1832/*
1833 ----------------------- Chunk representations -----------------------
1834*/
1835
/* Opaque handle to the actual user-visible chunk of memory. */
typedef void * mchunkptr;

/* Out-of-band chunk header.  dnmalloc keeps chunk metadata in
   separate regions and maps a chunk address back to its header via
   a hash table (see mem2chunk / hashtable_lookup), rather than
   storing the header in front of the user data. */
struct chunkinfo {
   INTERNAL_SIZE_T prev_size; /* Size of previous in bytes */
   INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
   INTERNAL_SIZE_T req; /* Original request size, for guard. */
   struct chunkinfo* hash_next; /* contains a pointer to the next chunk
                                   in the linked list if the hash
                                   value is the same as the chunk */
   struct chunkinfo* fd; /* double links -- used only if free. */
   struct chunkinfo* bk;
   mchunkptr chunk; /* the user-memory chunk this header describes */
};

typedef struct chunkinfo* chunkinfoptr;
1851
/* Descriptor for one region holding chunkinfo records.
   NOTE(review): field roles below are inferred from names and the
   changelog ("a freechunkinfo region is freed when completely
   empty") -- confirm against cireg_extend()/cireg_getfree(). */
struct cireginfo {
	unsigned long position;       /* presumably next unused slot index */
	unsigned long *freebitmap;    /* bitmap of free chunkinfo slots */
	struct cireginfo* next;       /* next region in the linked list */
	struct chunkinfo *freelist;   /* chunkinfo records available for reuse */
	struct chunkinfo *begin;      /* first chunkinfo record of the region */
	unsigned long freecounter;    /* number of free records in this region */
};
1860
1861/*
1862 ---------- Size and alignment checks and conversions ----------
1863*/
1864
1865/* conversion from malloc headers to user pointers, and back */
1866#define chunk(p) (p->chunk)
1867
1868
1869#define chunk2mem(p) (chunk(p))
1870#define mem2chunk(mem) (hashtable_lookup(mem))
1871
[187]1872/* The smallest possible chunk */
1873/* #define MIN_CHUNK_SIZE 16 */
1874#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
1875# define MIN_CHUNK_SIZE 32
1876#else
1877# define MIN_CHUNK_SIZE 16
1878#endif
[171]1879
1880/* The smallest size we can malloc is an aligned minimal chunk */
1881
1882#define MINSIZE \
1883 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1884
1885/* Check if m has acceptable alignment */
1886
1887#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1888
1889#define GUARD_SIZE 4
1890
1891/*
1892 Check if a request is so large that it would wrap around zero when
1893 padded and aligned. To simplify some other code, the bound is made
1894 low enough so that adding MINSIZE will also not wrap around zero.
1895
1896 Make it 4*MINSIZE.
1897*/
1898
1899#define REQUEST_OUT_OF_RANGE(req) \
1900 ((CHUNK_SIZE_T)(req) >= \
1901 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))
1902
1903/* pad request bytes into a usable size -- internal version */
1904
1905#define request2size(req) \
1906 (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE) ? \
1907 ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1908 MINSIZE)
1909
1910/* Same, except also perform argument check */
1911
1912#define checked_request2size(req, sz) \
1913 if (!REQUEST_OUT_OF_RANGE(req)) { \
1914 (sz) = request2size(req); \
1915 assert((sz-req) >= GUARD_SIZE); \
1916 } else { \
1917 MALLOC_FAILURE_ACTION; \
1918 return 0; \
1919 }
1920
1921#if PARANOIA > 2
1922static char * guard_set_p;
1923static char * guard_set_q;
1924
1925#define guard_set(guard, P, request, sz) \
1926 assert((sz-request) >= GUARD_SIZE); \
1927 guard_set_p = (char*)(chunk(P)); \
1928 guard_set_p += request; \
[172]1929 VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE); \
[171]1930 guard_set_q = (char*)(guard); \
1931 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1932 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1933 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1934 *guard_set_p = *guard_set_q; \
[172]1935 VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE); \
[171]1936 (P)->req = request
1937
1938#define guard_check(guard, P) \
[172]1939 VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1940 assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1941 VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
[171]1942
1943#else
1944#define guard_set(guard, P, request, sz) ((void)0)
1945#define guard_check(guard, P) ((void)0)
1946#endif /* PARANOIA > 2 */
1947
1948/* dnmalloc forward declarations */
1949static char * dnmalloc_arc4random(void);
1950static void dnmalloc_init (void);
1951static void malloc_mmap_state(void);
1952static void cireg_extend (void);
1953static chunkinfoptr cireg_getfree (void);
1954static void hashtable_add (chunkinfoptr ci);
1955static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1956static void hashtable_remove (mchunkptr p);
1957static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1958static chunkinfoptr hashtable_lookup (mchunkptr p);
1959static chunkinfoptr next_chunkinfo (chunkinfoptr ci);
1960static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1961
1962
1963
1964/*
1965 --------------- Physical chunk operations ---------------
1966*/
1967
1968
1969/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1970#define PREV_INUSE 0x1
1971
1972/* extract inuse bit of previous chunk */
1973#define prev_inuse(p) ((p)->size & PREV_INUSE)
1974
1975/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1976#define IS_MMAPPED 0x2
1977
1978/* check for mmap()'ed chunk */
1979#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1980
1981
1982/* size field is or'ed when the chunk is in use */
1983#define INUSE 0x4
1984
1985/* extract inuse bit of chunk */
1986#define inuse(p) ((p)->size & INUSE)
1987
1988/*
1989 Bits to mask off when extracting size
1990
1991 Note: IS_MMAPPED is intentionally not masked off from size field in
1992 macros for which mmapped chunks should never be seen. This should
1993 cause helpful core dumps to occur if it is tried by accident by
1994 people extending or adapting this malloc.
1995*/
1996#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
1997
1998/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
1999#define SIZE_NOMMAP (PREV_INUSE|INUSE)
2000
2001/* Get size, ignoring use bits */
2002#define chunksize(p) ((p)->size & ~(SIZE_BITS))
2003
2004/* Ptr to chunkinfo of next physical malloc_chunk. */
2005#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
2006
2007/* Treat space at ptr + offset as a chunk */
2008#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2009
2010/* set/clear chunk as being inuse without otherwise disturbing */
2011#define set_inuse(p) ((p)->size |= INUSE)
2012
2013#define clear_inuse(p) ((p)->size &= ~(INUSE))
2014
2015#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2016
2017#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2018
2019static void set_previnuse_next (chunkinfoptr p)
2020{
2021 chunkinfoptr q;
2022 q = next_chunkinfo (p);
2023 if (q)
2024 set_previnuse (q);
2025}
2026
2027#define set_all_inuse(p) \
2028set_inuse(p); \
2029set_previnuse_next(p);
2030
2031
2032/* Set size at head, without disturbing its use bit */
2033#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2034
2035/* Set size/use field */
2036#define set_head(p, s) ((p)->size = (s))
2037
2038/*
2039 Bins
2040
2041 An array of bin headers for free chunks. Each bin is doubly
2042 linked. The bins are approximately proportionally (log) spaced.
2043 There are a lot of these bins (128). This may look excessive, but
2044 works very well in practice. Most bins hold sizes that are
2045 unusual as malloc request sizes, but are more usual for fragments
2046 and consolidated sets of chunks, which is what these bins hold, so
2047 they can be found quickly. All procedures maintain the invariant
2048 that no consolidated chunk physically borders another one, so each
2049 chunk in a list is known to be preceeded and followed by either
2050 inuse chunks or the ends of memory.
2051
2052 Chunks in bins are kept in size order, with ties going to the
2053 approximately least recently used chunk. Ordering isn't needed
2054 for the small bins, which all contain the same-sized chunks, but
2055 facilitates best-fit allocation for larger chunks. These lists
2056 are just sequential. Keeping them in order almost never requires
2057 enough traversal to warrant using fancier ordered data
2058 structures.
2059
2060 Chunks of the same size are linked with the most
2061 recently freed at the front, and allocations are taken from the
2062 back. This results in LRU (FIFO) allocation order, which tends
2063 to give each chunk an equal opportunity to be consolidated with
2064 adjacent freed chunks, resulting in larger free chunks and less
2065 fragmentation.
2066
2067 To simplify use in double-linked lists, each bin header acts
2068 as a malloc_chunk. This avoids special-casing for headers.
2069 But to conserve space and improve locality, we allocate
2070 only the fd/bk pointers of bins, and then use repositioning tricks
2071 to treat these as the fields of a malloc_chunk*.
2072*/
2073
2074typedef struct chunkinfo* mbinptr;
2075
2076/* addressing -- note that bin_at(0) does not exist */
2077#define bin_at(m, i) (&(m)->bins[i])
2078
2079/* analog of ++bin */
2080#define next_bin(b) (b+1)
2081
2082/* Reminders about list directionality within bins */
2083#define first(b) ((b)->fd)
2084#define last(b) ((b)->bk)
2085
2086/* Take a chunk off a bin list */
2087#define unlink(P, BK, FD) { \
2088 FD = P->fd; \
2089 BK = P->bk; \
2090 FD->bk = BK; \
2091 BK->fd = FD; \
2092}
2093
2094/*
2095 Indexing
2096
2097 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2098 8 bytes apart. Larger bins are approximately logarithmically spaced:
2099
2100 64 bins of size 8
2101 32 bins of size 64
2102 16 bins of size 512
2103 8 bins of size 4096
2104 4 bins of size 32768
2105 2 bins of size 262144
2106 1 bin of size what's left
2107
2108 The bins top out around 1MB because we expect to service large
2109 requests via mmap.
2110*/
2111
2112#define NBINS 96
2113#define NSMALLBINS 32
2114#define SMALLBIN_WIDTH 8
2115#define MIN_LARGE_SIZE 256
2116
2117#define in_smallbin_range(sz) \
2118 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2119
2120#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2121
2122/*
2123 Compute index for size. We expect this to be inlined when
2124 compiled with optimization, else not, which works out well.
2125*/
/* Map a chunk size sz to its large-bin index.
 * Finds m, the position of the highest set bit of (sz >> SMALLBIN_WIDTH)
 * (using the x86 BSR instruction where available, a branch-free
 * bit-twiddling fallback otherwise), then refines the bucket with the
 * next two bits of sz.  Sizes whose shifted value is >= 0x10000 all
 * collapse into the last bin, NBINS-1.
 */
static int largebin_index(size_t sz) {

 unsigned long xx = sz >> SMALLBIN_WIDTH;

 if (xx < 0x10000)
 {
 unsigned int m; /* bit position of highest set bit of m */

 /* On intel, use BSRL instruction to find highest bit */
#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)

 unsigned int x = (unsigned int) xx;

 __asm__("bsrl %1,%0\n\t"
 : "=r" (m)
 : "rm" (x));

#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)

 __asm__("bsrq %1,%0\n\t"
 : "=r" (m)
 : "rm" (xx));

#else

 /* Taken from Bit Twiddling Hacks
 * http://graphics.stanford.edu/~seander/bithacks.html
 * public domain
 */
 /* Each step tests whether the remaining value needs more than
 * 16/8/4/2 bits and accumulates the shift amounts into m,
 * yielding floor(log2(v)) without a loop. */
 unsigned int v = (unsigned int) xx;
 register unsigned int shift;

 m = (v > 0xFFFF) << 4; v >>= m;
 shift = (v > 0xFF ) << 3; v >>= shift; m |= shift;
 shift = (v > 0xF ) << 2; v >>= shift; m |= shift;
 shift = (v > 0x3 ) << 1; v >>= shift; m |= shift;
 m |= (v >> 1);

#endif

 /* Use next 2 bits to create finer-granularity bins */
 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
 }
 else
 {
 return NBINS-1;
 }
}
2174
2175#define bin_index(sz) \
2176 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
2177
2178/*
2179 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2180 first bin that is maintained in sorted order. This must
2181 be the smallest size corresponding to a given bin.
2182
2183 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2184 best fit guarantees to sometimes speed up malloc by increasing value.
2185 Doing this means that malloc may choose a chunk that is
2186 non-best-fitting by up to the width of the bin.
2187
2188 Some useful cutoff values:
2189 512 - all bins sorted
2190 2560 - leaves bins <= 64 bytes wide unsorted
2191 12288 - leaves bins <= 512 bytes wide unsorted
2192 65536 - leaves bins <= 4096 bytes wide unsorted
2193 262144 - leaves bins <= 32768 bytes wide unsorted
2194 -1 - no bins sorted (not recommended!)
2195*/
2196
2197/* #define FIRST_SORTED_BIN_SIZE 65536 */
2198
2199#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2200
[172]2201
[171]2202/*
2203 Unsorted chunks
2204
2205 All remainders from chunk splits, as well as all returned chunks,
2206 are first placed in the "unsorted" bin. They are then placed
2207 in regular bins after malloc gives them ONE chance to be used before
2208 binning. So, basically, the unsorted_chunks list acts as a queue,
2209 with chunks being placed on it in free (and malloc_consolidate),
2210 and taken off (to be either used or placed in bins) in malloc.
2211*/
2212
2213/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2214#define unsorted_chunks(M) (bin_at(M, 1))
2215
2216/*
2217 Top
2218
2219 The top-most available chunk (i.e., the one bordering the end of
2220 available memory) is treated specially. It is never included in
2221 any bin, is used only if no other chunk is available, and is
2222 released back to the system if it is very large (see
2223 M_TRIM_THRESHOLD). Because top initially
2224 points to its own bin with initial zero size, thus forcing
2225 extension on the first malloc request, we avoid having any special
2226 code in malloc to check whether it even exists yet. But we still
2227 need to do so when getting memory from system, so we make
2228 initial_top treat the bin as a legal but unusable chunk during the
2229 interval between initialization and the first call to
2230 sYSMALLOc. (This is somewhat delicate, since it relies on
2231 the 2 preceding words to be zero during this interval as well.)
2232*/
2233
2234/* Conveniently, the unsorted bin can be used as dummy top on first call */
2235#define initial_top(M) (unsorted_chunks(M))
2236
2237/*
2238 Binmap
2239
2240 To help compensate for the large number of bins, a one-level index
2241 structure is used for bin-by-bin searching. `binmap' is a
2242 bitvector recording whether bins are definitely empty so they can
2243 be skipped over during during traversals. The bits are NOT always
2244 cleared as soon as bins are empty, but instead only
2245 when they are noticed to be empty during traversal in malloc.
2246*/
2247
2248/* Conservatively use 32 bits per map word, even if on 64bit system */
2249#define BINMAPSHIFT 5
2250#define BITSPERMAP (1U << BINMAPSHIFT)
2251#define BINMAPSIZE (NBINS / BITSPERMAP)
2252
2253#define idx2block(i) ((i) >> BINMAPSHIFT)
2254#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2255
2256#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2257#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2258#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
2259
2260/*
2261 Fastbins
2262
2263 An array of lists holding recently freed small chunks. Fastbins
2264 are not doubly linked. It is faster to single-link them, and
2265 since chunks are never removed from the middles of these lists,
2266 double linking is not necessary. Also, unlike regular bins, they
2267 are not even processed in FIFO order (they use faster LIFO) since
2268 ordering doesn't much matter in the transient contexts in which
2269 fastbins are normally used.
2270
2271 Chunks in fastbins keep their inuse bit set, so they cannot
2272 be consolidated with other free chunks. malloc_consolidate
2273 releases all chunks in fastbins and consolidates them with
2274 other free chunks.
2275*/
2276
2277typedef struct chunkinfo* mfastbinptr;
2278
2279/* offset 2 to use otherwise unindexable first 2 bins */
2280#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2281
2282/* The maximum fastbin request size we support */
2283#define MAX_FAST_SIZE 80
2284
2285#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
2286
2287/*
2288 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2289 that triggers automatic consolidation of possibly-surrounding
2290 fastbin chunks. This is a heuristic, so the exact value should not
2291 matter too much. It is defined at half the default trim threshold as a
2292 compromise heuristic to only attempt consolidation if it is likely
2293 to lead to trimming. However, it is not dynamically tunable, since
2294 consolidation reduces fragmentation surrounding loarge chunks even
2295 if trimming is not used.
2296*/
2297
2298#define FASTBIN_CONSOLIDATION_THRESHOLD \
2299 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2300
2301/*
2302 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2303 they are used as flags.
2304*/
2305
2306/*
2307 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2308 freed chunks at all. It is set true when entering a chunk into any
2309 bin.
2310*/
2311
2312#define ANYCHUNKS_BIT (1U)
2313
2314#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2315#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2316#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2317
2318/*
2319 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2320 some fastbin chunks. It is set true on entering a chunk into any
2321 fastbin, and cleared only in malloc_consolidate.
2322*/
2323
2324#define FASTCHUNKS_BIT (2U)
2325
2326#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2327#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2328#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2329
2330/*
2331 Set value of max_fast.
2332 Use impossibly small value if 0.
2333*/
2334
2335#define set_max_fast(M, s) \
2336 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2337 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2338
2339#define get_max_fast(M) \
2340 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
2341
2342
2343/*
2344 morecore_properties is a status word holding dynamically discovered
2345 or controlled properties of the morecore function
2346*/
2347
2348#define MORECORE_CONTIGUOUS_BIT (1U)
2349
2350#define contiguous(M) \
2351 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2352#define noncontiguous(M) \
2353 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2354#define set_contiguous(M) \
2355 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2356#define set_noncontiguous(M) \
2357 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2358
2359#define MORECORE_32BIT_BIT (2U)
2360
2361#define morecore32bit(M) \
2362 (((M)->morecore_properties & MORECORE_32BIT_BIT))
2363#define nonmorecore32bit(M) \
2364 (((M)->morecore_properties & MORECORE_32BIT_BIT) == 0)
2365#define set_morecore32bit(M) \
2366 ((M)->morecore_properties |= MORECORE_32BIT_BIT)
2367#define set_nonmorecore32bit(M) \
2368 ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2369
2370
2371
2372/* ----------------- dnmalloc -------------------- */
2373
2374/* size of pages */
2375#define PGSIZE malloc_getpagesize
2376/* pointer size */
2377#define PTRSIZE sizeof(long)
2378
2379
2380
2381/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2382 address space: the hashtable has granularity of 16*8, set it to something
2383 closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2384 page)
2385*/
2386
2387/* The maximum heap size that dnmalloc can operate with
2388 * represented in hex to avoid annoying gcc warning
2389 *
2390 * Avoid integer overflow, cover complete 32bit address
2391 * space for portability. With deferred allocation, the
2392 * hashtable size is a non-issue.
2393 */
2394#define HEAPMAXSIZE_HALF 0x80000000UL
2395
2396/* How many elements are stored in the linked list */
2397#define LINKEDLSTELS 8
2398
2399/* Minimum size of a chunk */
2400
[172]2401#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2402# define MINCHUNKSIZE 32
2403#else
2404# define MINCHUNKSIZE 16
2405#endif
2406
2407
[171]2408/* The amount of hashtable entries for each page:
2409Pagesize divded by the numer of elements in the linkedlists
2410divided by the minimum chunk size
2411*/
2412#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2413
2414/* The amount of hashtable entries needed to manage the memory:
2415Maximum heap size divided by page size multiplied by the amount
2416of chunk info's per page
2417*/
2418#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2419
2420/* Initial size of the map for the hashtable
2421Amount of entries muliplied by pointer size
2422*/
2423#define HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
2424
2425/* Amount of free chunks that the system should allocate at the start */
2426#define NUMBER_FREE_CHUNKS 32768
2427
2428/* Initial size of the chunk info region,
2429also used when growing the region */
2430#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
2431
2432/* Start address of the heap */
2433char *startheap;
2434
2435/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2436chunkinfoptr *hashtable;
2437
2438/* Current chunkinfo region */
2439struct cireginfo *currciinfo = 0;
2440struct cireginfo *firstciinfo = 0;
2441
2442unsigned long totalcictr = 0;
2443
2444
2445/* Initialize the area for chunkinfos and the hashtable and protect
2446 * it with non-writable pages
2447 */
/* One-time initialization of dnmalloc's metadata structures:
 * mmaps the malloc_state, then the chunkinfo hashtable, and fences the
 * hashtable with PROT_NONE guard pages (one below; with PARANOIA > 1,
 * one above as well) so stray writes fault instead of corrupting it.
 * Note: the NDEBUG branches exist because assert() compiles away under
 * NDEBUG, so an explicit check-and-abort is used instead.
 */
static void
dnmalloc_init ()
{
 void *hashtb;
 int mprot;
 int flags = MAP_PRIVATE;

 /* Allocate the malloc_state struct */
 malloc_mmap_state();

 /* Use MAP_NORESERVE if available (Solaris, HP-UX; most other
 * systems use defered allocation anyway.
 */
#ifdef MAP_NORESERVE
 flags |= MAP_NORESERVE;
#endif

 /* Always start at 0, hashtable covers whole 32bit address space
 */
#define STARTHEAP_IS_ZERO
 startheap = 0;

 /* Map space for the hashtable */
#if PARANOIA > 1
 hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
#else
 hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
#endif

#ifdef NDEBUG
 if (hashtb == MAP_FAILED) {
 fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
 abort ();
 }
#else
 assert(hashtb != MAP_FAILED);
#endif

 /* Protect the hashtable with non-writable pages */
 /* First guard page: the page immediately below the table proper. */
 mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
#ifdef NDEBUG
 if (mprot == -1) {
 fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
 strerror (errno));
 abort ();
 }
#else
 assert(mprot != -1);
#endif

 /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
 /* The usable table starts one page past the mapping (after the guard). */
 hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);

 /* Protect the hashtable with non-writable pages */
 /* Second guard page above the table, only in high-paranoia builds. */
#if PARANOIA > 1
 mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
#ifdef NDEBUG
 if (mprot == -1) {
 fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
 strerror (errno));
 abort ();
 }
#else
 assert(mprot != -1);
#endif
#endif
}
2515
2516
2517
2518/* Extend the region for chunk infos by mapping more memory before the region */
/* Map a fresh chunkinfo region (CIREGSIZE bytes plus guard page(s)),
 * fence it with PROT_NONE pages like the hashtable, link it at the
 * tail of the region list, and make it the current region.
 * The new region starts with position == 1: slot 0 of the region holds
 * the struct cireginfo header itself (see cireg_getfree's pointer
 * arithmetic), so the first handed-out chunkinfo is slot 1.
 */
static void
cireg_extend ()
{
 void *newcireg;
 int mprot;
 struct cireginfo *tempciinfo = 0;

#if PARANOIA > 1
 newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
#else
 newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
#endif

#ifdef NDEBUG
 /* assert() compiles away under NDEBUG, hence explicit check+abort. */
 if (newcireg == MAP_FAILED)
 {
 fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
 strerror (errno));
 abort ();
 }
#else
 assert(newcireg != MAP_FAILED);
#endif
 /* Lower guard page. */
 mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
#ifdef NDEBUG
 if (mprot == -1) {
 fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
 strerror (errno));
 abort ();
 }
#else
 assert(mprot != -1);
#endif
 /* Usable region begins after the guard page. */
 newcireg = ((char*)newcireg)+PGSIZE;

#if PARANOIA > 1
 /* Upper guard page (high-paranoia builds only). */
 mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
#ifdef NDEBUG
 if (mprot == -1) {
 fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
 strerror (errno));
 abort ();
 }
#else
 assert(mprot != -1);
#endif
#endif

 /* Append to the region list and switch the current region over. */
 tempciinfo = currciinfo;
 currciinfo = (struct cireginfo *) newcireg;
 if (tempciinfo)
 tempciinfo->next = currciinfo;
 currciinfo->position = 1;
 currciinfo->freecounter = NUMBER_FREE_CHUNKS;
 if (!firstciinfo)
 firstciinfo = currciinfo;
 totalcictr++;
 VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
}
2578
2579
2580/* Get a free chunkinfo */
2581static chunkinfoptr
2582cireg_getfree ()
2583{
2584 chunkinfoptr freeci;
2585 chunkinfoptr freelst = 0;
2586 struct cireginfo *newciinfo = firstciinfo;
2587
2588 if (newciinfo) {
2589 freelst = newciinfo->freelist;
2590
2591 if (!freelst && newciinfo->next) {
2592 do {
2593 newciinfo = newciinfo->next;
2594 freelst = newciinfo->freelist;
2595 } while (!freelst && newciinfo->next);
2596 }
2597 }
2598
2599 /* Check if there are any free chunkinfos on the list of free chunkinfos */
2600 if (freelst)
2601 {
[172]2602 freeci = freelst;
2603 newciinfo->freecounter--;
2604 newciinfo->freelist = freelst->fd;
2605
2606 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2607 sizeof(struct chunkinfo));
2608
2609 freeci->prev_size = 0;
2610 freeci->size = 0;
2611 freeci->req = 0;
2612 freeci->hash_next = NULL;
2613 freeci->fd = NULL;
2614 freeci->bk = NULL;
2615 freeci->chunk = NULL;
2616 return (freeci);
[171]2617 }
2618 else
2619 {
2620 /* No free chunkinfos, check if chunkinfo region still has place
2621 * for a chunkinfo. If not, extend the region.
2622 */
2623 if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2624 cireg_extend ();
2625 /* Get a chunkinfo from the chunkinfo region */
2626 freeci = (chunkinfoptr) currciinfo + currciinfo->position;
2627 currciinfo->freecounter--;
2628 currciinfo->position++;
[172]2629
2630 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2631 sizeof(struct chunkinfo));
2632
[171]2633 return (freeci);
2634 }
2635}
2636
2637static void freeciregion(struct cireginfo *freeme) {
2638 /* free the chunkinfo region */
2639 struct cireginfo *newciinfo = firstciinfo;
2640 struct cireginfo *prevciinfo = firstciinfo;
2641 void *unmapme;
[172]2642
[171]2643 while (newciinfo && newciinfo != freeme) {
2644 prevciinfo = newciinfo;
2645 newciinfo = newciinfo->next;
2646 }
2647 assert(freeme == newciinfo); /* rw */
2648 assert(newciinfo != NULL); /* rw */
2649 if (newciinfo)
2650 prevciinfo->next = newciinfo->next;
2651 unmapme = (void *) ((char*)freeme - PGSIZE);
[172]2652 VALGRIND_DESTROY_MEMPOOL((char*)freeme);
[171]2653#if PARANOIA > 1
2654 munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2655#else
2656 munmap(unmapme, CIREGSIZE+PGSIZE);
2657#endif
2658}
2659
2660
2661static void freecilst_add(chunkinfoptr p) {
2662
2663 struct cireginfo *newciinfo;
[172]2664
[171]2665 newciinfo = currciinfo;
2666 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2667 p->fd = newciinfo->freelist;
2668 newciinfo->freelist = p;
2669 newciinfo->freecounter++;
[172]2670 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2671 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2672 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2673 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2674 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2675 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2676 } else {
2677 newciinfo = firstciinfo;
2678 if (newciinfo) {
2679 do {
2680 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2681 p->fd = newciinfo->freelist;
2682 newciinfo->freelist = p;
2683 newciinfo->freecounter++;
[172]2684 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2685 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2686 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2687 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2688 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2689 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2690 if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2691 freeciregion(newciinfo);
2692 break;
2693 }
2694 newciinfo = newciinfo->next;
2695 } while (newciinfo);
2696 }
2697 }
2698}
2699
2700/* Calculate the hash table entry for a chunk */
2701#ifdef STARTHEAP_IS_ZERO
2702#define hash(p) (((unsigned long) p) >> 7)
2703#else
2704#define hash(p) (((unsigned long) p - (unsigned long) startheap) >> 7)
2705#endif
2706
/* Insert chunkinfo ci into the hashtable bucket for its chunk.
 * Buckets are singly linked lists (hash_next) kept sorted by ascending
 * chunk address.  Also refreshes the prev_size of the chunkinfo that
 * follows ci -- the next list entry when ci becomes the new head of a
 * non-empty bucket, otherwise the physically next chunkinfo (via the
 * 'out' path, which is reached both by goto and by fall-through after
 * a mid-list insertion).
 */
static void
hashtable_add (chunkinfoptr ci)
{
 chunkinfoptr temp, next;
 unsigned long hashval;
 mchunkptr cic = chunk (ci);

 hashval = hash (cic);

 if (hashval < AMOUNTHASH) {

 temp = hashtable[hashval];

#ifdef DNMALLOC_DEBUG
 fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
#endif

 /* If no pointer to a chunk info list is stored at this location
 * in the hashtable or if the chunk's address is smaller than the
 * one present, add the chunk to the front of the linked list
 */
 if (temp == 0 || chunk (temp) > cic)
 {
 ci->hash_next = temp;
 hashtable[hashval] = ci;
 if (!temp) /* more likely case */
 goto out;
 /* Bucket was non-empty: the displaced head now follows ci. */
 temp->prev_size = chunksize(ci);
 return;
 }
 else
 {
 /* We must place the chunk in the linked list for this hashentry
 * Loop to end of list or to a position where temp's chunk's address
 * is larger than the new chunkinfo's chunk's address
 */
 if (!temp->hash_next || (chunk (temp->hash_next) > cic))
 {
 ci->hash_next = temp->hash_next;
 temp->hash_next = ci;
 }
 else
 {
 /* Advance temp to the last entry whose chunk address is
 * below cic -- the insertion point. */
 while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
 {
 temp = temp->hash_next;
 }
 /* Place in linked list if not already there */
 if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
 {
 ci->hash_next = temp->hash_next;
 temp->hash_next = ci;
 }
 }
 }
 }
 else {
#ifdef DNMALLOC_CHECKS
 if (hashval >= AMOUNTHASH) {
 fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
 abort();
 }
#else
 assert(hashval < AMOUNTHASH);
#endif
 }

 out:
 /* Keep the physically following chunk's prev_size coherent. */
 next = next_chunkinfo(ci);
 if (!next)
 return;
 next->prev_size = chunksize(ci);
}
2780
2781static void
2782hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2783{
2784 chunkinfoptr next;
2785
2786#ifdef DNMALLOC_DEBUG
2787 fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert),
2788 (unsigned long)hash(chunk(ci_insert));
2789#endif
2790
2791 if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2792 hashtable_add(ci_insert);
2793 }
2794 else {
[172]2795
[171]2796 ci_insert->hash_next = ci_orig->hash_next;
2797 ci_orig->hash_next = ci_insert;
2798
2799 /* added for prevsize */
2800 if (!(ci_insert->hash_next))
2801 next = next_chunkinfo(ci_insert);
2802 else
2803 next = ci_insert->hash_next;
2804
2805 if (!next)
2806 {
2807 ci_insert->prev_size = chunksize(ci_orig);
2808 }
2809 else
2810 {
2811 next->prev_size = chunksize(ci_insert);
2812 ci_insert->prev_size = chunksize(ci_orig);
2813 }
2814 }
2815}
2816
/* Remove the chunkinfo describing chunk p from its hashtable bucket.
 * The entry is assumed to exist: under DNMALLOC_CHECKS a missing entry
 * aborts with a diagnostic, otherwise it trips an assert.
 * NOTE(review): chunk(temp) is evaluated before temp is checked for
 * NULL -- an empty bucket would dereference a null pointer here; this
 * relies on the invariant that p always has an entry. Confirm.
 */
static void
hashtable_remove (mchunkptr p)
{
 chunkinfoptr prevtemp, temp;
 unsigned long hashval;

 hashval = hash (p);
#ifdef DNMALLOC_DEBUG
 fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
#endif
 assert(hashval < AMOUNTHASH); /* rw */
 prevtemp = temp = hashtable[hashval];
 /* Head of the chain is the one to remove: just advance the bucket. */
 if (chunk (temp) == p) {
 hashtable[hashval] = temp->hash_next;
 }
 else
 {
 /* Walk the chain keeping a trailing pointer for the unlink. */
 if (temp && chunk (temp) != p) {
 do
 {
 prevtemp = temp;
 temp = temp->hash_next;
 } while (temp && chunk (temp) != p);
 }
#ifdef DNMALLOC_CHECKS
 if (!temp) {
 fprintf (stderr,
 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
 p, hashval);
 abort();
 }
#else
 assert(temp != NULL);
#endif
 prevtemp->hash_next = temp->hash_next;
 }
}
2854
[172]2855/* mmapped chunks are multiples of pagesize, no hash_nexts,
2856 * just remove from the hashtable
2857 */
[171]2858#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2859
/* Remove ci_todelete from the hashtable when its list predecessor may
 * already be known.  Two cases:
 *  - ci_todelete is NOT ci_orig's list successor: it must then be the
 *    head of its own bucket (checked/asserted), so the bucket pointer
 *    is advanced past it;
 *  - ci_todelete IS ci_orig's successor: it is skipped in the chain,
 *    and the prev_size of whatever now follows ci_orig (list
 *    successor, or physical successor at chain end) is refreshed.
 */
static void
hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
{
 unsigned long hashval;
 chunkinfoptr next;

#ifdef DNMALLOC_DEBUG
 fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
#endif

 if (ci_orig->hash_next != ci_todelete) {
 hashval = hash(chunk(ci_todelete));
 assert(hashval < AMOUNTHASH); /* rw */
#ifdef DNMALLOC_CHECKS
 if (hashtable[hashval] != ci_todelete ) {
 fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
 }
#else
 assert(hashtable[hashval] == ci_todelete);
#endif
 hashtable[hashval] = ci_todelete->hash_next;
 }

 else {
 ci_orig->hash_next = ci_todelete->hash_next;
 if (!ci_orig->hash_next) {
 next = next_chunkinfo(ci_orig);
 } else {
 next = ci_orig->hash_next;
 }
 if (next)
 next->prev_size = chunksize(ci_orig);

 }
}
2895
2896
2897static chunkinfoptr
2898hashtable_lookup (mchunkptr p)
2899{
2900 chunkinfoptr ci;
2901 unsigned long hashval;
2902
2903 /* if we were called wrongly
2904 * if ((char *) p < startheap) return 0;
2905 */
2906 if ((char *) p >= startheap)
2907 {
2908 hashval = hash (p);
2909 assert(hashval < AMOUNTHASH); /* rw */
2910 ci = hashtable[hashval];
2911 if (ci && chunk (ci) == p)
2912 return ci;
2913
2914 if (ci) {
2915 do {
2916 ci = ci->hash_next;
2917 } while (ci && chunk (ci) != p);
2918 }
2919#ifdef DNMALLOC_CHECKS
2920 // This should never occur but if it does, we'd like to know
2921 if (!ci) {
2922 fprintf (stderr,
2923 "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2924 p, hashval);
2925 abort();
2926 }
2927#else
2928 assert(ci != NULL);
2929#endif
2930 return ci;
2931 }
2932 return 0;
2933}
2934
2935
2936
2937/*
2938 ----------- Internal state representation and initialization -----------
2939*/
2940
/* Central allocator state; a single instance is mapped between two
   guard pages by malloc_mmap_state() and reached via get_malloc_state(). */
struct malloc_state {

  /* The maximum chunk size to be eligible for fastbin */
  INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */

  /* Fastbins: LIFO single-linked lists of small recently freed chunks */
  mfastbinptr fastbins[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  chunkinfoptr top;

  /* The remainder from the most recent split of a small request */
  chunkinfoptr last_remainder;

  /* Normal bins */
  struct chunkinfo bins[NBINS];

  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
  unsigned int binmap[BINMAPSIZE+1];

  /* Tunable parameters (see mallopt) */
  CHUNK_SIZE_T trim_threshold;
  INTERNAL_SIZE_T top_pad;
  INTERNAL_SIZE_T mmap_threshold;

  /* Memory map support */
  int n_mmaps;
  int n_mmaps_max;
  int max_n_mmaps;

  /* Cache malloc_getpagesize */
  unsigned int pagesize;

  /* Canary: random bytes set in malloc_init_state and applied to
     chunks via guard_set() */
  char guard_stored[GUARD_SIZE];

  /* Track properties of MORECORE (contiguity, 32-bit reachability) */
  unsigned int morecore_properties;

  /* Statistics */
  INTERNAL_SIZE_T mmapped_mem;
  INTERNAL_SIZE_T sbrked_mem;
  INTERNAL_SIZE_T max_sbrked_mem;
  INTERNAL_SIZE_T max_mmapped_mem;
  INTERNAL_SIZE_T max_total_mem;
};
2987
typedef struct malloc_state *mstate;

/*
  There is exactly one instance of this struct in this malloc.
  If you are adapting this malloc in a way that does NOT use a static
  malloc_state, you MUST explicitly zero-fill it before using. This
  malloc relies on the property that malloc_state is initialized to
  all zeroes (as is true of C statics).
*/

static struct malloc_state * av_ = NULL; /* never directly referenced */

/*
  All uses of av_ are via get_malloc_state().
  At most one "call" to get_malloc_state is made per invocation of
  the public versions of malloc and free, but other routines
  that in turn invoke malloc and/or free may call more than once.
  Also, it is called in check* routines if DEBUG is set.
*/

#define get_malloc_state() (av_)
3009
/*
  Initialize a malloc_state struct.

  (This comment documents malloc_init_state(), defined below;
  malloc_mmap_state(), which comes first, only maps the guarded pages
  that hold the struct.)

  malloc_init_state is called only from within malloc_consolidate,
  which needs to be called in the same contexts anyway. It is never
  called directly outside of malloc_consolidate because some
  optimizing compilers try to inline it at all call points, which
  turns out not to be an optimization at all. (Inlining it in
  malloc_consolidate is fine though.)
*/
3019
3020#if __STD_C
3021static void malloc_mmap_state(void)
3022#else
3023static void malloc_mmap_state()
3024#endif
3025{
3026 int mprot;
3027 unsigned long pagesize = malloc_getpagesize;
3028 size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3029
3030 void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3031
3032
3033#ifdef NDEBUG
3034 if (foo == MAP_FAILED) {
3035 fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3036 abort ();
3037 }
3038#else
3039 assert(foo != MAP_FAILED);
3040#endif
3041
3042 mprot = mprotect(foo, pagesize, PROT_NONE);
3043#ifdef NDEBUG
3044 if (mprot == -1) {
3045 fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3046 strerror (errno));
3047 abort ();
3048 }
3049#else
3050 assert(mprot != -1);
3051#endif
3052
3053 av_ = (struct malloc_state *) ((char*)foo + pagesize);
3054
3055 MALLOC_ZERO(av_, sizeof(struct malloc_state));
3056
3057 mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3058#ifdef NDEBUG
3059 if (mprot == -1) {
3060 fprintf (stderr,
3061 "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3062 strerror (errno));
3063 abort ();
3064 }
3065#else
3066 assert(mprot != -1);
3067#endif
3068}
3069
/* One-time initialization of the allocator state: probe MORECORE,
   link the normal bins, set tunables, and install the initial
   zero-sized top chunk at the heap base. */
#if __STD_C
static void malloc_init_state(mstate av)
#else
static void malloc_init_state(av) mstate av;
#endif
{
  int i;
  mbinptr bin;

  void * morecore_test = MORECORE(0);
  unsigned long hashval;

  /* Test morecore function
  */
  set_morecore32bit(av);

  if (morecore_test == MORECORE_FAILURE)
    {
      set_nonmorecore32bit(av);
    }
  else
    {
      /* On 64bit systems, the heap may be located above the
       * 32bit address space. Since mmap() probably still can be
       * convinced to map within 32bit, we don't use sbrk().
       */
      hashval = hash (morecore_test);
      if (hashval >= AMOUNTHASH)
        {
          set_nonmorecore32bit(av);
        }
    }


  /* Establish circular links for normal bins (bin 0 is unused) */
  for (i = 1; i < NBINS; ++i) {
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

  /* Tunable defaults; adjustable later via mallopt-style setters */
  av->top_pad = DEFAULT_TOP_PAD;
  av->n_mmaps_max = DEFAULT_MMAP_MAX;
  av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
  set_contiguous(av);
#else
  set_noncontiguous(av);
#endif

  set_max_fast(av, DEFAULT_MXFAST);

  /* The initial top chunk: zero-sized, sitting at the heap base, and
     registered as the head of hash bucket 0 so lookups can find it. */
  av->top = cireg_getfree ();
  av->top->chunk = (mchunkptr) startheap;
  av->top->size = 0;
  set_previnuse(av->top);
  clear_inuse(av->top);
  hashtable[0] = av->top;
  av->pagesize = malloc_getpagesize;

  /* Random canary bytes, later stamped onto chunks by guard_set(). */
  memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);

}
3134
/*
  Other internal utilities operating on mstates
*/

/* Forward declarations (pre-ANSI fallbacks when __STD_C is unset). */
#if __STD_C
static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate); /* grow via sbrk/mmap */
static int sYSTRIm(size_t, mstate);                /* shrink heap top */
static void malloc_consolidate(mstate);            /* merge fastbin chunks */
#else
static Void_t* sYSMALLOc();
static int sYSTRIm();
static void malloc_consolidate();
#endif
3148
3149/* dnmalloc functions */
3150/* needs mstate so moved here */
3151
/* Return the chunkinfo of the chunk physically following ci:
 * av->top if ci borders the top chunk, NULL if no successor is known
 * to the hashtable. */
static chunkinfoptr
next_chunkinfo (chunkinfoptr ci)
{
  mchunkptr nextp;
  unsigned long hashval;
  chunkinfoptr cinfonextp;
  mstate av = get_malloc_state();

  /* When ci is NOT the last element in its bucket's linked list we
     can simply return the next list element (else-branch at the
     bottom); only a list tail requires an address-based lookup. */
  if (!ci->hash_next)
    {
      /* ci is the last element, find the next chunkinfo by
       * looking up the chunkinfo for the chunk that is after ci's chunk
       */
      nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));

      if (!(nextp == av->top->chunk))
        {
          hashval = hash (nextp);
          /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
          cinfonextp = hashtable[hashval];
          if (cinfonextp && chunk (cinfonextp) == nextp)
            return cinfonextp;

#ifdef DNMALLOC_CHECKS_EXTRA
          /* This seems bogus; a chunkinfo may legally have no nextp if
           * it's the last one allocated (?)
           */
          else {
            if (cinfonextp)
              fprintf (stderr,
                       "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
                       chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
            else
              fprintf (stderr,
                       "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
                       chunk(ci), hashval, "null", "null", nextp);
          }
#endif

          return NULL;
        }
      else
        {
          return av->top;
        }

    }
  else
    {
      /* ci->hash_next doubles as the physical successor here --
         NOTE(review): relies on bucket lists being kept in address
         order; confirm against hashtable_insert/hashtable_add. */
      return (ci->hash_next);
    }
}
3207
3208static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3209 mchunkptr nextp;
3210 if (oldp->hash_next == newp)
3211 return 1;
3212 nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3213 if (nextp == chunk(newp))
3214 return 1;
3215 return 0;
3216}
3217
3218
3219
3220/* Get the chunkinfo of the physically previous chunk */
3221/* Since we disposed of prev_size, we need this function to find the previous */
3222
3223static chunkinfoptr
3224prev_chunkinfo (chunkinfoptr ci)
3225{
3226 unsigned int i;
3227 chunkinfoptr prev;
3228 mchunkptr prevchunk = 0;
3229 /* chunkinfoptr temp; */
3230
3231 /* Get the hashtable location of the chunkinfo */
3232 i = hash (chunk (ci));
3233 assert(i < AMOUNTHASH); /* rw */
3234
3235 /* Get the first element of the linked list of chunkinfo's that contains p */
3236 prev = hashtable[i];
3237
3238 if (ci == prev) {
3239 prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3240 i = hash(prevchunk);
3241 assert(i < AMOUNTHASH); /* rw */
3242 /* Loop over the linked list until we reach the last element */
3243 for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3244 } else {
3245 /* p is not the first element in the linked list, we can just
3246 loop over the list and return the previous
3247 */
3248 for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3249 }
3250
3251 return prev;
3252}
3253
3254
/*
  Debugging support
  Dnmalloc broke dlmallocs debugging functions, should fix them some
  time in the future, for now leave them undefined.
*/

/* All consistency-check hooks compile to nothing (no-ops). */
#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)
#define check_malloc_state()
3267
3268
3269/* ----------- Routines dealing with system allocation -------------- */
3270
3271/*
3272 sysmalloc handles malloc cases requiring more memory from the system.
3273 On entry, it is assumed that av->top does not have enough
3274 space to service request for nb bytes, thus requiring that av->top
3275 be extended or replaced.
3276*/
3277
/* Obtain more memory from the system (sbrk or mmap) when av->top is
   too small for an nb-byte request; returns the new chunk's payload
   or 0 on failure. */
#if __STD_C
static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
#else
static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
#endif
{
  chunkinfoptr old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;    /* its size */
  char* old_end;               /* its end address */

  long size;                   /* arg to first MORECORE or mmap call */
  char* brk;                   /* return value from MORECORE */

  long correction;             /* arg to 2nd MORECORE call */
  char* snd_brk;               /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char* aligned_brk;           /* aligned offset into brk */

  chunkinfoptr p;              /* the allocated/returned chunk */
  chunkinfoptr remainder;      /* remainder from allocation */
  chunkinfoptr fencepost;      /* fencepost */
  CHUNK_SIZE_T remainder_size; /* its size */

  CHUNK_SIZE_T sum;            /* for updating stats */

  size_t pagemask = av->pagesize - 1;

#ifdef DNMALLOC_DEBUG
  fprintf(stderr, "Enter sysmalloc\n");
#endif
  /*
    If there is space available in fastbins, consolidate and retry
    malloc from scratch rather than getting memory from system. This
    can occur only if nb is in smallbin range so we didn't consolidate
    upon entry to malloc. It is much easier to handle this case here
    than in malloc proper.
  */


  if (have_fastchunks(av)) {
    Void_t * retval;
    assert(in_smallbin_range(nb));
    malloc_consolidate(av);
#ifdef DNMALLOC_DEBUG
    fprintf(stderr, "Return sysmalloc have_fastchunks\n");
#endif
    /* NOTE(review): nb is the padded internal size; subtracting
       MALLOC_ALIGN_MASK re-derives a request mALLOc will round back
       to nb -- confirm against checked_request2size. */
    retval = mALLOc(nb - MALLOC_ALIGN_MASK);
    VALGRIND_FREELIKE_BLOCK(retval, 0);
    return retval;
  }


  /*
    If have mmap, and the request size meets the mmap threshold, and
    the system supports mmap, and there are few enough currently
    allocated mmapped regions, try to directly map this request
    rather than expanding top.
  */

  if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
               (av->n_mmaps < av->n_mmaps_max))) {

    char* mm;             /* return value from mmap call*/

    /*
      Round up size to nearest page. For mmapped chunks, the overhead
      is one SIZE_SZ unit larger than for normal chunks, because there
      is no following chunk whose prev_size field could be used.
    */
    size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;

    /* Don't try if size wraps around 0 */
    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {


      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mm != (char*)(MORECORE_FAILURE)) {

        VALGRIND_MAKE_MEM_NOACCESS(mm,size);

        /*
          The offset to the start of the mmapped region is stored
          in the prev_size field of the chunk. This allows us to adjust
          returned start address to meet alignment requirements here
          and in memalign(), and still be able to compute proper
          address argument for later munmap in free() and realloc().
        */

        front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
        p = cireg_getfree();

        if (front_misalign > 0) {
          /* hash_next is unused for mmapped chunks; it stores the
             alignment correction needed to recover the mmap base. */
          correction = MALLOC_ALIGNMENT - front_misalign;
          p->chunk = (mchunkptr)(mm + correction);
          p->hash_next = (chunkinfoptr) correction;
          set_head(p, (size - correction) |INUSE|IS_MMAPPED);
        }
        else {
          p->chunk = (mchunkptr)mm;
          p->hash_next = 0;
          set_head(p, size|INUSE|IS_MMAPPED);
        }
        hashtable_add(p);
        /* update statistics */

        if (++av->n_mmaps > av->max_n_mmaps)
          av->max_n_mmaps = av->n_mmaps;

        sum = av->mmapped_mem += size;
        if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
          av->max_mmapped_mem = sum;
        sum += av->sbrked_mem;
        if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
          av->max_total_mem = sum;

        check_chunk(p);

#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Return mmapped (%lu, total %lu)\n",
                size, (unsigned long)/* size_t */av->max_total_mem );
#endif
        return chunk(p);
      }
    }
  }

  /* Record incoming configuration of top */

  old_top = av->top;
  old_size = chunksize(old_top);
  old_end = (char*)(chunk_at_offset(chunk(old_top), old_size));

  brk = snd_brk = (char*)(MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
  */

  /* assert((old_top == initial_top(av) && old_size == 0) ||
     ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
     prev_inuse(old_top))); */

  /* Precondition: not enough current space to satisfy nb request */
  assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));

  /* Precondition: all fastbins are consolidated */
  assert(!have_fastchunks(av));

  /* Request enough space for nb + pad + overhead */
  size = nb + av->top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if
    we don't actually get contiguous space.
  */
  if (contiguous(av))
    size -= old_size;

  /*
    Round to a multiple of page size.
    If MORECORE is not contiguous, this ensures that we only call it
    with whole-page arguments. And if MORECORE is contiguous and
    this is not first time through, this preserves page-alignment of
    previous calls. Otherwise, we correct to page-align below.
  */

  size = (size + pagemask) & ~pagemask;

  /*
    Don't try to call MORECORE if argument is so big as to appear
    negative. Note that since mmap takes size_t arg, it may succeed
    below even if we cannot call MORECORE.
  */
  if (size > 0 && morecore32bit(av))
    brk = (char*)(MORECORE(size));

  /*
    If have mmap, try using it as a backup when MORECORE fails or
    cannot be used. This is worth doing on systems that have "holes" in
    address space, so sbrk cannot extend to give contiguous space, but
    space is available elsewhere. Note that we ignore mmap max count
    and threshold limits, since the space will not be used as a
    segregated mmap region.
  */
  if (brk != (char*)(MORECORE_FAILURE)) {
    av->sbrked_mem += size;
    VALGRIND_MAKE_MEM_NOACCESS(brk,size);
  }

  else {

#ifdef DNMALLOC_DEBUG
    fprintf(stderr, "Morecore failure in sysmalloc\n");
#endif

    /* Cannot merge with old top, so add its size back in */
    if (contiguous(av))
      size = (size + old_size + pagemask) & ~pagemask;

    /* If we are relying on mmap as backup, then use larger units */
    if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
      size = MMAP_AS_MORECORE_SIZE;

    /* Don't try if size wraps around 0 */
    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {

#ifdef DNMALLOC_DEBUG
      fprintf(stderr, "Try mmap in sysmalloc\n");
#endif
      brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (brk != (char*)(MORECORE_FAILURE)) {

        VALGRIND_MAKE_MEM_NOACCESS(brk,size);

        av->mmapped_mem += size;
#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
#endif

        /* We do not need, and cannot use, another sbrk call to find end */
        snd_brk = brk + size;

        /*
          Record that we no longer have a contiguous sbrk region.
          After the first time mmap is used as backup, we do not
          ever rely on contiguous space since this could incorrectly
          bridge regions.
        */
        set_noncontiguous(av);
      }
    }
  }

  if (brk != (char*)(MORECORE_FAILURE)) {
#ifdef DNMALLOC_DEBUG
    fprintf(stderr, "Success path %lu allocated, sbrked %lu\n",
            size, (unsigned long)av->sbrked_mem);
#endif
    /* av->sbrked_mem += size; moved up */

    /*
      If MORECORE extends previous space, we can likewise extend top size.
    */

    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
      set_head(old_top, (size + old_size) | PREV_INUSE);
#ifdef DNMALLOC_DEBUG
      fprintf(stderr, "Previous space extended\n");
#endif
    }

    /*
      Otherwise, make adjustments:

      * If the first time through or noncontiguous, we need to call sbrk
        just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will meet
        MALLOC_ALIGNMENT

      * If there was an intervening foreign sbrk, we need to adjust sbrk
        request size to account for fact that we will not be able to
        combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time, in
        which case we might as well use the whole last page of request.
        So we allocate enough more memory to hit a page boundary now,
        which in turn causes future contiguous calls to page-align.
    */

    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;

      /*
        If MORECORE returns an address lower than we have seen before,
        we know it isn't really contiguous. This and some subsequent
        checks help cope with non-conforming MORECORE functions and
        the presence of "foreign" calls to MORECORE from outside of
        malloc or by other threads. We cannot guarantee to detect
        these in all cases, but cope with the ones we do detect.
      */
      if (contiguous(av) && old_size != 0 && brk < old_end) {
        set_noncontiguous(av);
      }

      /* handle contiguous cases */
      if (contiguous(av)) {

#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Handle contiguous cases\n");
#endif
        /*
          We can tolerate forward non-contiguities here (usually due
          to foreign calls) but treat them as part of our space for
          stats reporting.
        */
        if (old_size != 0)
          av->sbrked_mem += brk - old_end;

        /* Guarantee alignment of first new chunk made from this space */

        front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {

          /*
            Skip over some bytes to arrive at an aligned position.
            We don't need to specially mark these wasted front bytes.
            They will never be accessed anyway because
            prev_inuse of av->top (and any chunk created from its start)
            is always true after initialization.
          */

          correction = MALLOC_ALIGNMENT - front_misalign;
          aligned_brk += correction;
        }

        /*
          If this isn't adjacent to existing space, then we will not
          be able to merge with old_top space, so must add to 2nd request.
        */

        correction += old_size;

        /* Extend the end address to hit a page boundary */
        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

        assert(correction >= 0);
        snd_brk = (char*)(MORECORE(correction));

        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          /*
            If can't allocate correction, try to at least find out current
            brk. It might be enough to proceed without failing.
          */
          correction = 0;
          snd_brk = (char*)(MORECORE(0));
        }
        else if (snd_brk < brk) {
          /*
            If the second call gives noncontiguous space even though
            it says it won't, the only course of action is to ignore
            results of second call, and conservatively estimate where
            the first call left us. Also set noncontiguous, so this
            won't happen again, leaving at most one hole.

            Note that this check is intrinsically incomplete. Because
            MORECORE is allowed to give more space than we ask for,
            there is no reliable way to detect a noncontiguity
            producing a forward gap for the second call.
          */
          snd_brk = brk + size;
          correction = 0;
          set_noncontiguous(av);
        }
        else {
          VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
        }

      }

      /* handle non-contiguous cases */
      else {

#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Handle non-contiguous cases\n");
#endif

        /* MORECORE/mmap must correctly align */
        assert(aligned_OK(brk));

        /* Find out current end of memory */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          snd_brk = (char*)(MORECORE(0));
          av->sbrked_mem += snd_brk - brk - size;
        }
#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
#endif
      }

      /* Adjust top based on results of second sbrk.
       *
       * If mmap() has been used as backup for failed morecore(),
       * we end up in this branch as well.
       */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Adjust top, correction %lu\n", correction);
#endif
        /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
        av->top = cireg_getfree();
        av->top->chunk = (mchunkptr)aligned_brk;
        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
#ifdef DNMALLOC_DEBUG
        fprintf(stderr, "Adjust top, top %p size %lu\n",
                av->top, (unsigned long)chunksize(av->top));
#endif
        hashtable_add(av->top);
        av->sbrked_mem += correction;

        /*
          If not the first time through, we either have a
          gap due to foreign sbrk or a non-contiguous region. Insert a
          double fencepost at old_top to prevent consolidation with space
          we don't own. These fenceposts are artificial chunks that are
          marked as inuse. Original dlmalloc had two of these but too
          small to use. To ensure that the linked lists contain a maximum
          of 8 elements we only use 1. Inuse is determined by the
          current rather than the next chunk anyway.
        */

        if (old_size != 0) {
#ifdef DNMALLOC_DEBUG
          fprintf(stderr, "Shrink old_top to insert fenceposts\n");
#endif
          /*
            Shrink old_top to insert fenceposts, keeping size a
            multiple of MALLOC_ALIGNMENT. We know there is at least
            enough space in old_top to do this.
          */
#ifdef DNMALLOC_DEBUG
          fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n",
                  old_top, (unsigned long)old_size);
#endif
          old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
          set_head(old_top, old_size | PREV_INUSE);
#ifdef DNMALLOC_DEBUG
          fprintf(stderr, "Adjust top, old_size after %lu\n",
                  (unsigned long)old_size);
#endif

          /*
            Note that the following assignments completely overwrite
            old_top when old_size was previously MINSIZE. This is
            intentional. We need the fencepost, even if old_top otherwise gets
            lost.
          */
          /* dnmalloc, we need the fencepost to be 16 bytes, however since
             it's marked inuse it will never be coalesced
          */
          fencepost = cireg_getfree();
          fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top),
                                                         old_size);
          fencepost->size = 16|INUSE|PREV_INUSE;
          hashtable_add(fencepost);
          /*
            If possible, release the rest, suppressing trimming.
          */
          if (old_size >= MINSIZE) {
            /* temporarily disable trimming so fREe won't shrink top */
            INTERNAL_SIZE_T tt = av->trim_threshold;
#ifdef DNMALLOC_DEBUG
            fprintf(stderr, "Release\n");
#endif
            av->trim_threshold = (INTERNAL_SIZE_T)(-1);
            set_head(old_top, old_size | PREV_INUSE | INUSE);
            guard_set(av->guard_stored, old_top, 0, old_size);
            VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
            fREe(chunk(old_top));
            av->trim_threshold = tt;
#ifdef DNMALLOC_DEBUG
            fprintf(stderr, "Release done\n");
#endif
          }

#ifdef DNMALLOC_DEBUG
          fprintf(stderr, "Adjust top, size %lu\n",
                  (unsigned long)chunksize(av->top));
#endif

        } /* fenceposts */
      } /* adjust top */
    } /* not extended previous region */

    /* Update statistics */ /* FIXME check this */
    sum = av->sbrked_mem;
    if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
      av->max_sbrked_mem = sum;

    sum += av->mmapped_mem;
    if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
      av->max_total_mem = sum;

    check_malloc_state();

    /* finally, do the allocation */

    p = av->top;
    size = chunksize(p);

#ifdef DNMALLOC_DEBUG
    fprintf(stderr, "Size: %lu  nb+MINSIZE: %lu\n",
            (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
#endif

    /* check that one of the above allocation paths succeeded */
    if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
      /* split the new top: p becomes the returned chunk, remainder
         becomes the new top */
      remainder_size = size - nb;
      remainder = cireg_getfree();
      remainder->chunk = chunk_at_offset(chunk(p), nb);
      av->top = remainder;
      set_head(p, nb | PREV_INUSE | INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);
      hashtable_insert (p, av->top);
      check_malloced_chunk(p, nb);
#ifdef DNMALLOC_DEBUG
      fprintf(stderr, "Return any (total %lu)\n",
              (unsigned long)/* size_t */av->max_total_mem );
#endif
      return chunk(p);
    }

  }

#ifdef DNMALLOC_DEBUG
  fprintf(stderr, "Return failed (total %lu)\n",
          (unsigned long)/* size_t */av->max_total_mem );
#endif

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}
3810
3811
3812
3813
3814/*
3815 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3816 to the system (via negative arguments to sbrk) if there is unused
3817 memory at the `high' end of the malloc pool. It is called
3818 automatically by free() when top space exceeds the trim
3819 threshold. It is also called by the public malloc_trim routine. It
3820 returns 1 if it actually released any memory, else 0.
3821*/
3822
3823#if __STD_C
3824static int sYSTRIm(size_t pad, mstate av)
3825#else
3826static int sYSTRIm(pad, av) size_t pad; mstate av;
3827#endif
3828{
3829 long top_size; /* Amount of top-most memory */
3830 long extra; /* Amount to release */
3831 long released; /* Amount actually released */
3832 char* current_brk; /* address returned by pre-check sbrk call */
3833 char* new_brk; /* address returned by post-check sbrk call */
3834 size_t pagesz;
3835
3836 pagesz = av->pagesize;
3837 top_size = chunksize(av->top);
3838
3839 /* Release in pagesize units, keeping at least one page */
3840 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3841
3842 if (extra > 0) {
3843
3844 /*
3845 Only proceed if end of memory is where we last set it.
3846 This avoids problems if there were foreign sbrk calls.
3847 */
3848 current_brk = (char*)(MORECORE(0));
3849 if (current_brk == (char*)(av->top) + top_size) {
3850
3851 /*
3852 Attempt to release memory. We ignore MORECORE return value,
3853 and instead call again to find out where new end of memory is.
3854 This avoids problems if first call releases less than we asked,
3855 of if failure somehow altered brk value. (We could still
3856 encounter problems if it altered brk in some very bad way,
3857 but the only thing we can do is adjust anyway, which will cause
3858 some downstream failure.)
3859 */
3860
3861 MORECORE(-extra);
3862 new_brk = (char*)(MORECORE(0));
3863
3864 if (new_brk != (char*)MORECORE_FAILURE) {
3865 released = (long)(current_brk - new_brk);
3866
3867 if (released != 0) {
3868 /* Success. Adjust top. */
3869 av->sbrked_mem -= released;
3870 set_head(av->top, (top_size - released) | PREV_INUSE);
3871 check_malloc_state();
3872 return 1;
3873 }
3874 }
3875 }
3876 }
3877 return 0;
3878}
3879
3880/*
3881 ------------------------------ malloc ------------------------------
3882*/
3883
3884
3885#if __STD_C
3886DL_STATIC Void_t* mALLOc(size_t bytes)
3887#else
3888DL_STATIC Void_t* mALLOc(bytes) size_t bytes;
3889#endif
3890{
3891 mstate av = get_malloc_state();
3892
3893 INTERNAL_SIZE_T nb; /* normalized request size */
3894 unsigned int idx; /* associated bin index */
3895 mbinptr bin; /* associated bin */
3896 mfastbinptr* fb; /* associated fastbin */
3897
3898 chunkinfoptr victim; /* inspected/selected chunk */
3899 INTERNAL_SIZE_T size; /* its size */
3900 int victim_index; /* its bin index */
3901
3902 chunkinfoptr remainder; /* remainder from a split */
3903 CHUNK_SIZE_T remainder_size; /* its size */
3904
3905 unsigned int block; /* bit map traverser */
3906 unsigned int bit; /* bit map traverser */
3907 unsigned int map; /* current word of binmap */
3908
3909 chunkinfoptr fwd; /* misc temp for linking */
3910 chunkinfoptr bck; /* misc temp for linking */
3911
3912 Void_t* retval;
3913
3914 /* chunkinfoptr next; */
3915
3916
3917 /*
3918 Convert request size to internal form by adding SIZE_SZ bytes
3919 overhead plus possibly more to obtain necessary alignment and/or
3920 to obtain a size of at least MINSIZE, the smallest allocatable
3921 size. Also, checked_request2size traps (returning 0) request sizes
3922 that are so large that they wrap around zero when padded and
3923 aligned.
3924 */
3925#if defined(SH_CUTEST)
3926 extern int malloc_count;
3927 ++malloc_count;
3928#endif
3929
3930 checked_request2size(bytes, nb);
3931
3932 /*
3933 Bypass search if no frees yet
3934 */
3935 if (av && have_anychunks(av)) {
3936 goto av_initialized;
3937 }
3938 else {
3939 if (!av || av->max_fast == 0) { /* initialization check */
3940 malloc_consolidate(av);
3941 av = get_malloc_state();
3942 }
3943 goto use_top;
3944 }
3945
3946 av_initialized:
3947
3948 /*
3949 If the size qualifies as a fastbin, first check corresponding bin.
3950 */
3951 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3952 fb = &(av->fastbins[(fastbin_index(nb))]);
3953 if ( (victim = *fb) != 0) {
3954 *fb = victim->fd;
3955 check_remalloced_chunk(victim, nb);
3956 guard_set(av->guard_stored, victim, bytes, nb);
[172]3957 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3958 return chunk(victim);
3959 }
3960 }
3961
3962 /*
3963 If a small request, check regular bin. Since these "smallbins"
3964 hold one size each, no searching within bins is necessary.
3965 (For a large request, we need to wait until unsorted chunks are
3966 processed to find best fit. But for small ones, fits are exact
3967 anyway, so we can check now, which is faster.)
3968 */
3969
3970 if (in_smallbin_range(nb)) {
3971 idx = smallbin_index(nb);
3972 bin = bin_at(av,idx);
3973
3974 if ((victim = last(bin)) != bin) {
3975 bck = victim->bk;
3976 bin->bk = bck;
3977 bck->fd = bin;
3978
3979 set_all_inuse(victim);
3980
3981 check_malloced_chunk(victim, nb);
3982 guard_set(av->guard_stored, victim, bytes, nb);
[172]3983 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3984 return chunk(victim);
3985 }
3986 }
3987
3988 /*
3989 If this is a large request, consolidate fastbins before continuing.
3990 While it might look excessive to kill all fastbins before
3991 even seeing if there is space available, this avoids
3992 fragmentation problems normally associated with fastbins.
3993 Also, in practice, programs tend to have runs of either small or
3994 large requests, but less often mixtures, so consolidation is not
3995 invoked all that often in most programs. And the programs that
3996 it is called frequently in otherwise tend to fragment.
3997 */
3998
3999 else {
4000 idx = largebin_index(nb);
4001 if (have_fastchunks(av))
4002 malloc_consolidate(av);
4003 }
4004
4005 /*
4006 Process recently freed or remaindered chunks, taking one only if
    it is exact fit, or, if this is a small request, the chunk is remainder from
4008 the most recent non-exact fit. Place other traversed chunks in
4009 bins. Note that this step is the only place in any routine where
4010 chunks are placed in bins.
4011 */
4012
4013 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4014 bck = victim->bk;
4015 size = chunksize(victim);
4016
4017 /*
4018 If a small request, try to use last remainder if it is the
4019 only chunk in unsorted bin. This helps promote locality for
4020 runs of consecutive small requests. This is the only
4021 exception to best-fit, and applies only when there is
4022 no exact fit for a small chunk.
4023 */
4024
4025 if (UNLIKELY(in_smallbin_range(nb) &&
4026 bck == unsorted_chunks(av) &&
4027 victim == av->last_remainder &&
4028 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4029
4030 /* split and reattach remainder */
4031 remainder_size = size - nb;
4032 remainder = cireg_getfree();
4033 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4034 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4035 av->last_remainder = remainder;
4036 remainder->bk = remainder->fd = unsorted_chunks(av);
4037
4038 set_head(victim, nb | PREV_INUSE|INUSE);
4039 set_head(remainder, remainder_size | PREV_INUSE);
4040 hashtable_insert(victim, remainder);
4041
4042 check_malloced_chunk(victim, nb);
4043 guard_set(av->guard_stored, victim, bytes, nb);
[172]4044 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4045 return chunk(victim);
4046 }
4047
4048 /* remove from unsorted list */
4049 unsorted_chunks(av)->bk = bck;
4050 bck->fd = unsorted_chunks(av);
4051
4052 /* Take now instead of binning if exact fit */
4053
4054 if (UNLIKELY(size == nb)) {
4055 set_all_inuse(victim)
4056 check_malloced_chunk(victim, nb);
4057 guard_set(av->guard_stored, victim, bytes, nb);
[172]4058 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4059 return chunk(victim);
4060 }
4061
4062 /* place chunk in bin */
4063
4064 if (in_smallbin_range(size)) {
4065
4066 victim_index = smallbin_index(size);
4067 bck = bin_at(av, victim_index);
4068 fwd = bck->fd;
4069 }
4070 else {
4071 victim_index = largebin_index(size);
4072 bck = bin_at(av, victim_index);
4073 fwd = bck->fd;
4074
4075 if (UNLIKELY(fwd != bck)) {
4076 /* if smaller than smallest, place first */
4077 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4078 fwd = bck;
4079 bck = bck->bk;
4080 }
4081 else if ((CHUNK_SIZE_T)(size) >=
4082 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4083
4084 /* maintain large bins in sorted order */
4085 size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4086 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
4087 fwd = fwd->fd;
4088 bck = fwd->bk;
4089 }
4090 }
4091 }
4092
4093 mark_bin(av, victim_index);
4094 victim->bk = bck;
4095 victim->fd = fwd;
4096 fwd->bk = victim;
4097 bck->fd = victim;
4098 }
4099
4100 /*
4101 If a large request, scan through the chunks of current bin to
4102 find one that fits. (This will be the smallest that fits unless
4103 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
4104 the only step where an unbounded number of chunks might be
4105 scanned without doing anything useful with them. However the
4106 lists tend to be short.
4107 */
4108
4109 if (!in_smallbin_range(nb)) {
4110 bin = bin_at(av, idx);
4111
4112 victim = last(bin);
4113
4114 if (UNLIKELY(victim != bin)) {
4115
4116 do {
4117 size = chunksize(victim);
4118
4119 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4120 remainder_size = size - nb;
4121 unlink(victim, bck, fwd);
4122
4123 /* Split */
4124 if (remainder_size >= MINSIZE) {
4125 remainder = cireg_getfree();
4126 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4127 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4128 remainder->bk = remainder->fd = unsorted_chunks(av);
4129 set_head(victim, nb | PREV_INUSE | INUSE);
4130 set_head(remainder, remainder_size | PREV_INUSE);
4131 hashtable_insert(victim, remainder);
4132 check_malloced_chunk(victim, nb);
4133 guard_set(av->guard_stored, victim, bytes, nb);
[172]4134 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4135 return chunk(victim);
4136 }
4137 /* Exhaust */
4138 else {
4139 set_all_inuse(victim);
4140 check_malloced_chunk(victim, nb);
4141 guard_set(av->guard_stored, victim, bytes, nb);
[172]4142 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4143 return chunk(victim);
4144 }
4145 }
4146 victim = victim->bk;
4147 } while(victim != bin);
4148 }
4149 }
4150
4151 /*
4152 Search for a chunk by scanning bins, starting with next largest
4153 bin. This search is strictly by best-fit; i.e., the smallest
4154 (with ties going to approximately the least recently used) chunk
4155 that fits is selected.
4156
4157 The bitmap avoids needing to check that most blocks are nonempty.
4158 */
4159
4160
4161 ++idx;
4162 bin = bin_at(av,idx);
4163 block = idx2block(idx);
4164 map = av->binmap[block];
4165 bit = idx2bit(idx);
4166
4167 for (;;) {
4168
4169 /* Skip rest of block if there are no more set bits in this block. */
4170 if (bit > map || bit == 0) {
4171 do {
4172 if (++block >= BINMAPSIZE) /* out of bins */
4173 goto use_top;
4174 } while ( (map = av->binmap[block]) == 0);
4175
4176 bin = bin_at(av, (block << BINMAPSHIFT));
4177 bit = 1;
4178 }
4179
4180 /* Advance to bin with set bit. There must be one. */
4181 while ((bit & map) == 0) {
4182 bin = next_bin(bin);
4183 bit <<= 1;
4184 assert(bit != 0);
4185 }
4186
4187 /* Inspect the bin. It is likely to be non-empty */
4188 victim = last(bin);
4189
4190 /* If a false alarm (empty bin), clear the bit. */
4191 if (victim == bin) {
4192 av->binmap[block] = map &= ~bit; /* Write through */
4193 bin = next_bin(bin);
4194 bit <<= 1;
4195 }
4196
4197 else {
4198 size = chunksize(victim);
4199
4200 /* We know the first chunk in this bin is big enough to use. */
4201 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4202
4203 remainder_size = size - nb;
4204
4205 /* unlink */
4206 bck = victim->bk;
4207 bin->bk = bck;
4208 bck->fd = bin;
4209
4210 /* Split */
4211 if (remainder_size >= MINSIZE) {
4212 remainder = cireg_getfree();
4213 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4214
4215 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4216 remainder->bk = remainder->fd = unsorted_chunks(av);
4217 /* advertise as last remainder */
[172]4218 if (in_smallbin_range(nb))
4219 av->last_remainder = remainder;
[171]4220
4221 set_head(victim, nb | PREV_INUSE | INUSE);
4222 set_head(remainder, remainder_size | PREV_INUSE);
4223 hashtable_insert(victim, remainder);
4224 check_malloced_chunk(victim, nb);
4225 guard_set(av->guard_stored, victim, bytes, nb);
[172]4226 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4227 return chunk(victim);
4228 }
4229 /* Exhaust */
4230 else {
4231 set_all_inuse(victim);
4232 check_malloced_chunk(victim, nb);
4233 guard_set(av->guard_stored, victim, bytes, nb);
[172]4234 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4235 return chunk(victim);
4236 }
4237
4238 }
4239 }
4240
4241 use_top:
4242
4243
4244 /*
4245 If large enough, split off the chunk bordering the end of memory
4246 (held in av->top). Note that this is in accord with the best-fit
4247 search rule. In effect, av->top is treated as larger (and thus
4248 less well fitting) than any other available chunk since it can
4249 be extended to be as large as necessary (up to system
4250 limitations).
4251
4252 We require that av->top always exists (i.e., has size >=
4253 MINSIZE) after initialization, so if it would otherwise be
    exhausted by current request, it is replenished. (The main
4255 reason for ensuring it exists is that we may need MINSIZE space
4256 to put in fenceposts in sysmalloc.)
4257 */
4258
4259 victim = av->top;
4260 size = chunksize(victim);
4261
4262 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
[172]4263 remainder = cireg_getfree();
[171]4264 remainder_size = size - nb;
4265 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4266 av->top = remainder;
4267 set_head(victim, nb | PREV_INUSE | INUSE);
4268 set_head(remainder, remainder_size | PREV_INUSE);
4269 hashtable_insert(victim, remainder);
4270 check_malloced_chunk(victim, nb);
4271 guard_set(av->guard_stored, victim, bytes, nb);
[172]4272 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4273 return chunk(victim);
4274 }
4275
4276 /*
4277 If no space in top, relay to handle system-dependent cases
4278 */
4279 retval = sYSMALLOc(nb, av);
4280 if (retval) {
4281 victim = mem2chunk(retval);
4282 guard_set(av->guard_stored, victim, bytes, nb);
4283 }
[172]4284 VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
[171]4285 return retval;
4286}
4287
4288/*
4289 ------------------------------ free ------------------------------
4290*/
4291
/*
  fREe(mem): give the chunk backing user pointer 'mem' back to the
  allocator.  free(0) is a no-op.  The chunkinfo for 'mem' is located
  through the chunk hashtable; a failed lookup or a chunk not marked
  inuse indicates a foreign pointer or a double free and is rejected.
*/
#if __STD_C
DL_STATIC void fREe(Void_t* mem)
#else
DL_STATIC void fREe(mem) Void_t* mem;
#endif
{
  mstate av = get_malloc_state();

  chunkinfoptr p;           /* chunk corresponding to mem */
  INTERNAL_SIZE_T size;     /* its size */
  mfastbinptr* fb;          /* associated fastbin */
  chunkinfoptr prevchunk;   /* previous physical chunk */
  chunkinfoptr nextchunk;   /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize; /* its size */
  INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
  chunkinfoptr bck;         /* misc temp for linking */
  chunkinfoptr fwd;         /* misc temp for linking */
  chunkinfoptr next;
#if defined(SH_CUTEST)
  extern int malloc_count;
  --malloc_count;
#endif

  /* free(0) has no effect */
  if (mem != 0) {
    p = hashtable_lookup(mem);
    /* check that memory is managed by us
     * and is inuse
     */
    if (UNLIKELY(!p || !inuse(p)))
      {
#ifdef DNMALLOC_CHECKS
	if (p) {
	  fprintf(stderr, "Attempt to free memory not in use\n");
	  abort();
	} else {
	  fprintf(stderr, "Attempt to free memory not allocated\n");
	  abort();
	}
#endif
	/* without DNMALLOC_CHECKS: assert in debug builds, otherwise
	   silently ignore the bad free */
	assert(p && inuse(p));
	return;
      }

    VALGRIND_FREELIKE_BLOCK(mem, 0);

    /* verify the canary stored past the user area is intact */
    guard_check(av->guard_stored, p);

    size = chunksize(p);

    check_inuse_chunk(p);

    /*
      If eligible, place chunk on a fastbin so it can be found
      and used quickly in malloc.
    */

    if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)

#if TRIM_FASTBINS
        /*
           If TRIM_FASTBINS set, don't place chunks
           bordering top into fastbins
        */
        && (chunk_at_offset(chunk(p), size) != av->top)
#endif
        ) {

      /* fastbins are simple LIFO singly-linked lists via fd;
         the inuse bit is deliberately left set */
      set_fastchunks(av);
      fb = &(av->fastbins[fastbin_index(size)]);
      p->fd = *fb;
      *fb = p;
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
    */

    else if (!chunk_is_mmapped(p)) {
      set_anychunks(av);

      nextchunk = next_chunkinfo(p);
      if (nextchunk)
	nextsize = chunksize(nextchunk);
      else
	nextsize = 0;/* gcc doesn't notice that it's only used if (nextchunk)*/

      /* consolidate backward: merge p into the preceding free chunk */
      if (UNLIKELY(!prev_inuse(p))) {
	prevchunk = prev_chunkinfo(p);
	prevsize = chunksize(prevchunk);
#ifdef DNMALLOC_CHECKS
	if (inuse(prevchunk)) {
	  fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
	  abort();
	}
#else
	assert(!inuse(prevchunk));
#endif
	size += prevsize;
	unlink(prevchunk, bck, fwd);
	set_head(p, size | PREV_INUSE);
	hashtable_skiprm(prevchunk,p);
	/* This chunk no longer exists in any form: release the chunkinfoptr
	 */
	freecilst_add(p);
	p = prevchunk;
      }

      if (nextchunk) {
	if (nextchunk != av->top) {
	  /* get and clear inuse bit */
	  clear_previnuse(nextchunk);

	  /* consolidate forward: absorb a free successor into p */
	  if (!inuse(nextchunk)) {
	    unlink(nextchunk, bck, fwd);
	    size += nextsize;
	    set_head(p, size | PREV_INUSE);
	    hashtable_skiprm(p, nextchunk);
	    freecilst_add (nextchunk);
	  }

	  /* NOTE(review): set_head and the successor's prev_size are
	     refreshed again below after the unsorted-list insert;
	     looks redundant but is kept as-is (behavior unchanged) */
	  set_head(p, size | PREV_INUSE);
	  next = next_chunkinfo(p);
	  if (next)
	    next->prev_size = size;

	  /*
	    Place the chunk in unsorted chunk list. Chunks are
	    not placed into regular bins until after they have
	    been given one chance to be used in malloc.
	  */

	  bck = unsorted_chunks(av);
	  fwd = bck->fd;
	  p->bk = bck;
	  p->fd = fwd;
	  bck->fd = p;
	  fwd->bk = p;

	  nextchunk = next_chunkinfo(p);
	  if (nextchunk)
	    nextchunk->prev_size = chunksize(p);

	  check_free_chunk(p);
	}

	/*
	  If the chunk borders the current high end of memory,
	  consolidate into top
	*/

	else {
	  size += nextsize;
	  set_head(p, size | PREV_INUSE);
	  hashtable_remove(chunk(av->top));
	  freecilst_add(av->top);
	  av->top = p;
	  check_chunk(p);
	}
      } /* if (nextchunk) */

      /*
	If freeing a large space, consolidate possibly-surrounding
	chunks. Then, if the total unused topmost memory exceeds trim
	threshold, ask malloc_trim to reduce top.

	Unless max_fast is 0, we don't know if there are fastbins
	bordering top, so we cannot tell for sure whether threshold
	has been reached unless fastbins are consolidated.  But we
	don't want to consolidate on each free.  As a compromise,
	consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
	is reached.
      */

      if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) {
	if (have_fastchunks(av))
	  malloc_consolidate(av);

#ifndef MORECORE_CANNOT_TRIM
	if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
	    (CHUNK_SIZE_T)(av->trim_threshold))
	  {
	    if (morecore32bit(av))
	      {
#ifdef DNMALLOC_DEBUG
		fprintf(stderr, "Calling systrim from free()\n");
#endif
		sYSTRIm(av->top_pad, av);
#ifdef DNMALLOC_DEBUG
		fprintf(stderr, "Systrim done\n");
#endif
	      }
	  }
#endif
      }

    }
    /*
      If the chunk was allocated via mmap, release via munmap()
      Note that if HAVE_MMAP is false but chunk_is_mmapped is
      true, then user must have overwritten memory. There's nothing
      we can do to catch this error unless DEBUG is set, in which case
      check_inuse_chunk (above) will have triggered error.
    */

    else {
      int ret;
      /* for mmapped chunks, hash_next stores the alignment offset
         from the start of the mapping to the chunk */
      INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
      av->n_mmaps--;
      av->mmapped_mem -= (size + offset);
      ret = munmap((char*) chunk(p) - offset, size + offset);
      hashtable_remove_mmapped(chunk(p));
      freecilst_add(p);
      /* munmap returns non-zero on failure */
      assert(ret == 0);
    }
  }
}
4512
4513/*
4514 ------------------------- malloc_consolidate -------------------------
4515
4516 malloc_consolidate is a specialized version of free() that tears
4517 down chunks held in fastbins. Free itself cannot be used for this
4518 purpose since, among other things, it might place chunks back onto
4519 fastbins. So, instead, we need to use a minor variant of the same
4520 code.
4521
4522 Also, because this routine needs to be called the first time through
4523 malloc anyway, it turns out to be the perfect place to trigger
4524 initialization code.
4525*/
4526
/*
  malloc_consolidate(av): drain all fastbins, coalescing each chunk
  with free neighbors and parking the results in the unsorted bin
  (or merging into top when adjacent).  Also serves as the lazy
  initializer: if av is null or max_fast is 0, dnmalloc is set up here.
*/
#if __STD_C
static void malloc_consolidate(mstate av)
#else
static void malloc_consolidate(av) mstate av;
#endif
{
  mfastbinptr* fb;             /* current fastbin being consolidated */
  mfastbinptr* maxfb;          /* last fastbin (for loop control) */
  chunkinfoptr p;              /* current chunk being consolidated */
  chunkinfoptr nextp;          /* next chunk to consolidate */
  chunkinfoptr prevp;
  chunkinfoptr unsorted_bin;   /* bin header */
  chunkinfoptr first_unsorted; /* chunk to link to */

  /* These have same use as in free() */
  chunkinfoptr nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  chunkinfoptr bck;
  chunkinfoptr fwd;
  chunkinfoptr next;

  /*
    If max_fast is 0, we know that av hasn't
    yet been initialized, in which case do so below
  */
  if (av && av->max_fast != 0) {

    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);

    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */

    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
    fb = &(av->fastbins[0]);
    do {
      if ( UNLIKELY((p = *fb) != 0)) {
	*fb = 0;  /* detach the whole list before walking it */
	do {
	  check_inuse_chunk(p);
	  nextp = p->fd;  /* save before p's links are rewritten */

	  /*
	   * Slightly streamlined version of consolidation code in free()
	   */

	  size = chunksize(p);
	  nextchunk = next_chunkinfo(p);

	  /* gcc doesn't notice that it's only used if (nextchunk) */
	  if (nextchunk)
	    nextsize = chunksize(nextchunk);
	  else
	    nextsize = 0;

	  /* consolidate backward into a free predecessor */
	  if (!prev_inuse(p)) {
	    prevp = prev_chunkinfo(p);
	    prevsize = chunksize(prevp);
	    size += prevsize;
#ifdef DNMALLOC_CHECKS
	    if (inuse(prevp)) {
	      fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
	      abort();
	    }
#else
	    assert(!inuse(prevp));
#endif
	    unlink(prevp, bck, fwd);
	    set_head(p, size | PREV_INUSE);
	    hashtable_skiprm(prevp,p);
	    freecilst_add(p);
	    p=prevp;
	  }

	  if (nextchunk) {
	    if (nextchunk != av->top) {

	      clear_previnuse(nextchunk);

	      /* if mmap is used instead of sbrk, we may have a
	       * chunk with !nextchunk->fd && !nextchunk->bk
	       */
	      if (!inuse(nextchunk)) {
		if( nextchunk->fd && nextchunk->bk) {
		  size += nextsize;
		  unlink(nextchunk, bck, fwd);
		  set_head(p, size | PREV_INUSE);
		  hashtable_skiprm(p,nextchunk);
		  freecilst_add(nextchunk);
		}
	      }

	      /* splice p at the front of the unsorted list */
	      first_unsorted = unsorted_bin->fd;
	      unsorted_bin->fd = p;
	      first_unsorted->bk = p;

	      set_head(p, size | PREV_INUSE);
	      p->bk = unsorted_bin;
	      p->fd = first_unsorted;
	      next = next_chunkinfo(p);
	      if (next)
	        next->prev_size = size;


	    }

	    /* chunk borders top: merge it into top instead of binning */
	    else if (nextchunk == av->top) {
	      size += nextsize;
	      set_head(p, size | PREV_INUSE);
	      hashtable_remove(chunk(av->top));
	      freecilst_add(av->top);
	      av->top = p;
	    }
	  } /* if (nextchunk) */

	} while ( (p = nextp) != 0);

      }
    } while (fb++ != maxfb);
  }
  else {
    // Initialize dnmalloc
    dnmalloc_init();
    malloc_init_state(get_malloc_state());
    check_malloc_state();
  }
}
4662
4663/*
4664 ------------------------------ realloc ------------------------------
4665*/
4666
4667
/*
  rEALLOc(oldmem, bytes): resize the allocation at 'oldmem' to at
  least 'bytes'.  realloc(0, n) behaves as malloc(n); with
  REALLOC_ZERO_BYTES_FREES, realloc(p, 0) behaves as free(p).
  Non-mmapped chunks are shrunk in place, grown into top or a free
  successor when possible, otherwise moved via malloc+copy+free.
  mmapped chunks use mremap when available.
*/
#if __STD_C
DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T nb;              /* padded request size */

  chunkinfoptr oldp;               /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T oldsize;         /* its size */

  chunkinfoptr newp;               /* chunk to return */
  INTERNAL_SIZE_T newsize;         /* its size */
  Void_t* newmem;                  /* corresponding user mem */

  chunkinfoptr next;               /* next contiguous chunk after oldp */

  chunkinfoptr remainder;          /* extra space at end of newp */
  CHUNK_SIZE_T remainder_size;     /* its size */

  chunkinfoptr bck;                /* misc temp for linking */
  chunkinfoptr fwd;                /* misc temp for linking */

  CHUNK_SIZE_T copysize;           /* bytes to copy */
  unsigned int ncopies;            /* INTERNAL_SIZE_T words to copy */
  INTERNAL_SIZE_T* s;              /* copy source */
  INTERNAL_SIZE_T* d;              /* copy destination */


#ifdef REALLOC_ZERO_BYTES_FREES
  if (UNLIKELY(bytes == 0)) {
    fREe(oldmem);
    return 0;
  }
#endif

  /* lazy initialization, same trigger as in malloc */
  if (UNLIKELY(!av || av->max_fast == 0)) {
    malloc_consolidate(av);
    av = get_malloc_state();
  }

  /* realloc of null is supposed to be same as malloc */
  if (UNLIKELY(oldmem == 0))
    return mALLOc(bytes);

  checked_request2size(bytes, nb);

  oldp = hashtable_lookup(oldmem);

  if (UNLIKELY(!oldp || !inuse(oldp))){
    /* attempt to either realloc memory not managed by us
     * or memory that is not in use
     */
#ifdef DNMALLOC_CHECKS
    if (oldp) {
      fprintf(stderr, "Attempt to free memory not in use\n");
      abort();
    } else {
      fprintf(stderr, "Attempt to free memory not allocated\n");
      abort();
    }
#endif
    assert(oldp && inuse(oldp));
    return 0;
  }

  VALGRIND_FREELIKE_BLOCK(oldmem, 0);
  /* verify canary before touching the chunk */
  guard_check(av->guard_stored, oldp);

  oldsize = chunksize(oldp);

  check_inuse_chunk(oldp);

  if (!chunk_is_mmapped(oldp)) {

    if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

    else {
      next = next_chunkinfo(oldp);
      if (next)
	next->prev_size = oldsize;
      /* Try to expand forward into top */
      if (next && next == av->top &&
          (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
          (CHUNK_SIZE_T)(nb + MINSIZE)) {
        set_head_size(oldp, nb);
        hashtable_remove(chunk(av->top));
        av->top->chunk = chunk_at_offset(chunk(oldp), nb);
        set_head(av->top, (newsize - nb) | PREV_INUSE);
        /* av->top->chunk has been moved move in hashtable */
        hashtable_insert(oldp, av->top);
        guard_set(av->guard_stored, oldp, bytes, nb);
        VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
        return chunk(oldp);
      }

      /* Try to expand forward into next chunk; split off remainder below */
      else if (next && next != av->top &&
               !inuse(next) &&
               (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
               (CHUNK_SIZE_T)(nb)) {
        newp = oldp;
        unlink(next, bck, fwd);
        hashtable_remove(chunk(next));
        freecilst_add(next);
        next = next_chunkinfo(oldp);
        if (next)
          next->prev_size = newsize;
      }

      /* allocate, copy, free */
      else {

        newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
        if (newmem == 0)
          return 0; /* propagate failure */

        newp = hashtable_lookup(newmem);
        newsize = chunksize(newp);


        /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
        /*
          Avoid copy if newp is next chunk after oldp.
        */
        if (UNLIKELY(is_next_chunk(oldp, newp))) {
          newsize += oldsize;
          set_head_size(oldp, newsize);
          hashtable_skiprm(oldp, newp);
          freecilst_add(newp);
          newp = oldp;
        }
        else {
          /*
            Unroll copy of <= 40 bytes (80 if 8byte sizes)
            We know that contents have an even number of
            INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
          */

          VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);

          copysize = oldsize;
          s = (INTERNAL_SIZE_T*)(oldmem);
          d = (INTERNAL_SIZE_T*)(newmem);
          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
          assert(ncopies >= 2);

          if (ncopies > 10)
            MALLOC_COPY(d, s, copysize);

          else {
            *(d+0) = *(s+0);
            *(d+1) = *(s+1);
            if (ncopies > 2) {
              *(d+2) = *(s+2);
              *(d+3) = *(s+3);
              if (ncopies > 4) {
                *(d+4) = *(s+4);
                *(d+5) = *(s+5);
                if (ncopies > 6) {
                  *(d+6) = *(s+6);
                  *(d+7) = *(s+7);
                  if (ncopies > 8) {
                    *(d+8) = *(s+8);
                    *(d+9) = *(s+9);
                  }
                }
              }
            }
          }

          fREe(oldmem);
          check_inuse_chunk(newp);
          guard_set(av->guard_stored, newp, bytes, nb);
          return chunk(newp);
        }
      }
    }

    /* If possible, free extra space in old or extended chunk */

    assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));

    remainder_size = newsize - nb;

    if (remainder_size >= MINSIZE) { /* split remainder */
      remainder = cireg_getfree();
      remainder->chunk = chunk_at_offset(chunk(newp), nb);
      set_head_size(newp, nb);
      set_head(remainder, remainder_size | PREV_INUSE | INUSE);
      remainder->prev_size = nb;
      hashtable_insert(newp, remainder);
      /* Mark remainder as inuse so free() won't complain */
      set_all_inuse(remainder);
      guard_set(av->guard_stored, remainder, 0, remainder_size);
      VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
      fREe(chunk(remainder));
    }
    else { /* not enough extra to split off */
      set_head_size(newp, newsize);
      set_all_inuse(newp);
    }

    check_inuse_chunk(newp);
    guard_set(av->guard_stored, newp, bytes, nb);
    VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
    return chunk(newp);
  }

  /*
    Handle mmap cases
  */

  else {

#if HAVE_MREMAP
    /* hash_next of an mmapped chunk stores the offset from the start
       of the mapping to the chunk */
    INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
    size_t pagemask = av->pagesize - 1;
    char *cp;
    CHUNK_SIZE_T sum;


    /* Note the extra SIZE_SZ overhead */
    //newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
    newsize = (nb + offset + pagemask) & ~pagemask;

    /* don't need to remap if still within same page */
    if (oldsize == newsize - offset)
      {
        guard_set(av->guard_stored, oldp, bytes, nb);
        VALGRIND_FREELIKE_BLOCK(oldmem, 0);
        VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
        return oldmem;
      }

    cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);

    if (cp != (char*)MORECORE_FAILURE) {

      hashtable_remove_mmapped(chunk(oldp));

      oldp->chunk = (mchunkptr)(cp + offset);
      set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);

      hashtable_add(oldp);

      assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
      assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));

      /* update statistics */
      sum = av->mmapped_mem += newsize - oldsize;
      if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
        av->max_mmapped_mem = sum;
      sum += av->sbrked_mem;
      if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
        av->max_total_mem = sum;

      guard_set(av->guard_stored, oldp, bytes, nb);
      VALGRIND_FREELIKE_BLOCK(oldmem, 0);
      VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
      return chunk(oldp);
    }
#endif /* have MREMAP */

    /* Note the extra SIZE_SZ overhead. */
    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
      newmem = oldmem; /* do nothing */
    else {
      /* Must alloc, copy, free. */
      newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
      if (newmem != 0) {
        MALLOC_COPY(newmem, oldmem, oldsize);
        fREe(oldmem);
      }
    }
    /* NOTE(review): if mALLOc failed above, newmem is 0 here and
       mem2chunk(newmem)/guard_set are still invoked before returning 0 —
       verify these tolerate a null pointer */
    VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
    guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
    return newmem;
  }
}
4954
4955/*
4956 ---------------------------posix_memalign ----------------------------
4957*/
4958
4959#if __STD_C
4960DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4961#else
4962DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4963#endif
4964{
4965 mstate av;
4966
4967 if (alignment % sizeof(void *) != 0)
4968 return EINVAL;
4969 if ((alignment & (alignment - 1)) != 0)
4970 return EINVAL;
4971
4972 av = get_malloc_state();
4973 if (!av || av->max_fast == 0) malloc_consolidate(av);
4974 *memptr = mEMALIGn(alignment, bytes);
4975
4976 return (*memptr != NULL ? 0 : ENOMEM);
4977}
4978
4979/*
4980 ------------------------------ memalign ------------------------------
4981*/
4982
/*
  mEMALIGn(alignment, bytes): allocate 'bytes' bytes whose address is a
  multiple of 'alignment'.  Over-allocates with malloc, then carves an
  aligned chunk out of the result, freeing any leading and trailing
  slack.  Alignments <= MALLOC_ALIGNMENT simply relay to malloc;
  non-power-of-two alignments are rounded up to the next power of two.
*/
#if __STD_C
DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T nb;            /* padded request size */
  char* m;                       /* memory returned by malloc call */
  chunkinfoptr p;                /* corresponding chunk */
  char* brk;                     /* alignment point within p */
  chunkinfoptr newp;             /* chunk to return */
  INTERNAL_SIZE_T newsize;       /* its size */
  INTERNAL_SIZE_T leadsize;      /* leading space before alignment point */
  chunkinfoptr remainder;        /* spare room at end to split off */
  CHUNK_SIZE_T remainder_size;   /* its size */
  INTERNAL_SIZE_T size;
  mstate av;

  /* If need less alignment than we give anyway, just relay to malloc */

  if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment < MINSIZE) alignment = MINSIZE;

  /* Make sure alignment is power of 2 (in case MINSIZE is not). */
  if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
    alignment = a;
  }

  checked_request2size(bytes, nb);

  /*
    Strategy: find a spot within that chunk that meets the alignment
    request, and then possibly free the leading and trailing space.
  */


  /* Call malloc with worst case padding to hit alignment. */

  m = (char*)(mALLOc(nb + alignment + MINSIZE));

  if (m == 0) return 0; /* propagate failure */

  av = get_malloc_state();

  p = hashtable_lookup((mchunkptr) m);

  if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */

    /*
      Find an aligned spot inside chunk.  Since we need to give back
      leading space in a chunk of at least MINSIZE, if the first
      calculation places us at a spot with less than MINSIZE leader,
      we can move to the next aligned spot -- we've allocated enough
      total room so that this is always possible.
    */

    /* round m up to the next multiple of alignment */
    brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
                              -((signed long) alignment)));
    if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
      brk += alignment;

    newp = cireg_getfree();
    newp->chunk = (mchunkptr)brk;
    leadsize = brk - (char*)(chunk(p));
    newsize = chunksize(p) - leadsize;

    /* For mmapped chunks, just adjust offset */
    if (UNLIKELY(chunk_is_mmapped(p))) {
      /* hash_next of an mmapped chunk stores the mapping offset */
      newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
      set_head(newp, newsize|IS_MMAPPED|INUSE);
      hashtable_remove_mmapped(chunk(p));
      freecilst_add(p);
      hashtable_add(newp);
      guard_set(av->guard_stored, newp, bytes, nb);
      return chunk(newp);
    }

    /* Otherwise, give back leader, use the rest */
    set_head(newp, newsize | PREV_INUSE | INUSE);
    set_head_size(p, leadsize);
    set_all_inuse(newp);
    hashtable_add(newp); /* 20.05.2008 rw */
    guard_set(av->guard_stored, p, 0, leadsize);
    fREe(chunk(p));
    p = newp;

    assert (newsize >= nb &&
            (((PTR_UINT)(chunk(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped(p)) {
    size = chunksize(p);
    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
      remainder = cireg_getfree();
      remainder_size = size - nb;
      remainder->chunk = chunk_at_offset(chunk(p), nb);
      set_head(remainder, remainder_size | PREV_INUSE | INUSE);
      set_head_size(p, nb);
      hashtable_add(remainder); /* 20.05.2008 rw */
      guard_set(av->guard_stored, remainder, 0, remainder_size);
      fREe(chunk(remainder));
    }
  }

  check_inuse_chunk(p);
  guard_set(av->guard_stored, p, bytes, nb);
  return chunk(p);
}
5097
5098/*
5099 ------------------------------ calloc ------------------------------
5100*/
5101
5102#if __STD_C
5103DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5104#else
5105DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5106#endif
5107{
5108 chunkinfoptr p;
5109 CHUNK_SIZE_T clearsize;
5110 CHUNK_SIZE_T nclears;
5111 INTERNAL_SIZE_T* d;
5112 Void_t* mem;
5113
5114
5115 mem = mALLOc(n_elements * elem_size);
5116
5117 if (mem != 0) {
5118 p = hashtable_lookup(mem);
5119
5120 if (!chunk_is_mmapped(p))
5121 {
5122 /*
5123 Unroll clear of <= 40 bytes (80 if 8byte sizes)
5124 We know that contents have an even number of
5125 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5126 */
5127
5128 d = (INTERNAL_SIZE_T*)mem;
5129 clearsize = chunksize(p);
5130 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5131 assert(nclears >= 2);
5132
5133 if (nclears > 10) {
5134 MALLOC_ZERO(d, clearsize);
5135 }
5136
5137 else {
5138 *(d+0) = 0;
5139 *(d+1) = 0;
5140 if (nclears > 2) {
5141 *(d+2) = 0;
5142 *(d+3) = 0;
5143 if (nclears > 4) {
5144 *(d+4) = 0;
5145 *(d+5) = 0;
5146 if (nclears > 6) {
5147 *(d+6) = 0;
5148 *(d+7) = 0;
5149 if (nclears > 8) {
5150 *(d+8) = 0;
5151 *(d+9) = 0;
5152 }
5153 }
5154 }
5155 }
5156 }
5157 }
5158#if ! MMAP_CLEARS
5159 else
5160 {
5161 d = (INTERNAL_SIZE_T*)mem;
5162 clearsize = chunksize(p);
5163 MALLOC_ZERO(d, clearsize);
5164 }
5165#endif
5166 /* Set guard again, since we just cleared it
5167 */
5168 guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5169 }
5170
5171 return mem;
5172}
5173
5174/*
5175 ------------------------------ valloc ------------------------------
5176*/
5177
5178#if __STD_C
5179DL_STATIC Void_t* vALLOc(size_t bytes)
5180#else
5181DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5182#endif
5183{
5184 /* Ensure initialization */
5185 mstate av = get_malloc_state();
5186 if (!av || av->max_fast == 0) {
5187 malloc_consolidate(av);
5188 av = get_malloc_state();
5189 }
5190 return mEMALIGn(av->pagesize, bytes);
5191}
5192
5193/*
5194 ------------------------------ pvalloc ------------------------------
5195*/
5196
5197
5198#if __STD_C
5199DL_STATIC Void_t* pVALLOc(size_t bytes)
5200#else
5201DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5202#endif
5203{
5204 mstate av = get_malloc_state();
5205 size_t pagesz;
5206
5207 /* Ensure initialization */
5208 if (!av || av->max_fast == 0) {
5209 malloc_consolidate(av);
5210 av = get_malloc_state();
5211 }
5212 pagesz = av->pagesize;
5213 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5214}
5215
5216
5217/*
5218 ------------------------------ malloc_trim ------------------------------
5219*/
5220
5221#if __STD_C
5222DL_STATIC int mTRIm(size_t pad)
5223#else
5224DL_STATIC int mTRIm(pad) size_t pad;
5225#endif
5226{
5227 mstate av = get_malloc_state();
5228 /* Ensure initialization/consolidation */
5229 malloc_consolidate(av);
5230 av = get_malloc_state();
5231#ifndef MORECORE_CANNOT_TRIM
5232 if (morecore32bit(av))
5233 return sYSTRIm(pad, av);
5234 else
5235 return 0;
5236#else
5237 return 0;
5238#endif
5239}
5240
5241
5242
5243/*
5244 ------------------------- malloc_usable_size -------------------------
5245*/
5246
5247#if __STD_C
5248DL_STATIC size_t mUSABLe(Void_t* mem)
5249#else
5250DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5251#endif
5252{
5253 chunkinfoptr p;
5254 if (mem != 0) {
5255 p = hashtable_lookup(mem);
5256 if (p && inuse(p)) return chunksize(p);
5257 }
5258 return 0;
5259}
5260
5261/*
5262 ------------------------------ mallinfo ------------------------------
5263*/
5264
5265DL_STATIC struct mallinfo mALLINFo()
5266{
5267 mstate av = get_malloc_state();
5268 struct mallinfo mi;
5269 unsigned int i;
5270 mbinptr b;
5271 chunkinfoptr p;
5272 INTERNAL_SIZE_T avail;
5273 INTERNAL_SIZE_T fastavail;
5274 int nblocks;
5275 int nfastblocks;
5276
5277 /* Ensure initialization */
5278 if (!av || av->top == 0) {
5279 malloc_consolidate(av);
5280 av = get_malloc_state();
5281 }
5282 check_malloc_state();
5283
5284 /* Account for top */
5285 avail = chunksize(av->top);
5286 nblocks = 1; /* top always exists */
5287
5288 /* traverse fastbins */
5289 nfastblocks = 0;
5290 fastavail = 0;
5291
5292 for (i = 0; i < NFASTBINS; ++i) {
5293 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5294 ++nfastblocks;
5295 fastavail += chunksize(p);
5296 }
5297 }
5298
5299 avail += fastavail;
5300
5301 /* traverse regular bins */
5302 for (i = 1; i < NBINS; ++i) {
5303 b = bin_at(av, i);
5304 for (p = last(b); p != b; p = p->bk) {
5305 ++nblocks;
5306 avail += chunksize(p);
5307 }
5308 }
5309
5310 mi.smblks = nfastblocks;
5311 mi.ordblks = nblocks;
5312 mi.fordblks = avail;
5313 mi.uordblks = av->sbrked_mem - avail;
5314 mi.arena = av->sbrked_mem;
5315 mi.hblks = av->n_mmaps;
5316 mi.hblkhd = av->mmapped_mem;
5317 mi.fsmblks = fastavail;
5318 mi.keepcost = chunksize(av->top);
5319 mi.usmblks = av->max_total_mem;
5320 return mi;
5321}
5322
5323/*
5324 ------------------------------ malloc_stats ------------------------------
5325*/
5326
5327DL_STATIC void mSTATs()
5328{
5329 struct mallinfo mi = mALLINFo();
5330
5331 fprintf(stderr, "hashtable = %10lu MB\n",
5332 (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5333 fprintf(stderr, "max system bytes = %10lu\n",
5334 (CHUNK_SIZE_T)(mi.usmblks));
5335 fprintf(stderr, "system bytes = %10lu (%10lu sbrked, %10lu mmaped)\n",
5336 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5337 (CHUNK_SIZE_T)(mi.arena),
5338 (CHUNK_SIZE_T)(mi.hblkhd));
5339 fprintf(stderr, "in use bytes = %10lu\n",
5340 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5341
5342}
5343
5344
5345/*
5346 ------------------------------ mallopt ------------------------------
5347*/
5348
5349#if __STD_C
5350DL_STATIC int mALLOPt(int param_number, int value)
5351#else
5352DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5353#endif
5354{
5355 mstate av = get_malloc_state();
5356 /* Ensure initialization/consolidation */
5357 malloc_consolidate(av);
5358 av = get_malloc_state();
5359
5360 switch(param_number) {
5361 case M_MXFAST:
5362 if (value >= 0 && value <= MAX_FAST_SIZE) {
5363 set_max_fast(av, value);
5364 return 1;
5365 }
5366 else
5367 return 0;
5368
5369 case M_TRIM_THRESHOLD:
5370 av->trim_threshold = value;
5371 return 1;
5372
5373 case M_TOP_PAD:
5374 av->top_pad = value;
5375 return 1;
5376
5377 case M_MMAP_THRESHOLD:
5378 av->mmap_threshold = value;
5379 return 1;
5380
5381 case M_MMAP_MAX:
5382 if (value != 0)
5383 return 0;
5384 av->n_mmaps_max = value;
5385 return 1;
5386
5387 default:
5388 return 0;
5389 }
5390}
5391
5392
5393/* $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $ */
5394
5395/*
5396 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5397 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5398 *
5399 * Permission to use, copy, modify, and distribute this software for any
5400 * purpose with or without fee is hereby granted, provided that the above
5401 * copyright notice and this permission notice appear in all copies.
5402 *
5403 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5404 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5405 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5406 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5407 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5408 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5409 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5410 */
5411
5412/*
5413 * Arc4 random number generator for OpenBSD.
5414 *
5415 * This code is derived from section 17.1 of Applied Cryptography,
5416 * second edition, which describes a stream cipher allegedly
5417 * compatible with RSA Labs "RC4" cipher (the actual description of
5418 * which is a trade secret). The same algorithm is used as a stream
5419 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5420 *
5421 * Here the stream cipher has been modified always to include the time
5422 * when initializing the state. That makes it impossible to
5423 * regenerate the same random sequence twice, so this can't be used
5424 * for encryption, but will generate good random numbers.
5425 *
5426 * RC4 is a registered trademark of RSA Laboratories.
5427 */
5428
5429/* Moved u_int8_t -> unsigned char (portability)
5430 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5431 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5432 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5433 * This is arc4random(3) using urandom.
5434 */
5435
5436#include <fcntl.h>
5437#include <limits.h>
5438#include <stdlib.h>
5439#include <sys/param.h>
5440#include <sys/time.h>
5441
/* Internal state of the RC4-compatible stream generator:
   permutation s[] plus the two walking indices i and j. */
struct arc4_stream {
	unsigned char i;
	unsigned char j;
	unsigned char s[256];
};

/* Nonzero once the generator state has been initialized. */
static int rs_initialized;
/* The single, process-wide generator state. */
static struct arc4_stream rs;
/* PID that last stirred the state; a mismatch after fork() forces a re-stir. */
static pid_t arc4_stir_pid;
/* Keystream bytes remaining before the next mandatory re-stir. */
static int arc4_count;

static unsigned char arc4_getbyte(void);
5454
5455static void
5456arc4_init(void)
5457{
5458 int n;
5459
5460 for (n = 0; n < 256; n++)
5461 rs.s[n] = n;
5462 rs.i = 0;
5463 rs.j = 0;
5464}
5465
5466static inline void
5467arc4_addrandom(unsigned char *dat, int datlen)
5468{
5469 int n;
5470 unsigned char si;
5471
5472 rs.i--;
5473 for (n = 0; n < 256; n++) {
5474 rs.i = (rs.i + 1);
5475 si = rs.s[rs.i];
5476 rs.j = (rs.j + si + dat[n % datlen]);
5477 rs.s[rs.i] = rs.s[rs.j];
5478 rs.s[rs.j] = si;
5479 }
5480 rs.j = rs.i;
5481}
5482
5483#ifdef HAVE_SCHED_H
5484#include <sched.h>
5485#endif
5486
/*
 * (Re)seed the generator: mix in two timestamps and, on non-BSD
 * systems, up to 128 bytes read from /dev/urandom, then discard the
 * first 256 keystream bytes and reset the re-stir countdown.
 */
static void
arc4_stir(void)
{
	int i;
	struct {
		struct timeval tv1;
		struct timeval tv2;
		u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
	} rdat;
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
	size_t sz = 0;
	int fd;
#endif

	gettimeofday(&rdat.tv1, NULL);


	if (!rs_initialized) {
		arc4_init();
		rs_initialized = 1;
	}

#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)

#ifdef HAVE_SCHED_YIELD
	/* Yield the processor to introduce some random delay. */
	(void) sched_yield();
#endif

	/*
	 * Pthread problem in multithreaded code on *BSD.
	 */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd != -1) {
		sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
		close(fd);
	}
	/* NOTE(review): 'sz' is sanity-checked here but never used
	   afterwards -- a failed or short read silently falls back to
	   timestamps-only entropy. */
	if (sz > sizeof (rdat.rnd))
		sz = 0;
 #endif

	arc4_stir_pid = getpid();
	gettimeofday(&rdat.tv2, NULL);

	/* Mix the whole struct (timestamps + urandom bytes) into the state. */
	arc4_addrandom((void *)&rdat, sizeof(rdat));

	/*
	 * Discard early keystream, as per recommendations in:
	 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
	 */
	for (i = 0; i < 256; i++)
		(void)arc4_getbyte();
	arc4_count = 1600000;
}
5541
5542static unsigned char
5543arc4_getbyte(void)
5544{
5545 unsigned char si, sj;
5546
5547 rs.i = (rs.i + 1);
5548 si = rs.s[rs.i];
5549 rs.j = (rs.j + si);
5550 sj = rs.s[rs.j];
5551 rs.s[rs.i] = sj;
5552 rs.s[rs.j] = si;
5553 return (rs.s[(si + sj) & 0xff]);
5554}
5555
5556
5557 /* Changed to return char* */
5558static char *
5559dnmalloc_arc4random(void)
5560{
5561 static char val[4];
5562
5563 /* We only call this once, hence no need for locking. */
5564
5565 /* _ARC4_LOCK(); */
5566 arc4_count -= 4;
5567 if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5568 arc4_stir();
5569
5570 val[0] = (char) arc4_getbyte();
5571 val[1] = (char) arc4_getbyte();
5572 val[2] = (char) arc4_getbyte();
5573 val[3] = (char) arc4_getbyte();
5574
5575 arc4_stir();
5576 /* _ARC4_UNLOCK(); */
5577 return val;
5578}
5579
#else
/* System malloc in use: pthread setup for dnmalloc is a no-op. */
int dnmalloc_pthread_init() { return 0; }
#endif /* ! USE_SYSTEM_MALLOC */
Note: See TracBrowser for help on using the repository browser.