source: trunk/src/dnmalloc.c@ 173

[171]1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
[172]2 Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3 and Rainer Wichmann
4
[171]5 The authors can be contacted by:
6 Email: dnmalloc@fort-knox.org
7 Address:
8 Yves Younan
9 Celestijnenlaan 200A
10 B-3001 Heverlee
11 Belgium
12
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 2.1 of the License, or (at your option) any later version.
17
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26
27*/
28
[172]29/* Current version: dnmalloc 1.0 */
[171]30/* Includes arc4random from OpenBSD, which is under the BSD license */
31
32/* Versions:
33 0.1-0.5:
34 Proof of concept implementation by Hans Van den Eynden and Yves Younan
35 0.6-0.7:
36 Bug fixes by Yves Younan
37 0.8-1.0.beta4:
38 Reimplementation from scratch by Yves Younan
39 1.0.beta4:
40 Public release
41 1.0.beta5:
42 Prev_chunkinfo sped up; it was really slow because of the way we did lookups
43 A freechunkinfo region is now freed when it is completely empty and
44 not the current one
45
46 1.0 (Rainer Wichmann [support at la dash samhna dot org]):
47 ---------------------
48
49 Compiler warnings fixed
50 Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
51 (and what the standard says)
52 Removed unused code
53 Fix assert(aligned_OK(chunk(newp)));
54 -> assert(aligned_OK(chunk(oldp)));
55 Fix statistics in sYSMALLOc
56 Fix overwrite of av->top in sYSMALLOc
57 Provide own assert(), glibc assert() doesn't work (calls malloc)
58 Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
59 Remove cfree, independent_cmalloc, independent_comalloc (untested
60 public functions not covered by any standard)
61 Provide posix_memalign (that one is in the standard)
62 Move the malloc_state struct to mmapped memory protected by guard pages
63 Add arc4random function to initialize random canary on startup
64 Implement random canary at end of (re|m)alloced/memaligned buffer,
65 check at free/realloc
66 Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
67 Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
68
69 Profiling: Reorder branches in hashtable_add, next_chunkinfo,
70 prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
71 checked_request2size (gcc predicts if{} branch to be taken).
72 Use UNLIKELY macro (gcc __builtin_expect()) where branch
73 reordering would make the code awkward.
74
75 Portability: Hashtable always covers full 32bit address space to
76 avoid assumptions about memory layout.
77 Portability: Try hard to enforce mapping of mmapped memory into
78 32bit address space, even on 64bit systems.
79 Portability: Provide a dnmalloc_pthread_init() function, since
80 pthread locking on HP-UX only works if initialized
81 after the application has entered main().
82 Portability: On *BSD, pthread_mutex_lock is unusable since it
83 calls malloc, use spinlocks instead.
84 Portability: Dynamically detect whether the heap is within
85 32bit address range (e.g. on Linux x86_64, it isn't).
86 Don't use sbrk() if the heap is mapped to an address
87 outside the 32bit range, since this doesn't work with
88 the hashtable. New macro morecore32bit.
89
90 Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
91 FreeBSD/pthread, and Solaris 10 i386/pthread.
92 Fail on: OpenBSD/pthread (in _thread_machdep_save_float_state),
93 might be related to OpenBSD pthread internals (??).
94 Non-threaded version (#undef USE_MALLOC_LOCK)
95 works on OpenBSD.
96
[172]97 further to 1.0:
98 Valgrind client requests inserted (#define USE_VALGRIND)
[173]99 Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
[172]100 Portability: minsize = 32 bit on 64bit architecture
101 Minor cleanups
[173]102 Fix: eliminate prototypes for memset, memcpy (they're in string.h)
[172]103
[171]104 There may be some bugs left in this version. Please use with caution.
105*/
106
107
108
109/* Please read the following papers for documentation:
110
111 Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
112 Countermeasures against Current and Future Code Injection Attacks,
113 Proceedings of the Third IEEE International Information Assurance
114 Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
115 IEEE, IEEE Press.
116 http://www.fort-knox.org/younany_countermeasures.pdf
117
118 Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
119 Eynden. Security of Memory Allocators for C and C++. Technical Report
120 CW419, Departement Computerwetenschappen, Katholieke Universiteit
121 Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
122
123 */
124
125/* Compile:
126 gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
127 "Link":
128 Dynamic:
129 gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
130 Static:
131 ar -rv libdnmalloc.a dnmalloc-portable.o
132
133*/
134
135/*
136 dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
137 dlmalloc was released as public domain and contained the following license:
138
139 "This is a version (aka dlmalloc) of malloc/free/realloc written by
140 Doug Lea and released to the public domain. Use, modify, and
141 redistribute this code without permission or acknowledgement in any
142 way you wish. Send questions, comments, complaints, performance
143 data, etc to dl@cs.oswego.edu
144
145 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
146
147 Note: There may be an updated version of this malloc obtainable at
148 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
149 Check before installing!"
150
151*/
152
153/* The following preprocessor macros are tested,
154 * and hence should have #define directives:
155 *
156 * HAVE_CONFIG_H Define to #include "config.h" (autoconf-generated)
157 *
158 * HAVE_UNISTD_H Define to #include <unistd.h>
159 *
160 * HAVE_SYS_UIO_H Define to #include <sys/uio.h> (for writev)
161 * HAVE_WRITEV Define if the 'writev' function is available
162 *
163 * HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
164 *
165 * HAVE_MALLOC_H Define to #include <malloc.h> (for struct mallinfo)
166 *
167 * HAVE_FCNTL_H Define to #include <fcntl.h>
168 *
169 * HAVE_SYS_MMAN_H Define to #include <sys/mman.h>
170 * HAVE_MMAP Define if the 'mmap' function is available.
171 *
172 * HAVE_SCHED_H Define to #include <sched.h>
173 * HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
174 */
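
/*
 * For reference, a config.h fragment that defines the macros listed above
 * might look like the following. This is only an illustrative sketch; a real
 * build should let autoconf generate config.h, and the values shown are
 * assumptions for a typical Linux system.
 *
 *     #define HAVE_UNISTD_H 1
 *     #define HAVE_SYS_UIO_H 1
 *     #define HAVE_WRITEV 1
 *     #define HAVE_SYS_PARAM_H 1
 *     #define HAVE_FCNTL_H 1
 *     #define HAVE_SYS_MMAN_H 1
 *     #define HAVE_MMAP 1
 *     #define HAVE_SCHED_H 1
 *     #define HAVE_SCHED_YIELD 1
 */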
175
176
177/*
178 __STD_C should be nonzero if using an ANSI-standard C compiler, a C++
179 compiler, or a C compiler sufficiently close to ANSI to get away
180 with it.
181*/
182
183#ifdef HAVE_CONFIG_H
184#include "config.h"
185#endif
186
[172]187#ifdef USE_VALGRIND
188#include <valgrind/memcheck.h>
189#else
190#define VALGRIND_FREELIKE_BLOCK(a,b) ((void)0)
191#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
192#define VALGRIND_CREATE_MEMPOOL(a,b,c) ((void)0)
193#define VALGRIND_MEMPOOL_ALLOC(a,b,c) ((void)0)
194#define VALGRIND_MEMPOOL_FREE(a,b) ((void)0)
195#define VALGRIND_DESTROY_MEMPOOL(a) ((void)0)
196#define VALGRIND_MAKE_MEM_DEFINED(a,b) ((void)0)
197#define VALGRIND_MAKE_MEM_UNDEFINED(a,b) ((void)0)
198#define VALGRIND_MAKE_MEM_NOACCESS(a,b) ((void)0)
199#endif
200
[171]201#if defined (__GNUC__) && __GNUC__ > 2
202# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
203# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
204# define __attribute_malloc__ __attribute__ ((__malloc__))
205#else
206# define LIKELY(x) (x)
207# define UNLIKELY(x) (x)
208# define __attribute_malloc__ /* Ignore */
209#endif
210
211/*
212 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
213 large blocks. This is currently only possible on Linux with
214 kernel versions newer than 1.3.77.
215*/
216
217#ifndef HAVE_MREMAP
218#ifdef linux
219#define HAVE_MREMAP 1
220#define _GNU_SOURCE
221#else
222#define HAVE_MREMAP 0
223#endif
224#endif /* HAVE_MREMAP */
225
226
227
228#ifndef __STD_C
229#if defined(__STDC__) || defined(__cplusplus)
230#define __STD_C 1
231#else
232#define __STD_C 0
233#endif
234#endif /*__STD_C*/
235
236
237/*
238 Void_t* is the pointer type that malloc should say it returns
239*/
240
241#ifndef Void_t
242#if (__STD_C || defined(WIN32))
243#define Void_t void
244#else
245#define Void_t char
246#endif
247#endif /*Void_t*/
248
249#if __STD_C
250#include <stddef.h> /* for size_t */
251#else
252#include <sys/types.h>
253#endif
254
255#if !defined(USE_SYSTEM_MALLOC)
256
257#ifdef __cplusplus
258extern "C" {
259#endif
260
261/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
262
263#ifdef HAVE_UNISTD_H
264#include <unistd.h>
265#endif
266
267#ifdef HAVE_SYS_UIO_H
268#include <sys/uio.h>
269#endif
270
271#include <stdio.h> /* needed for malloc_stats */
272#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
273
274#include <string.h>
275#include <stdlib.h>
276
277#include <sys/resource.h>
278
279extern int errno;
280
281 /* 0: lazy,
282 * 1: medium (assertions compiled in),
283 * 2: high (guard pages at end of hash table and ciregions)
284 * 3: paranoid (guards at end of each allocated chunk, check at free)
285 */
286#ifndef PARANOIA
287#define PARANOIA 9
288#endif
289
290 /* Using assert() with multithreading will cause the code
291 * to deadlock since glibc __assert_fail will call malloc().
292 * We need our very own assert().
293 */
294typedef void assert_handler_tp(const char * error, const char *file, int line);
295
296#if PARANOIA > 0
297
298#ifdef NDEBUG
299#undef NDEBUG
300#endif
301
302static void default_assert_handler(const char *error,
303 const char *file, int line)
304{
305#ifdef HAVE_WRITEV
306 struct iovec iov[5];
307 char * i1 = "assertion failed (";
308 char * i3 = "): ";
309 char * i5 = "\n";
310
311 iov[0].iov_base = i1; iov[0].iov_len = strlen(i1);
312 iov[1].iov_base = (char*) file; iov[1].iov_len = strlen(file);
313 iov[2].iov_base = i3; iov[2].iov_len = strlen(i3);
314 iov[3].iov_base = (char*) error; iov[3].iov_len = strlen(error);
315 iov[4].iov_base = i5; iov[4].iov_len = strlen(i5);
316 writev(STDERR_FILENO, iov, 5);
317#else
318 fputs("assertion failed (", stderr);
319 fputs(file, stderr);
320 fputs("): ", stderr);
321 fputs(error, stderr);
322 fputc('\n', stderr);
323#endif
[172]324 (void) line;
[171]325 abort();
326}
327static assert_handler_tp *assert_handler = default_assert_handler;
328
[172]329
[171]330#define assert(x) \
331 do { \
332 if (UNLIKELY(!(x))) { \
333 assert_handler(#x, __FILE__, __LINE__); \
334 } \
335 } while (0)
336
337#else
338
339static assert_handler_tp *assert_handler = NULL;
340#define NDEBUG
341#define assert(x) ((void)0)
342
343#endif
344
345assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *new)
346{
347 assert_handler_tp *old = assert_handler;
348 assert_handler = new;
349 return old;
350}
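
/*
  A minimal sketch of installing a custom handler with dnmalloc_set_handler().
  The handler name (my_abort_handler) is hypothetical; the body only uses
  malloc-free calls (write/abort, assuming <unistd.h> is available) so it is
  safe to run from inside the allocator.

      static void my_abort_handler(const char *error, const char *file, int line)
      {
        static const char msg[] = "dnmalloc assertion failed\n";
        write(STDERR_FILENO, msg, sizeof(msg) - 1);   // no malloc, no stdio
        (void) error; (void) file; (void) line;
        abort();
      }

      // early in main(), before any threads are created:
      //   (void) dnmalloc_set_handler(my_abort_handler);
*/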
351
352
353#include <stdarg.h>
354
355 /* define for debugging */
356 /* #define DNMALLOC_DEBUG */
357
[172]358 /* Do some extra checks? If not, this is covered by assert()s */
[171]359 /* #define DNMALLOC_CHECKS */
360
361 /*
362 The unsigned integer type used for comparing any two chunk sizes.
363 This should be at least as wide as size_t, but should not be signed.
364 */
365
366#ifndef CHUNK_SIZE_T
367#define CHUNK_SIZE_T unsigned long
368#endif
369
370/*
371 The unsigned integer type used to hold addresses when they are
372 manipulated as integers. intptr_t would suffice, except that it
373 is not defined on all systems.
374*/
375#ifndef PTR_UINT
376#define PTR_UINT unsigned long
377#endif
378
379
380/*
381 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
382 of chunk sizes.
383
384 The default version is the same as size_t.
385
386 While not strictly necessary, it is best to define this as an
387 unsigned type, even if size_t is a signed type. This may avoid some
388 artificial size limitations on some systems.
389
390 On a 64-bit machine, you may be able to reduce malloc overhead by
391 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
392 expense of not being able to handle more than 2^32 of malloced
393 space. If this limitation is acceptable, you are encouraged to set
394 this unless you are on a platform requiring 16byte alignments. In
395 this case the alignment requirements turn out to negate any
396 potential advantages of decreasing size_t word size.
397
398 Implementors: Beware of the possible combinations of:
399 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
400 and might be the same width as int or as long
401 - size_t might have different width and signedness than INTERNAL_SIZE_T
402 - int and long might be 32 or 64 bits, and might be the same width
403 To deal with this, most comparisons and difference computations
404 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
405 aware of the fact that casting an unsigned int to a wider long does
406 not sign-extend. (This also makes checking for negative numbers
407 awkward.) Some of these casts result in harmless compiler warnings
408 on some systems.
409*/
410
411#ifndef INTERNAL_SIZE_T
412#define INTERNAL_SIZE_T size_t
413#endif
414
415/* The corresponding word size */
416#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
417
418
419
420/*
421 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
422 It must be a power of two at least 2 * SIZE_SZ, even on machines
423 for which smaller alignments would suffice. It may be defined as
424 larger than this though. Note however that code and data structures
425 are optimized for the case of 8-byte alignment.
426*/
427
428
429#ifndef MALLOC_ALIGNMENT
430#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
431#endif
432
433/* The corresponding bit mask value */
434#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
435
436
437
438/*
439 REALLOC_ZERO_BYTES_FREES should be set if a call to
440 realloc with zero bytes should be the same as a call to free.
441 Some people think it should. Otherwise, since this malloc
442 returns a unique pointer for malloc(0), so does realloc(p, 0).
443*/
444
445#define REALLOC_ZERO_BYTES_FREES
446
447/*
448 TRIM_FASTBINS controls whether free() of a very small chunk can
449 immediately lead to trimming. Setting to true (1) can reduce memory
450 footprint, but will almost always slow down programs that use a lot
451 of small chunks.
452
453 Define this only if you are willing to give up some speed to more
454 aggressively reduce system-level memory footprint when releasing
455 memory in programs that use many small chunks. You can get
456 essentially the same effect by setting MXFAST to 0, but this can
457 lead to even greater slowdowns in programs using many small chunks.
458 TRIM_FASTBINS is an in-between compile-time option that disables
459 only those chunks bordering topmost memory from being placed in
460 fastbins.
461*/
462
463#ifndef TRIM_FASTBINS
464#define TRIM_FASTBINS 0
465#endif
466
467
468/*
469 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
470 This is necessary when you only want to use this malloc in one part
471 of a program, using your regular system malloc elsewhere.
472*/
473
474/* #define USE_DL_PREFIX */
475
476
477/*
478 USE_MALLOC_LOCK causes wrapper functions to surround each
479 callable routine with pthread mutex lock/unlock.
480
481 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
482*/
483
484/* #define USE_MALLOC_LOCK */
485
486
487/*
488 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
489 actually a wrapper function that first calls MALLOC_PREACTION, then
490 calls the internal routine, and follows it with
491 MALLOC_POSTACTION. This is needed for locking, but you can also use
492 this, without USE_MALLOC_LOCK, for purposes of interception,
493 instrumentation, etc. It is a sad fact that using wrappers often
494 noticeably degrades performance of malloc-intensive programs.
495*/
496
497
498#ifdef USE_MALLOC_LOCK
499#define USE_PUBLIC_MALLOC_WRAPPERS
500#else
501/* #define USE_PUBLIC_MALLOC_WRAPPERS */
502#endif
503
504
505/*
506 Two-phase name translation.
507 All of the actual routines are given mangled names.
508 When wrappers are used, they become the public callable versions.
509 When DL_PREFIX is used, the callable names are prefixed.
510*/
511
512#ifndef USE_PUBLIC_MALLOC_WRAPPERS
513#define cALLOc public_cALLOc
514#define fREe public_fREe
515#define mALLOc public_mALLOc
516#define mEMALIGn public_mEMALIGn
517#define posix_mEMALIGn public_posix_mEMALIGn
518#define rEALLOc public_rEALLOc
519#define vALLOc public_vALLOc
520#define pVALLOc public_pVALLOc
521#define mALLINFo public_mALLINFo
522#define mALLOPt public_mALLOPt
523#define mTRIm public_mTRIm
524#define mSTATs public_mSTATs
525#define mUSABLe public_mUSABLe
526#endif
527
528#ifdef USE_DL_PREFIX
529#define public_cALLOc dlcalloc
530#define public_fREe dlfree
531#define public_mALLOc dlmalloc
532#define public_mEMALIGn dlmemalign
533#define public_posix_mEMALIGn dlposix_memalign
534#define public_rEALLOc dlrealloc
535#define public_vALLOc dlvalloc
536#define public_pVALLOc dlpvalloc
537#define public_mALLINFo dlmallinfo
538#define public_mALLOPt dlmallopt
539#define public_mTRIm dlmalloc_trim
540#define public_mSTATs dlmalloc_stats
541#define public_mUSABLe dlmalloc_usable_size
542#else /* USE_DL_PREFIX */
543#define public_cALLOc calloc
544#define public_fREe free
545#define public_mALLOc malloc
546#define public_mEMALIGn memalign
547#define public_posix_mEMALIGn posix_memalign
548#define public_rEALLOc realloc
549#define public_vALLOc valloc
550#define public_pVALLOc pvalloc
551#define public_mALLINFo mallinfo
552#define public_mALLOPt mallopt
553#define public_mTRIm malloc_trim
554#define public_mSTATs malloc_stats
555#define public_mUSABLe malloc_usable_size
556#endif /* USE_DL_PREFIX */
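
/*
  Illustration of the name translation above: when built with -DUSE_DL_PREFIX,
  the public entry points become dlmalloc, dlfree, and so on, so this
  allocator can coexist with the system malloc:

      void *p = dlmalloc(128);
      if (p != NULL)
        dlfree(p);

  Without USE_DL_PREFIX the same calls are simply malloc()/free().
*/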
557
558
559/*
560 HAVE_MEMCPY should be defined if you are not otherwise using
561 ANSI STD C, but still have memcpy and memset in your C library
562 and want to use them in calloc and realloc. Otherwise simple
563 macro versions are defined below.
564
565 USE_MEMCPY should be defined as 1 if you actually want to
566 have memset and memcpy called. People report that the macro
567 versions are faster than libc versions on some systems.
568
569 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
570 (of <= 36 bytes) are manually unrolled in realloc and calloc.
571*/
572
573#ifndef HAVE_MEMCPY
574#define HAVE_MEMCPY
575#endif
576
577#ifndef USE_MEMCPY
578#ifdef HAVE_MEMCPY
579#define USE_MEMCPY 1
580#else
581#define USE_MEMCPY 0
582#endif
583#endif
584
585
586#if (__STD_C || defined(HAVE_MEMCPY))
587
588#ifdef WIN32
[173]589 /* On Win32 memset and memcpy are already declared in windows.h */
[171]590#else
591#if __STD_C
[173]592 /* Defined in string.h */
[171]593#else
594Void_t* memset();
595Void_t* memcpy();
596#endif
597#endif
598#endif
599
600/*
601 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
602 malloc fails to be able to return memory, either because memory is
603 exhausted or because of illegal arguments.
604
605 By default, sets errno if running on STD_C platform, else does nothing.
606*/
607
608#ifndef MALLOC_FAILURE_ACTION
609#if __STD_C
610#define MALLOC_FAILURE_ACTION \
611 errno = ENOMEM;
612
613#else
614#define MALLOC_FAILURE_ACTION
615#endif
616#endif
617
618/*
619 MORECORE-related declarations. By default, rely on sbrk
620*/
621
622
623#if !defined(HAVE_UNISTD_H)
624#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
625#if __STD_C
626extern Void_t* sbrk(ptrdiff_t);
627#else
628extern Void_t* sbrk();
629#endif
630#endif
631#endif
632
633/*
634 MORECORE_FAILURE is the value returned upon failure of MORECORE
635 as well as mmap. Since it cannot be an otherwise valid memory address,
636 and must reflect values of standard sys calls, you probably ought not
637 try to redefine it.
638*/
639
640#ifndef MORECORE_FAILURE
641#define MORECORE_FAILURE ((void*)(-1UL))
642#endif
643
644/*
645 MORECORE is the name of the routine to call to obtain more memory
646 from the system. See below for general guidance on writing
647 alternative MORECORE functions, as well as a version for WIN32 and a
648 sample version for pre-OSX macos.
649*/
650
651#ifndef MORECORE
652#define MORECORE sbrk
653#endif
654
655
656/*
657 If MORECORE_CONTIGUOUS is true, take advantage of fact that
658 consecutive calls to MORECORE with positive arguments always return
659 contiguous increasing addresses. This is true of unix sbrk. Even
660 if not defined, when regions happen to be contiguous, malloc will
661 permit allocations spanning regions obtained from different
662 calls. But defining this when applicable enables some stronger
663 consistency checks and space efficiencies.
664*/
665
666#ifndef MORECORE_CONTIGUOUS
667#define MORECORE_CONTIGUOUS 1
668#endif
669
670/*
671 Define MORECORE_CANNOT_TRIM if your version of MORECORE
672 cannot release space back to the system when given negative
673 arguments. This is generally necessary only if you are using
674 a hand-crafted MORECORE function that cannot handle negative arguments.
675*/
676
677/* #define MORECORE_CANNOT_TRIM */
678
679
680/*
681 This malloc requires mmap for heap management data. It is an error
682 if mmap is not available.
683
684 Additionally, mmap will be used to satisfy large requests.
685*/
686
687#ifndef HAVE_MMAP
[172]688# error HAVE_MMAP not defined, does your operating system have mmap?
[171]689#endif
690
691/*
692 Standard unix mmap using /dev/zero clears memory so calloc doesn't
693 need to.
694*/
695
696#ifndef MMAP_CLEARS
697#define MMAP_CLEARS 1
698#endif
699
700
701/*
702 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
703 sbrk fails, and mmap is used as a backup (which is done only if
704 HAVE_MMAP). The value must be a multiple of page size. This
705 backup strategy generally applies only when systems have "holes" in
706 address space, so sbrk cannot perform contiguous expansion, but
707 there is still space available on system. On systems for which
708 this is known to be useful (i.e. most linux kernels), this occurs
709 only when programs allocate huge amounts of memory. Between this,
710 and the fact that mmap regions tend to be limited, the size should
711 be large, to avoid too many mmap calls and thus avoid running out
712 of kernel resources.
713*/
714
715#ifndef MMAP_AS_MORECORE_SIZE
716#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
717#endif
718
719
720/*
721 The system page size. To the extent possible, this malloc manages
722 memory from the system in page-size units. Note that this value is
723 cached during initialization into a field of malloc_state. So even
724 if malloc_getpagesize is a function, it is only called once.
725
726 The following mechanics for getpagesize were adapted from bsd/gnu
727 getpagesize.h. If none of the system-probes here apply, a value of
728 4096 is used, which should be OK: If they don't apply, then using
729 the actual value probably doesn't impact performance.
730*/
731
732
733#ifndef malloc_getpagesize
734
735# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
736# ifndef _SC_PAGE_SIZE
737# define _SC_PAGE_SIZE _SC_PAGESIZE
738# endif
739# endif
740
741# ifdef _SC_PAGE_SIZE
742# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
743# else
744# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
745 extern size_t getpagesize();
746# define malloc_getpagesize getpagesize()
747# else
748# ifdef WIN32 /* use supplied emulation of getpagesize */
749# define malloc_getpagesize getpagesize()
750# else
751# if defined(HAVE_SYS_PARAM_H)
752# include <sys/param.h>
753# endif
754# ifdef EXEC_PAGESIZE
755# define malloc_getpagesize EXEC_PAGESIZE
756# else
757# ifdef NBPG
758# ifndef CLSIZE
759# define malloc_getpagesize NBPG
760# else
761# define malloc_getpagesize (NBPG * CLSIZE)
762# endif
763# else
764# ifdef NBPC
765# define malloc_getpagesize NBPC
766# else
767# ifdef PAGESIZE
768# define malloc_getpagesize PAGESIZE
769# else /* just guess */
770# define malloc_getpagesize (4096)
771# endif
772# endif
773# endif
774# endif
775# endif
776# endif
777# endif
778#endif
779
780/*
781 This version of malloc supports the standard SVID/XPG mallinfo
782 routine that returns a struct containing usage properties and
783 statistics. It should work on any SVID/XPG compliant system that has
784 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
785 install such a thing yourself, cut out the preliminary declarations
786 as described above and below and save them in a malloc.h file. But
787 there's no compelling reason to bother to do this.)
788
789 The main declaration needed is the mallinfo struct that is returned
790 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
791 bunch of fields that are not even meaningful in this version of
792 malloc. These fields are instead filled by mallinfo() with
793 other numbers that might be of interest.
794
795 HAVE_MALLOC_H should be set if you have a
796 /usr/include/malloc.h file that includes a declaration of struct
797 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
798 version is declared below. These must be precisely the same for
799 mallinfo() to work. The original SVID version of this struct,
800 defined on most systems with mallinfo, declares all fields as
801 ints. But some others define them as unsigned long. If your system
802 defines the fields using a type of different width than listed here,
803 you must #include your system version and #define
804 HAVE_MALLOC_H.
805*/
806
807/* #define HAVE_MALLOC_H */
808
809/* On *BSD, malloc.h is deprecated, and on some *BSD systems
810 * including it may actually raise an error.
811 */
812#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
813#include <malloc.h>
814#else
815
816/* SVID2/XPG mallinfo structure */
817
818struct mallinfo {
819 int arena; /* non-mmapped space allocated from system */
820 int ordblks; /* number of free chunks */
821 int smblks; /* number of fastbin blocks */
822 int hblks; /* number of mmapped regions */
823 int hblkhd; /* space in mmapped regions */
824 int usmblks; /* maximum total allocated space */
825 int fsmblks; /* space available in freed fastbin blocks */
826 int uordblks; /* total allocated space */
827 int fordblks; /* total free space */
828 int keepcost; /* top-most, releasable (via malloc_trim) space */
829};
830
831/*
832 SVID/XPG defines four standard parameter numbers for mallopt,
833 normally defined in malloc.h. Only one of these (M_MXFAST) is used
834 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
835 so setting them has no effect. But this malloc also supports other
836 options in mallopt described below.
837*/
838#endif
839
840
841/* ---------- description of public routines ------------ */
842
843/*
844 malloc(size_t n)
845 Returns a pointer to a newly allocated chunk of at least n bytes, or null
846 if no space is available. Additionally, on failure, errno is
847 set to ENOMEM on ANSI C systems.
848
849 If n is zero, malloc returns a minimum-sized chunk. (The minimum
850 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
851 systems.) On most systems, size_t is an unsigned type, so calls
852 with negative arguments are interpreted as requests for huge amounts
853 of space, which will often fail. The maximum supported value of n
854 differs across systems, but is in all cases less than the maximum
855 representable value of a size_t.
856*/
857#if __STD_C
858Void_t* public_mALLOc(size_t) __attribute_malloc__;
859#else
860Void_t* public_mALLOc();
861#endif
862
863/*
864 free(Void_t* p)
865 Releases the chunk of memory pointed to by p, that had been previously
866 allocated using malloc or a related routine such as realloc.
867 It has no effect if p is null. It can have arbitrary (i.e., bad!)
868 effects if p has already been freed.
869
870 Unless disabled (using mallopt), freeing very large spaces will
871 when possible, automatically trigger operations that give
872 back unused memory to the system, thus reducing program footprint.
873*/
874#if __STD_C
875void public_fREe(Void_t*);
876#else
877void public_fREe();
878#endif
879
880/*
881 calloc(size_t n_elements, size_t element_size);
882 Returns a pointer to n_elements * element_size bytes, with all locations
883 set to zero.
884*/
885#if __STD_C
886Void_t* public_cALLOc(size_t, size_t) __attribute_malloc__;
887#else
888Void_t* public_cALLOc();
889#endif
890
891/*
892 realloc(Void_t* p, size_t n)
893 Returns a pointer to a chunk of size n that contains the same data
894 as does chunk p up to the minimum of (n, p's size) bytes, or null
895 if no space is available.
896
897 The returned pointer may or may not be the same as p. The algorithm
898 prefers extending p when possible, otherwise it employs the
899 equivalent of a malloc-copy-free sequence.
900
901 If p is null, realloc is equivalent to malloc.
902
903 If space is not available, realloc returns null, errno is set (if on
904 ANSI) and p is NOT freed.
905
906 if n is for fewer bytes than already held by p, the newly unused
907 space is lopped off and freed if possible. Unless the #define
908 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
909 zero (re)allocates a minimum-sized chunk.
910
911 Large chunks that were internally obtained via mmap will always
912 be reallocated using malloc-copy-free sequences unless
913 the system supports MREMAP (currently only linux).
914
915 The old unix realloc convention of allowing the last-free'd chunk
916 to be used as an argument to realloc is not supported.
917*/
918#if __STD_C
919Void_t* public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
920#else
921Void_t* public_rEALLOc();
922#endif
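
/*
  A usage sketch that follows from the rules above: because p is NOT freed
  when realloc fails, assign through a temporary to avoid leaking the old
  block (new_size is an illustrative variable):

      void *tmp = realloc(p, new_size);
      if (tmp == NULL) {
        // out of memory: p is still valid and must still be freed later
        return -1;
      }
      p = tmp;
*/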
923
924/*
925 memalign(size_t alignment, size_t n);
926 Returns a pointer to a newly allocated chunk of n bytes, aligned
927 in accord with the alignment argument.
928
929 The alignment argument should be a power of two. If the argument is
930 not a power of two, the nearest greater power is used.
931 8-byte alignment is guaranteed by normal malloc calls, so don't
932 bother calling memalign with an argument of 8 or less.
933
934 Overreliance on memalign is a sure way to fragment space.
935*/
936#if __STD_C
937Void_t* public_mEMALIGn(size_t, size_t) __attribute_malloc__;
938#else
939Void_t* public_mEMALIGn();
940#endif
941
942/*
943 posix_memalign(void** memptr, size_t alignment, size_t n);
944 Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
945 in accord with the alignment argument. Returns 0 on success, otherwise
946 an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
947
948 The alignment must be a power of two, and a multiple of sizeof(void *).
949*/
950#if __STD_C
951int public_posix_mEMALIGn(Void_t**, size_t, size_t);
952#else
953int public_posix_mEMALIGn();
954#endif
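
/*
  Usage sketch for posix_memalign: allocate 4096 bytes aligned to a 64-byte
  boundary (the sizes are illustrative):

      void *buf = NULL;
      int rc = posix_memalign(&buf, 64, 4096);   // 64 is a power of two and
                                                 // a multiple of sizeof(void*)
      if (rc != 0)
        return rc;       // EINVAL (bad alignment) or ENOMEM (out of memory)
      // ... use buf ...
      free(buf);
*/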
955
956/*
957 valloc(size_t n);
958 Equivalent to memalign(pagesize, n), where pagesize is the page
959 size of the system. If the pagesize is unknown, 4096 is used.
960*/
961#if __STD_C
962Void_t* public_vALLOc(size_t) __attribute_malloc__;
963#else
964Void_t* public_vALLOc();
965#endif
966
967
968
969/*
970 mallopt(int parameter_number, int parameter_value)
971 Sets tunable parameters. The format is to provide a
972 (parameter-number, parameter-value) pair. mallopt then sets the
973 corresponding parameter to the argument value if it can (i.e., so
974 long as the value is meaningful), and returns 1 if successful else
975 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
976 normally defined in malloc.h. Only one of these (M_MXFAST) is used
977 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
978 so setting them has no effect. But this malloc also supports four
979 other options in mallopt. See below for details. Briefly, supported
980 parameters are as follows (listed defaults are for "typical"
981 configurations).
982
983 Symbol param # default allowed param values
984 M_MXFAST 1 64 0-80 (0 disables fastbins)
985 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
986 M_TOP_PAD -2 0 any
987 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
988 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
989*/
990#if __STD_C
991int public_mALLOPt(int, int);
992#else
993int public_mALLOPt();
994#endif
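
/*
  Example mallopt calls using the parameters from the table above (the values
  are illustrative; each call returns 1 if the value was accepted, 0 if not):

      mallopt(M_MXFAST, 0);                    // 0 disables fastbins
      mallopt(M_TRIM_THRESHOLD, 128 * 1024);   // trim above 128K unused top
      mallopt(M_MMAP_THRESHOLD, 512 * 1024);   // mmap requests of 512K and up
*/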
995
996
997/*
998 mallinfo()
999 Returns (by copy) a struct containing various summary statistics:
1000
1001 arena: current total non-mmapped bytes allocated from system
1002 ordblks: the number of free chunks
1003 smblks: the number of fastbin blocks (i.e., small chunks that
1004 have been freed but not yet reused or consolidated)
1005 hblks: current number of mmapped regions
1006 hblkhd: total bytes held in mmapped regions
1007 usmblks: the maximum total allocated space. This will be greater
1008 than current total if trimming has occurred.
1009 fsmblks: total bytes held in fastbin blocks
1010 uordblks: current total allocated space (normal or mmapped)
1011 fordblks: total free space
1012 keepcost: the maximum number of bytes that could ideally be released
1013 back to system via malloc_trim. ("ideally" means that
1014 it ignores page restrictions etc.)
1015
1016 Because these fields are ints, but internal bookkeeping may
1017 be kept as longs, the reported values may wrap around zero and
1018 thus be inaccurate.
1019*/
1020#if __STD_C
1021struct mallinfo public_mALLINFo(void);
1022#else
1023struct mallinfo public_mALLINFo();
1024#endif
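
/*
  Usage sketch for mallinfo, printing a few of the fields described above
  (all fields are ints, as noted):

      struct mallinfo mi = mallinfo();
      fprintf(stderr, "in use: %d  free: %d  mmapped: %d  trimmable: %d\n",
              mi.uordblks, mi.fordblks, mi.hblkhd, mi.keepcost);
*/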
1025
1026/*
1027 pvalloc(size_t n);
1028 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1029 round up n to nearest pagesize.
1030 */
1031#if __STD_C
1032Void_t* public_pVALLOc(size_t) __attribute_malloc__;
1033#else
1034Void_t* public_pVALLOc();
1035#endif
1036
1037/*
1038 malloc_trim(size_t pad);
1039
1040 If possible, gives memory back to the system (via negative
1041 arguments to sbrk) if there is unused memory at the `high' end of
1042 the malloc pool. You can call this after freeing large blocks of
1043 memory to potentially reduce the system-level memory requirements
1044 of a program. However, it cannot guarantee to reduce memory. Under
1045 some allocation patterns, some large free blocks of memory will be
1046 locked between two used chunks, so they cannot be given back to
1047 the system.
1048
1049 The `pad' argument to malloc_trim represents the amount of free
1050 trailing space to leave untrimmed. If this argument is zero,
1051 only the minimum amount of memory to maintain internal data
1052 structures will be left (one page or less). Non-zero arguments
1053 can be supplied to maintain enough trailing space to service
1054 future expected allocations without having to re-obtain memory
1055 from the system.
1056
1057 Malloc_trim returns 1 if it actually released any memory, else 0.
1058 On systems that do not support "negative sbrks", it will always
1059 return 0.
1060*/
1061#if __STD_C
1062int public_mTRIm(size_t);
1063#else
1064int public_mTRIm();
1065#endif
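
/*
  Usage sketch for malloc_trim: after freeing a large working set, ask the
  allocator to return unused top memory to the system, keeping 64K of slack
  for future allocations (the 64K figure is illustrative):

      // ... free() many large blocks first ...
      if (malloc_trim(64 * 1024) == 0) {
        // nothing could be released (or negative sbrk is unsupported)
      }
*/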
1066
1067/*
1068 malloc_usable_size(Void_t* p);
1069
1070 Returns the number of bytes you can actually use in
1071 an allocated chunk, which may be more than you requested (although
1072 often not) due to alignment and minimum size constraints.
1073 You can use this many bytes without worrying about
1074 overwriting other allocated objects. This is not a particularly great
1075 programming practice. malloc_usable_size can be more useful in
1076 debugging and assertions, for example:
1077
1078 p = malloc(n);
1079 assert(malloc_usable_size(p) >= 256);
1080
1081*/
1082#if __STD_C
1083size_t public_mUSABLe(Void_t*);
1084#else
1085size_t public_mUSABLe();
1086#endif
1087
1088/*
1089 malloc_stats();
1090 Prints on stderr the amount of space obtained from the system (both
1091 via sbrk and mmap), the maximum amount (which may be more than
1092 current if malloc_trim and/or munmap got called), and the current
1093 number of bytes allocated via malloc (or realloc, etc) but not yet
1094 freed. Note that this is the number of bytes allocated, not the
1095 number requested. It will be larger than the number requested
1096 because of alignment and bookkeeping overhead. Because it includes
1097 alignment wastage as being in use, this figure may be greater than
1098 zero even when no user-level chunks are allocated.
1099
1100 The reported current and maximum system memory can be inaccurate if
1101 a program makes other calls to system memory allocation functions
1102 (normally sbrk) outside of malloc.
1103
1104 malloc_stats prints only the most commonly interesting statistics.
1105 More information can be obtained by calling mallinfo.
1106
1107*/
1108#if __STD_C
1109void public_mSTATs();
1110#else
1111void public_mSTATs();
1112#endif
1113
1114/* mallopt tuning options */
1115
1116/*
1117 M_MXFAST is the maximum request size used for "fastbins", special bins
1118 that hold returned chunks without consolidating their spaces. This
1119 enables future requests for chunks of the same size to be handled
1120 very quickly, but can increase fragmentation, and thus increase the
1121 overall memory footprint of a program.
1122
1123 This malloc manages fastbins very conservatively yet still
1124 efficiently, so fragmentation is rarely a problem for values less
1125 than or equal to the default. The maximum supported value of MXFAST
1126 is 80. You wouldn't want it any higher than this anyway. Fastbins
1127 are designed especially for use with many small structs, objects or
1128 strings -- the default handles structs/objects/arrays with sizes up
1129 to 16 4byte fields, or small strings representing words, tokens,
1130 etc. Using fastbins for larger objects normally worsens
1131 fragmentation without improving speed.
1132
1133 M_MXFAST is set in REQUEST size units. It is internally used in
1134 chunksize units, which adds padding and alignment. You can reduce
1135 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1136 algorithm to be a closer approximation of fifo-best-fit in all cases,
1137 not just for larger requests, but will generally cause it to be
1138 slower.
1139*/
1140
1141
1142/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1143#ifndef M_MXFAST
1144#define M_MXFAST 1
1145#endif
1146
1147#ifndef DEFAULT_MXFAST
1148#define DEFAULT_MXFAST 64
1149#endif
1150
1151
1152/*
1153 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1154 to keep before releasing via malloc_trim in free().
1155
1156 Automatic trimming is mainly useful in long-lived programs.
1157 Because trimming via sbrk can be slow on some systems, and can
1158 sometimes be wasteful (in cases where programs immediately
1159 afterward allocate more large chunks) the value should be high
1160 enough so that your overall system performance would improve by
1161 releasing this much memory.
1162
1163 The trim threshold and the mmap control parameters (see below)
1164 can be traded off with one another. Trimming and mmapping are
1165 two different ways of releasing unused memory back to the
1166 system. Between these two, it is often possible to keep
1167 system-level demands of a long-lived program down to a bare
1168 minimum. For example, in one test suite of sessions measuring
1169 the XF86 X server on Linux, using a trim threshold of 128K and a
1170 mmap threshold of 192K led to near-minimal long term resource
1171 consumption.
1172
1173 If you are using this malloc in a long-lived program, it should
1174 pay to experiment with these values. As a rough guide, you
1175 might set to a value close to the average size of a process
1176 (program) running on your system. Releasing this much memory
1177 would allow such a process to run in memory. Generally, it's
1178 worth it to tune for trimming rather than memory mapping when a
1179 program undergoes phases where several large chunks are
1180 allocated and released in ways that can reuse each other's
1181 storage, perhaps mixed with phases where there are no such
1182 chunks at all. And in well-behaved long-lived programs,
1183 controlling release of large blocks via trimming versus mapping
1184 is usually faster.
1185
1186 However, in most programs, these parameters serve mainly as
1187 protection against the system-level effects of carrying around
1188 massive amounts of unneeded memory. Since frequent calls to
1189 sbrk, mmap, and munmap otherwise degrade performance, the default
1190 parameters are set to relatively high values that serve only as
1191 safeguards.
1192
1193 The trim value must be greater than page size to have any useful
1194 effect. To disable trimming completely, you can set to
1195 (unsigned long)(-1)
1196
1197 Trim settings interact with fastbin (MXFAST) settings: Unless
1198 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1199 freeing a chunk with size less than or equal to MXFAST. Trimming is
1200 instead delayed until subsequent freeing of larger chunks. However,
1201 you can still force an attempted trim by calling malloc_trim.
1202
1203 Also, trimming is not generally possible in cases where
1204 the main arena is obtained via mmap.
1205
1206 Note that the trick some people use of mallocing a huge space and
1207 then freeing it at program startup, in an attempt to reserve system
1208 memory, doesn't have the intended effect under automatic trimming,
1209 since that memory will immediately be returned to the system.
1210*/
1211
1212#define M_TRIM_THRESHOLD -1
1213
1214#ifndef DEFAULT_TRIM_THRESHOLD
1215#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1216#endif
1217
1218/*
1219 M_TOP_PAD is the amount of extra `padding' space to allocate or
1220 retain whenever sbrk is called. It is used in two ways internally:
1221
1222 * When sbrk is called to extend the top of the arena to satisfy
1223 a new malloc request, this much padding is added to the sbrk
1224 request.
1225
1226 * When malloc_trim is called automatically from free(),
1227 it is used as the `pad' argument.
1228
1229 In both cases, the actual amount of padding is rounded
1230 so that the end of the arena is always a system page boundary.
1231
1232 The main reason for using padding is to avoid calling sbrk so
1233 often. Having even a small pad greatly reduces the likelihood
1234 that nearly every malloc request during program start-up (or
1235 after trimming) will invoke sbrk, which needlessly wastes
1236 time.
1237
1238 Automatic rounding-up to page-size units is normally sufficient
1239 to avoid measurable overhead, so the default is 0. However, in
1240 systems where sbrk is relatively slow, it can pay to increase
1241 this value, at the expense of carrying around more memory than
1242 the program needs.
1243*/
1244
1245#define M_TOP_PAD -2
1246
1247#ifndef DEFAULT_TOP_PAD
1248#define DEFAULT_TOP_PAD (0)
1249#endif
1250
1251/*
1252 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1253 to service a request. Requests of at least this size that cannot
1254 be allocated using already-existing space will be serviced via mmap.
1255 (If enough normal freed space already exists it is used instead.)
1256
1257 Using mmap segregates relatively large chunks of memory so that
1258 they can be individually obtained and released from the host
1259 system. A request serviced through mmap is never reused by any
1260 other request (at least not directly; the system may just so
1261 happen to remap successive requests to the same locations).
1262
1263 Segregating space in this way has the benefits that:
1264
1265 1. Mmapped space can ALWAYS be individually released back
1266 to the system, which helps keep the system level memory
1267 demands of a long-lived program low.
1268 2. Mapped memory can never become `locked' between
1269 other chunks, as can happen with normally allocated chunks, which
1270 means that even trimming via malloc_trim would not release them.
1271 3. On some systems with "holes" in address spaces, mmap can obtain
1272 memory that sbrk cannot.
1273
1274 However, it has the disadvantages that:
1275
1276 1. The space cannot be reclaimed, consolidated, and then
1277 used to service later requests, as happens with normal chunks.
1278 2. It can lead to more wastage because of mmap page alignment
1279 requirements
1280 3. It causes malloc performance to be more dependent on host
1281 system memory management support routines which may vary in
1282 implementation quality and may impose arbitrary
1283 limitations. Generally, servicing a request via normal
1284 malloc steps is faster than going through a system's mmap.
1285
1286 The advantages of mmap nearly always outweigh disadvantages for
1287 "large" chunks, but the value of "large" varies across systems. The
1288 default is an empirically derived value that works well in most
1289 systems.
1290*/
1291
1292#define M_MMAP_THRESHOLD -3
1293
1294#ifndef DEFAULT_MMAP_THRESHOLD
1295#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1296#endif
1297
1298/*
1299 M_MMAP_MAX is the maximum number of requests to simultaneously
1300 service using mmap. This parameter exists because some
1301 systems have a limited number of internal tables for
1302 use by mmap, and using more than a few of them may degrade
1303 performance.
1304
1305 The default is set to a value that serves only as a safeguard.
1306 Setting to 0 disables use of mmap for servicing large requests. If
1307 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1308 to non-zero values in mallopt will fail.
1309*/
1310
1311#define M_MMAP_MAX -4
1312
1313#ifndef DEFAULT_MMAP_MAX
1314#define DEFAULT_MMAP_MAX (65536)
1315#endif
1316
1317#ifdef __cplusplus
1318}; /* end of extern "C" */
1319#endif
1320
1321/*
1322 ========================================================================
1323 To make a fully customizable malloc.h header file, cut everything
1324 above this line, put into file malloc.h, edit to suit, and #include it
1325 on the next line, as well as in programs that use this malloc.
1326 ========================================================================
1327*/
1328
1329/* #include "malloc.h" */
1330
1331/* --------------------- public wrappers ---------------------- */
1332
1333#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1334
1335/* DL_STATIC used to make functions (deep down) consistent
1336 * with prototypes (otherwise the prototypes are static
1337 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1338 * The gcc compiler doesn't care, but the HP-UX compiler does.
1339 */
1340#define DL_STATIC static
1341
1342/* Declare all routines as internal */
1343#if __STD_C
1344static Void_t* mALLOc(size_t) __attribute_malloc__;
1345static void fREe(Void_t*);
1346static Void_t* rEALLOc(Void_t*, size_t) __attribute_malloc__;
1347static Void_t* mEMALIGn(size_t, size_t) __attribute_malloc__;
1348static int posix_mEMALIGn(Void_t**, size_t, size_t);
1349static Void_t* vALLOc(size_t) __attribute_malloc__;
1350static Void_t* pVALLOc(size_t) __attribute_malloc__;
1351static Void_t* cALLOc(size_t, size_t) __attribute_malloc__;
1352static int mTRIm(size_t);
1353static size_t mUSABLe(Void_t*);
1354static void mSTATs();
1355static int mALLOPt(int, int);
1356static struct mallinfo mALLINFo(void);
1357#else
1358static Void_t* mALLOc();
1359static void fREe();
1360static Void_t* rEALLOc();
1361static Void_t* mEMALIGn();
1362static int posix_mEMALIGn();
1363static Void_t* vALLOc();
1364static Void_t* pVALLOc();
1365static Void_t* cALLOc();
1366static int mTRIm();
1367static size_t mUSABLe();
1368static void mSTATs();
1369static int mALLOPt();
1370static struct mallinfo mALLINFo();
1371#endif
1372
1373/*
1374 MALLOC_PREACTION and MALLOC_POSTACTION should be
1375 defined to return 0 on success, and nonzero on failure.
1376 The return value of MALLOC_POSTACTION is currently ignored
1377 in wrapper functions since there is no reasonable default
1378 action to take on failure.
1379*/
1380
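/*
  A minimal sketch of such an interception hook: when building without
  USE_MALLOC_LOCK, the default no-op definitions below could be replaced by
  something like the following to count allocator calls (the counter name is
  hypothetical):

      static unsigned long dnmalloc_call_count = 0;
      #define MALLOC_PREACTION   (dnmalloc_call_count++, 0)   // 0 == success
      #define MALLOC_POSTACTION  (0)
*/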
1381
1382#ifdef USE_MALLOC_LOCK
1383
1384# ifdef WIN32
1385
1386static int mALLOC_MUTEx;
1387#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1388#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1389int dnmalloc_pthread_init(void) { return 0; }
1390
1391# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1392
1393# if defined(__NetBSD__)
1394#include <reentrant.h>
1395extern int __isthreaded;
1396static mutex_t thread_lock = MUTEX_INITIALIZER;
1397#define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock)
1398#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1399void _malloc_prefork(void) { _MALLOC_LOCK(); }
1400void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1401# endif
1402
1403# if defined(__OpenBSD__)
1404extern int __isthreaded;
1405void _thread_malloc_lock(void);
1406void _thread_malloc_unlock(void);
1407#define _MALLOC_LOCK() if (__isthreaded) _thread_malloc_lock()
1408#define _MALLOC_UNLOCK() if (__isthreaded) _thread_malloc_unlock()
1409# endif
1410
1411# if defined(__FreeBSD__)
1412extern int __isthreaded;
1413struct _spinlock {
1414 volatile long access_lock;
1415 volatile long lock_owner;
1416 volatile char *fname;
1417 volatile int lineno;
1418};
1419typedef struct _spinlock spinlock_t;
1420#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
1421void _spinlock(spinlock_t *);
1422void _spinunlock(spinlock_t *);
1423/* # include "/usr/src/lib/libc/include/spinlock.h" */
1424static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
1425spinlock_t *__malloc_lock = &thread_lock;
1426#define _MALLOC_LOCK() if (__isthreaded) _spinlock(&thread_lock)
1427#define _MALLOC_UNLOCK() if (__isthreaded) _spinunlock(&thread_lock)
1428# endif
1429
1430/* Common for all three *BSD
1431 */
1432static int malloc_active = 0;
1433static int dnmalloc_mutex_lock()
1434{
1435 _MALLOC_LOCK();
1436 if (!malloc_active)
1437 {
1438 ++malloc_active;
1439 return 0;
1440 }
1441 assert(malloc_active == 0);
1442 _MALLOC_UNLOCK();
1443 errno = EDEADLK;
1444 return 1;
1445}
1446static int dnmalloc_mutex_unlock()
1447{
1448 --malloc_active;
1449 _MALLOC_UNLOCK();
1450 return 0;
1451}
1452#define MALLOC_PREACTION dnmalloc_mutex_lock()
1453#define MALLOC_POSTACTION dnmalloc_mutex_unlock()
1454int dnmalloc_pthread_init(void) { return 0; }
1455
1456# else
1457
1458/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1459 *
1460 * Works fine on linux (no malloc in pthread_mutex_lock)
1461 * Works on HP-UX if initialized after entering main()
1462 */
1463#include <pthread.h>
1464static int malloc_active = 0;
1465void dnmalloc_fork_prepare(void);
1466void dnmalloc_fork_parent(void);
1467void dnmalloc_fork_child(void);
1468
1469#if !defined(__linux__)
1470
1471static pthread_mutex_t mALLOC_MUTEx;
1472pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1473static int dnmalloc_use_mutex = 0;
1474static void dnmalloc_pthread_init_int(void)
1475{
1476 pthread_mutexattr_t mta;
1477 pthread_mutexattr_init(&mta);
1478 pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
1479 pthread_mutex_init(&(mALLOC_MUTEx), &mta);
1480 pthread_mutexattr_destroy(&mta);
1481 pthread_atfork(dnmalloc_fork_prepare,
1482 dnmalloc_fork_parent,
1483 dnmalloc_fork_child);
1484 dnmalloc_use_mutex = 1;
1485}
1486int dnmalloc_pthread_init(void)
1487{
1488 return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
1489}
1490
1491#else
1492
1493static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1494static int dnmalloc_use_mutex = 1;
1495int dnmalloc_pthread_init(void) {
1496 return pthread_atfork(dnmalloc_fork_prepare,
1497 dnmalloc_fork_parent,
1498 dnmalloc_fork_child);
1499}
1500#endif /* !defined(__linux__) */
1501
1502void dnmalloc_fork_prepare(void) {
1503 if (dnmalloc_use_mutex)
1504 pthread_mutex_lock(&mALLOC_MUTEx);
1505}
1506void dnmalloc_fork_parent(void) {
1507 if (dnmalloc_use_mutex)
1508 pthread_mutex_unlock(&mALLOC_MUTEx);
1509}
1510void dnmalloc_fork_child(void) {
1511#ifdef __GLIBC__
1512 if (dnmalloc_use_mutex)
1513 pthread_mutex_init(&mALLOC_MUTEx, NULL);
1514#else
1515 if (dnmalloc_use_mutex)
1516 pthread_mutex_unlock(&mALLOC_MUTEx);
1517#endif
1518}
1519static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
1520{
1521 if (dnmalloc_use_mutex)
1522 {
1523 int rc = pthread_mutex_lock(mutex);
1524 if (rc == 0)
1525 {
1526 if (!malloc_active)
1527 {
1528 ++malloc_active;
1529 return 0;
1530 }
1531 assert(malloc_active == 0);
1532 (void) pthread_mutex_unlock(mutex);
1533 errno = EDEADLK;
1534 return 1;
1535 }
1536 return rc;
1537 }
1538 return 0;
1539}
1540static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
1541{
1542 if (dnmalloc_use_mutex)
1543 {
1544 --malloc_active;
1545 return pthread_mutex_unlock(mutex);
1546 }
1547 return 0;
1548}
1549# define MALLOC_PREACTION dnmalloc_mutex_lock(&mALLOC_MUTEx)
1550# define MALLOC_POSTACTION dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1551
1552# endif
1553
1554#else
1555
1556/* Substitute anything you like for these */
1557
1558# define MALLOC_PREACTION (0)
1559# define MALLOC_POSTACTION (0)
1560int dnmalloc_pthread_init(void) { return 0; }
1561
1562#endif /* USE_MALLOC_LOCK */
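
/*
  Usage sketch: a threaded application should call dnmalloc_pthread_init()
  once, early in main() and before creating any threads. This is required on
  HP-UX (see the portability notes at the top of this file) and harmless on
  platforms where the function is a no-op:

      int main(void)
      {
        if (dnmalloc_pthread_init() != 0)
          return 1;                 // locking could not be initialized
        // ... create threads, use malloc/free as usual ...
        return 0;
      }
*/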
1563
1564Void_t* public_mALLOc(size_t bytes) {
1565 Void_t* m;
1566 if (MALLOC_PREACTION == 0) {
1567 m = mALLOc(bytes);
1568 (void) MALLOC_POSTACTION;
1569 return m;
1570 }
1571 return 0;
1572}
1573
1574void public_fREe(Void_t* m) {
1575 if (MALLOC_PREACTION == 0) {
1576 fREe(m);
1577 (void) MALLOC_POSTACTION;
1578 }
1579}
1580
1581Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1582 if (MALLOC_PREACTION == 0) {
1583 m = rEALLOc(m, bytes);
1584 (void) MALLOC_POSTACTION;
1585 return m;
1586 }
1587 return 0;
1588}
1589
1590Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1591 Void_t* m;
1592 if (MALLOC_PREACTION == 0) {
1593 m = mEMALIGn(alignment, bytes);
1594 (void) MALLOC_POSTACTION;
1595 return m;
1596 }
1597 return 0;
1598}
1599
1600int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1601 int m, ret;
1602 if ((ret = MALLOC_PREACTION) == 0) {
1603 m = posix_mEMALIGn(memptr, alignment, bytes);
1604 (void) MALLOC_POSTACTION;
1605 return m;
1606 }
1607 return ret;
1608}
1609
1610Void_t* public_vALLOc(size_t bytes) {
1611 Void_t* m;
1612 if (MALLOC_PREACTION == 0) {
1613 m = vALLOc(bytes);
1614 (void) MALLOC_POSTACTION;
1615 return m;
1616 }
1617 return 0;
1618}
1619
1620Void_t* public_pVALLOc(size_t bytes) {
1621 Void_t* m;
1622 if (MALLOC_PREACTION == 0) {
1623 m = pVALLOc(bytes);
1624 (void) MALLOC_POSTACTION;
1625 return m;
1626 }
1627 return 0;
1628}
1629
1630Void_t* public_cALLOc(size_t n, size_t elem_size) {
1631 Void_t* m;
1632 if (MALLOC_PREACTION == 0) {
1633 m = cALLOc(n, elem_size);
1634 (void) MALLOC_POSTACTION;
1635 return m;
1636 }
1637 return 0;
1638}
1639
1640int public_mTRIm(size_t s) {
1641 int result;
1642 if (MALLOC_PREACTION == 0) {
1643 result = mTRIm(s);
1644 (void) MALLOC_POSTACTION;
1645 return result;
1646 }
1647 return 0;
1648}
1649
1650size_t public_mUSABLe(Void_t* m) {
1651 size_t result;
1652 if (MALLOC_PREACTION == 0) {
1653 result = mUSABLe(m);
1654 (void) MALLOC_POSTACTION;
1655 return result;
1656 }
1657 return 0;
1658}
1659
1660void public_mSTATs() {
1661 if (MALLOC_PREACTION == 0) {
1662 mSTATs();
1663 (void) MALLOC_POSTACTION;
1664 }
1665}
1666
1667struct mallinfo public_mALLINFo() {
1668 struct mallinfo m;
1669 if (MALLOC_PREACTION == 0) {
1670 m = mALLINFo();
1671 (void) MALLOC_POSTACTION;
1672 return m;
1673 } else {
1674 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1675 return nm;
1676 }
1677}
1678
1679int public_mALLOPt(int p, int v) {
1680 int result;
1681 if (MALLOC_PREACTION == 0) {
1682 result = mALLOPt(p, v);
1683 (void) MALLOC_POSTACTION;
1684 return result;
1685 }
1686 return 0;
1687}
1688
1689#else
1690
1691int dnmalloc_pthread_init(void) { return 0; }
1692#define DL_STATIC
1693
1694#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1695
1696
1697
1698/* ------------- Optional versions of memcopy ---------------- */
1699
1700
1701#if USE_MEMCPY
1702
1703/*
1704 Note: memcpy is ONLY invoked with non-overlapping regions,
1705 so the (usually slower) memmove is not needed.
1706*/
1707
1708#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1709#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1710
1711#else /* !USE_MEMCPY */
1712
1713/* Use Duff's device for good zeroing/copying performance. */
1714
1715#define MALLOC_ZERO(charp, nbytes) \
1716do { \
1717 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1718 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1719 long mcn; \
1720 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1721 switch (mctmp) { \
1722 case 0: for(;;) { *mzp++ = 0; \
1723 case 7: *mzp++ = 0; \
1724 case 6: *mzp++ = 0; \
1725 case 5: *mzp++ = 0; \
1726 case 4: *mzp++ = 0; \
1727 case 3: *mzp++ = 0; \
1728 case 2: *mzp++ = 0; \
1729 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1730 } \
1731} while(0)
1732
1733#define MALLOC_COPY(dest,src,nbytes) \
1734do { \
1735 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1736 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1737 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1738 long mcn; \
1739 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1740 switch (mctmp) { \
1741 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1742 case 7: *mcdst++ = *mcsrc++; \
1743 case 6: *mcdst++ = *mcsrc++; \
1744 case 5: *mcdst++ = *mcsrc++; \
1745 case 4: *mcdst++ = *mcsrc++; \
1746 case 3: *mcdst++ = *mcsrc++; \
1747 case 2: *mcdst++ = *mcsrc++; \
1748 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1749 } \
1750} while(0)
1751
1752#endif
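/* Illustration only (not part of dnmalloc, deliberately compiled out): how
 * MALLOC_COPY and MALLOC_ZERO are meant to be used -- whole INTERNAL_SIZE_T
 * words over non-overlapping regions. The buffers are made up for the sketch.
 */
#if 0
static void example_malloc_copy_zero(void)
{
  INTERNAL_SIZE_T src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  INTERNAL_SIZE_T dst[8];
  MALLOC_COPY(dst, src, sizeof(src)); /* word-wise copy, regions must not overlap */
  MALLOC_ZERO(src, sizeof(src));      /* word-wise clear */
}
#endif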
1753
1754/* ------------------ MMAP support ------------------ */
1755
1756
1757#if defined(HAVE_FCNTL_H)
1758#include <fcntl.h>
1759#endif
1760
1761#if defined(HAVE_SYS_MMAN_H)
1762#include <sys/mman.h>
1763#endif
1764
1765#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1766#define MAP_ANONYMOUS MAP_ANON
1767#endif
1768
1769/*
1770 Nearly all versions of mmap support MAP_ANONYMOUS,
1771 so the following is unlikely to be needed, but is
1772 supplied just in case.
1773*/
1774
1775#ifndef MAP_ANONYMOUS
1776
1777/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1778 */
1779void * anon_mmap (void *addr, size_t length, int prot, int flags)
1780{
1781 void * retval = NULL;
1782 int dev_zero_fd = -1; /* File descriptor for /dev/zero. */
1783
1784 dev_zero_fd = open("/dev/zero", O_RDWR);
1785 if (dev_zero_fd >= 0)
1786 {
1787 retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
1788 /* closing the file descriptor does not unmap the region */
1789 close(dev_zero_fd);
1790 }
1791 return retval;
1792}
1793
1794#define MMAP(addr, size, prot, flags) \
1795 (anon_mmap((addr), (size), (prot), (flags)))
1796
1797
1798#else /* have MAP_ANONYMOUS */
1799
1800#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1801#define MAP_32BIT MAP_ADDR32
1802#endif
1803
1804#if defined(MAP_32BIT)
1805#define MMAP(addr, size, prot, flags) \
1806 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1807#elif defined(__sun)
1808/*
1809 * Hint an address within 32bit address space
1810 */
1811#define MMAP(addr, size, prot, flags) \
1812 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1813#else
1814/* *BSD */
1815#define MMAP(addr, size, prot, flags) \
1816 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1817#endif
1818
1819#endif /* have MAP_ANONYMOUS */
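/* Illustration only (not part of dnmalloc, deliberately compiled out): the
 * MMAP() wrapper above is always called with page-multiple sizes and its
 * result is checked against MAP_FAILED, as dnmalloc_init() does below.
 * The length used here is an arbitrary example value.
 */
#if 0
static void example_mmap_wrapper(void)
{
  size_t len = 4 * 4096;  /* some page multiple */
  void *p = MMAP(0, len, PROT_READ|PROT_WRITE, MAP_PRIVATE);
  if (p != MAP_FAILED)
    munmap(p, len);
}
#endif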
1820
1821
1822/*
1823 ----------------------- Chunk representations -----------------------
1824*/
1825
1826typedef void * mchunkptr;
1827
1828struct chunkinfo {
1829 INTERNAL_SIZE_T prev_size; /* Size of previous chunk, in bytes */
1830 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1831 INTERNAL_SIZE_T req; /* Original request size, for guard. */
1832 struct chunkinfo* hash_next; /* pointer to the next chunkinfo in the
1833 linked list of chunks that hash to
1834 the same hashtable entry */
1835 struct chunkinfo* fd; /* double links -- used only if free. */
1836 struct chunkinfo* bk;
1837 mchunkptr chunk;
1838};
1839
1840typedef struct chunkinfo* chunkinfoptr;
1841
1842struct cireginfo {
1843 unsigned long position;
1844 unsigned long *freebitmap;
1845 struct cireginfo* next;
1846 struct chunkinfo *freelist;
1847 struct chunkinfo *begin;
1848 unsigned long freecounter;
1849};
1850
1851/*
1852 ---------- Size and alignment checks and conversions ----------
1853*/
1854
1855/* conversion from malloc headers to user pointers, and back */
1856#define chunk(p) (p->chunk)
1857
1858
1859#define chunk2mem(p) (chunk(p))
1860#define mem2chunk(mem) (hashtable_lookup(mem))
1861
1862/* The smallest possible chunk */
1863#define MIN_CHUNK_SIZE 16
1864
1865/* The smallest size we can malloc is an aligned minimal chunk */
1866
1867#define MINSIZE \
1868 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1869
1870/* Check if m has acceptable alignment */
1871
1872#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1873
1874#define GUARD_SIZE 4
1875
1876/*
1877 Check if a request is so large that it would wrap around zero when
1878 padded and aligned. To simplify some other code, the bound is made
1879 low enough so that adding MINSIZE will also not wrap around zero.
1880
1881 Make it 4*MINSIZE.
1882*/
1883
1884#define REQUEST_OUT_OF_RANGE(req) \
1885 ((CHUNK_SIZE_T)(req) >= \
1886 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))
1887
1888/* pad request bytes into a usable size -- internal version */
1889
1890#define request2size(req) \
1891 (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE) ? \
1892 ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1893 MINSIZE)
1894
1895/* Same, except also perform argument check */
1896
1897#define checked_request2size(req, sz) \
1898 if (!REQUEST_OUT_OF_RANGE(req)) { \
1899 (sz) = request2size(req); \
1900 assert((sz-req) >= GUARD_SIZE); \
1901 } else { \
1902 MALLOC_FAILURE_ACTION; \
1903 return 0; \
1904 }
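/* Worked example (illustrative), assuming MALLOC_ALIGNMENT == 8
 * (MALLOC_ALIGN_MASK == 7), GUARD_SIZE == 4 and MINSIZE == 16:
 *   request2size(1)  : 1 + 4 + 7 = 12 < 16         -> MINSIZE = 16
 *   request2size(10) : (10 + 4 + 7) & ~7           -> 16
 *   request2size(20) : (20 + 4 + 7) & ~7           -> 24
 * In each case at least GUARD_SIZE bytes are left past the request,
 * which is exactly what checked_request2size() asserts.
 */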
1905
1906#if PARANOIA > 2
1907static char * guard_set_p;
1908static char * guard_set_q;
1909
1910#define guard_set(guard, P, request, sz) \
1911 assert((sz-request) >= GUARD_SIZE); \
1912 guard_set_p = (char*)(chunk(P)); \
1913 guard_set_p += request; \
[172]1914 VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE); \
[171]1915 guard_set_q = (char*)(guard); \
1916 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1917 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1918 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1919 *guard_set_p = *guard_set_q; \
[172]1920 VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE); \
[171]1921 (P)->req = request
1922
1923#define guard_check(guard, P) \
[172]1924 VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1925 assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1926 VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
[171]1927
1928#else
1929#define guard_set(guard, P, request, sz) ((void)0)
1930#define guard_check(guard, P) ((void)0)
1931#endif /* PARANOIA > 2 */
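/* Illustrative layout of the canary (assuming GUARD_SIZE == 4): for a
 * request of req bytes served from a chunk with sz usable bytes
 * (sz - req >= GUARD_SIZE), guard_set() places the random canary right
 * after the requested bytes and guard_check() verifies it again on
 * free()/realloc():
 *
 *   chunk(P): [ req bytes for the caller | 4 canary bytes | padding ]
 */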
1932
1933/* dnmalloc forward declarations */
1934static char * dnmalloc_arc4random(void);
1935static void dnmalloc_init (void);
1936static void malloc_mmap_state(void);
1937static void cireg_extend (void);
1938static chunkinfoptr cireg_getfree (void);
1939static void hashtable_add (chunkinfoptr ci);
1940static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1941static void hashtable_remove (mchunkptr p);
1942static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1943static chunkinfoptr hashtable_lookup (mchunkptr p);
1944static chunkinfoptr next_chunkinfo (chunkinfoptr ci);
1945static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1946
1947
1948
1949/*
1950 --------------- Physical chunk operations ---------------
1951*/
1952
1953
1954/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1955#define PREV_INUSE 0x1
1956
1957/* extract inuse bit of previous chunk */
1958#define prev_inuse(p) ((p)->size & PREV_INUSE)
1959
1960/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1961#define IS_MMAPPED 0x2
1962
1963/* check for mmap()'ed chunk */
1964#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1965
1966
1967/* size field is or'ed with INUSE when the chunk is in use */
1968#define INUSE 0x4
1969
1970/* extract inuse bit of chunk */
1971#define inuse(p) ((p)->size & INUSE)
1972
1973/*
1974 Bits to mask off when extracting size
1975
1976 Note: IS_MMAPPED is intentionally not masked off from size field in
1977 macros for which mmapped chunks should never be seen. This should
1978 cause helpful core dumps to occur if it is tried by accident by
1979 people extending or adapting this malloc.
1980*/
1981#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
1982
1983/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
1984#define SIZE_NOMMAP (PREV_INUSE|INUSE)
1985
1986/* Get size, ignoring use bits */
1987#define chunksize(p) ((p)->size & ~(SIZE_BITS))
1988
1989/* Ptr to chunkinfo of next physical malloc_chunk. */
1990#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
1991
1992/* Treat space at ptr + offset as a chunk */
1993#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1994
1995/* set/clear chunk as being inuse without otherwise disturbing */
1996#define set_inuse(p) ((p)->size |= INUSE)
1997
1998#define clear_inuse(p) ((p)->size &= ~(INUSE))
1999
2000#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2001
2002#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2003
2004static void set_previnuse_next (chunkinfoptr p)
2005{
2006 chunkinfoptr q;
2007 q = next_chunkinfo (p);
2008 if (q)
2009 set_previnuse (q);
2010}
2011
2012#define set_all_inuse(p) \
2013set_inuse(p); \
2014set_previnuse_next(p);
2015
2016
2017/* Set size at head, without disturbing its use bit */
2018#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2019
2020/* Set size/use field */
2021#define set_head(p, s) ((p)->size = (s))
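/* Worked example (illustrative): a 48-byte in-use chunk whose physical
 * predecessor is also in use stores size = 48 | PREV_INUSE | INUSE = 0x35.
 * Then chunksize(p) == 48, inuse(p) and prev_inuse(p) are non-zero and
 * chunk_is_mmapped(p) == 0; set_head_size(p, 32) would change only the
 * size while keeping those two flag bits.
 */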
2022
2023/*
2024 Bins
2025
2026 An array of bin headers for free chunks. Each bin is doubly
2027 linked. The bins are approximately proportionally (log) spaced.
2028 There are a lot of these bins (96). This may look excessive, but
2029 works very well in practice. Most bins hold sizes that are
2030 unusual as malloc request sizes, but are more usual for fragments
2031 and consolidated sets of chunks, which is what these bins hold, so
2032 they can be found quickly. All procedures maintain the invariant
2033 that no consolidated chunk physically borders another one, so each
2034 chunk in a list is known to be preceded and followed by either
2035 inuse chunks or the ends of memory.
2036
2037 Chunks in bins are kept in size order, with ties going to the
2038 approximately least recently used chunk. Ordering isn't needed
2039 for the small bins, which all contain the same-sized chunks, but
2040 facilitates best-fit allocation for larger chunks. These lists
2041 are just sequential. Keeping them in order almost never requires
2042 enough traversal to warrant using fancier ordered data
2043 structures.
2044
2045 Chunks of the same size are linked with the most
2046 recently freed at the front, and allocations are taken from the
2047 back. This results in LRU (FIFO) allocation order, which tends
2048 to give each chunk an equal opportunity to be consolidated with
2049 adjacent freed chunks, resulting in larger free chunks and less
2050 fragmentation.
2051
2052 To simplify use in double-linked lists, each bin header acts
2053 as a malloc_chunk. This avoids special-casing for headers.
2054 But to conserve space and improve locality, we allocate
2055 only the fd/bk pointers of bins, and then use repositioning tricks
2056 to treat these as the fields of a malloc_chunk*.
2057*/
2058
2059typedef struct chunkinfo* mbinptr;
2060
2061/* addressing -- note that bin_at(0) does not exist */
2062#define bin_at(m, i) (&(m)->bins[i])
2063
2064/* analog of ++bin */
2065#define next_bin(b) (b+1)
2066
2067/* Reminders about list directionality within bins */
2068#define first(b) ((b)->fd)
2069#define last(b) ((b)->bk)
2070
2071/* Take a chunk off a bin list */
2072#define unlink(P, BK, FD) { \
2073 FD = P->fd; \
2074 BK = P->bk; \
2075 FD->bk = BK; \
2076 BK->fd = FD; \
2077}
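/* Illustration only (not part of dnmalloc, deliberately compiled out):
 * unlink() splices a chunkinfo out of its circular doubly-linked bin list;
 * BK and FD receive its former neighbours. The names below are made up.
 */
#if 0
static void example_unlink(chunkinfoptr victim)
{
  chunkinfoptr bck;
  chunkinfoptr fwd;
  unlink(victim, bck, fwd); /* victim's neighbours now point past victim */
  /* bck == victim's old bk, fwd == victim's old fd */
}
#endif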
2078
2079/*
2080 Indexing
2081
2082 Bins for sizes < 256 bytes contain chunks of all the same size, spaced
2083 8 bytes apart. Larger bins are approximately logarithmically spaced:
2084
2085 32 small bins of width 8
2086 63 larger bins, 4 per power of two
2087 1 catch-all bin for whatever is left
2088
2089 The bins top out around 16MB because we expect to service large
2090 requests via mmap.
2095*/
2096
2097#define NBINS 96
2098#define NSMALLBINS 32
2099#define SMALLBIN_WIDTH 8
2100#define MIN_LARGE_SIZE 256
2101
2102#define in_smallbin_range(sz) \
2103 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2104
2105#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2106
2107/*
2108 Compute index for size. We expect this to be inlined when
2109 compiled with optimization, else not, which works out well.
2110*/
2111static int largebin_index(size_t sz) {
2112
2113 unsigned long xx = sz >> SMALLBIN_WIDTH;
2114
2115 if (xx < 0x10000)
2116 {
2117 unsigned int m; /* bit position of highest set bit */
2118
2119 /* On intel, use BSRL instruction to find highest bit */
2120#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)
2121
2122 unsigned int x = (unsigned int) xx;
2123
2124 __asm__("bsrl %1,%0\n\t"
2125 : "=r" (m)
2126 : "rm" (x));
2127
2128#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)
2129
2130 __asm__("bsrq %1,%0\n\t"
2131 : "=r" (m)
2132 : "rm" (xx));
2133
2134#else
2135
2136 /* Taken from Bit Twiddling Hacks
2137 * http://graphics.stanford.edu/~seander/bithacks.html
2138 * public domain
2139 */
2140 unsigned int v = (unsigned int) xx;
2141 register unsigned int shift;
2142
2143 m = (v > 0xFFFF) << 4; v >>= m;
2144 shift = (v > 0xFF ) << 3; v >>= shift; m |= shift;
2145 shift = (v > 0xF ) << 2; v >>= shift; m |= shift;
2146 shift = (v > 0x3 ) << 1; v >>= shift; m |= shift;
2147 m |= (v >> 1);
2148
2149#endif
2150
2151 /* Use next 2 bits to create finer-granularity bins */
2152 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2153 }
2154 else
2155 {
2156 return NBINS-1;
2157 }
2158}
2159
2160#define bin_index(sz) \
2161 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
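/* Worked examples (illustrative), using the constants above:
 *   bin_index(64)   = smallbin_index(64)  = 64 >> 3           =  8
 *   bin_index(248)  = smallbin_index(248) = 248 >> 3          = 31
 *   bin_index(256)  = largebin_index(256): xx = 256 >> 8 = 1, m = 0,
 *                     32 + (0 << 2) + ((256 >> 6) & 3)        = 32
 *   bin_index(1024) = largebin_index(1024): xx = 4, m = 2,
 *                     32 + (2 << 2) + ((1024 >> 8) & 3)       = 40
 */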
2162
2163/*
2164 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2165 first bin that is maintained in sorted order. This must
2166 be the smallest size corresponding to a given bin.
2167
2168 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2169 best fit guarantees to sometimes speed up malloc by increasing this value.
2170 Doing this means that malloc may choose a chunk that is
2171 non-best-fitting by up to the width of the bin.
2172
2173 Some useful cutoff values:
2174 512 - all bins sorted
2175 2560 - leaves bins <= 64 bytes wide unsorted
2176 12288 - leaves bins <= 512 bytes wide unsorted
2177 65536 - leaves bins <= 4096 bytes wide unsorted
2178 262144 - leaves bins <= 32768 bytes wide unsorted
2179 -1 - no bins sorted (not recommended!)
2180*/
2181
2182/* #define FIRST_SORTED_BIN_SIZE 65536 */
2183
2184#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2185
[172]2186
[171]2187/*
2188 Unsorted chunks
2189
2190 All remainders from chunk splits, as well as all returned chunks,
2191 are first placed in the "unsorted" bin. They are then placed
2192 in regular bins after malloc gives them ONE chance to be used before
2193 binning. So, basically, the unsorted_chunks list acts as a queue,
2194 with chunks being placed on it in free (and malloc_consolidate),
2195 and taken off (to be either used or placed in bins) in malloc.
2196*/
2197
2198/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2199#define unsorted_chunks(M) (bin_at(M, 1))
2200
2201/*
2202 Top
2203
2204 The top-most available chunk (i.e., the one bordering the end of
2205 available memory) is treated specially. It is never included in
2206 any bin, is used only if no other chunk is available, and is
2207 released back to the system if it is very large (see
2208 M_TRIM_THRESHOLD). Because top initially
2209 points to its own bin with initial zero size, thus forcing
2210 extension on the first malloc request, we avoid having any special
2211 code in malloc to check whether it even exists yet. But we still
2212 need to do so when getting memory from system, so we make
2213 initial_top treat the bin as a legal but unusable chunk during the
2214 interval between initialization and the first call to
2215 sYSMALLOc. (This is somewhat delicate, since it relies on
2216 the 2 preceding words to be zero during this interval as well.)
2217*/
2218
2219/* Conveniently, the unsorted bin can be used as dummy top on first call */
2220#define initial_top(M) (unsorted_chunks(M))
2221
2222/*
2223 Binmap
2224
2225 To help compensate for the large number of bins, a one-level index
2226 structure is used for bin-by-bin searching. `binmap' is a
2227 bitvector recording whether bins are definitely empty so they can
2228 be skipped over during traversals. The bits are NOT always
2229 cleared as soon as bins are empty, but instead only
2230 when they are noticed to be empty during traversal in malloc.
2231*/
2232
2233/* Conservatively use 32 bits per map word, even if on 64bit system */
2234#define BINMAPSHIFT 5
2235#define BITSPERMAP (1U << BINMAPSHIFT)
2236#define BINMAPSIZE (NBINS / BITSPERMAP)
2237
2238#define idx2block(i) ((i) >> BINMAPSHIFT)
2239#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2240
2241#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2242#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2243#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
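/* Worked example (illustrative): bin index 40 lives in map word
 * idx2block(40) = 40 >> 5 = 1, at bit idx2bit(40) = 1 << (40 & 31) = 1 << 8.
 * mark_bin(m, 40) therefore sets bit 8 of m->binmap[1], and
 * get_binmap(m, 40) tests exactly that bit.
 */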
2244
2245/*
2246 Fastbins
2247
2248 An array of lists holding recently freed small chunks. Fastbins
2249 are not doubly linked. It is faster to single-link them, and
2250 since chunks are never removed from the middles of these lists,
2251 double linking is not necessary. Also, unlike regular bins, they
2252 are not even processed in FIFO order (they use faster LIFO) since
2253 ordering doesn't much matter in the transient contexts in which
2254 fastbins are normally used.
2255
2256 Chunks in fastbins keep their inuse bit set, so they cannot
2257 be consolidated with other free chunks. malloc_consolidate
2258 releases all chunks in fastbins and consolidates them with
2259 other free chunks.
2260*/
2261
2262typedef struct chunkinfo* mfastbinptr;
2263
2264/* offset 2 to use otherwise unindexable first 2 bins */
2265#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2266
2267/* The maximum fastbin request size we support */
2268#define MAX_FAST_SIZE 80
2269
2270#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
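/* Worked example (illustrative), assuming MALLOC_ALIGNMENT == 8 and
 * GUARD_SIZE == 4: malloc(16) becomes request2size(16) = 24, and
 * fastbin_index(24) = (24 >> 3) - 2 = 1, so such chunks live in
 * fastbins[1]. With MAX_FAST_SIZE == 80, request2size(80) = 88 yields
 * the largest index, (88 >> 3) - 2 = 9, hence NFASTBINS == 10.
 */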
2271
2272/*
2273 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2274 that triggers automatic consolidation of possibly-surrounding
2275 fastbin chunks. This is a heuristic, so the exact value should not
2276 matter too much. It is defined at half the default trim threshold as a
2277 compromise heuristic to only attempt consolidation if it is likely
2278 to lead to trimming. However, it is not dynamically tunable, since
2279 consolidation reduces fragmentation surrounding large chunks even
2280 if trimming is not used.
2281*/
2282
2283#define FASTBIN_CONSOLIDATION_THRESHOLD \
2284 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2285
2286/*
2287 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2288 they are used as flags.
2289*/
2290
2291/*
2292 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2293 freed chunks at all. It is set true when entering a chunk into any
2294 bin.
2295*/
2296
2297#define ANYCHUNKS_BIT (1U)
2298
2299#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2300#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2301#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2302
2303/*
2304 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2305 some fastbin chunks. It is set true on entering a chunk into any
2306 fastbin, and cleared only in malloc_consolidate.
2307*/
2308
2309#define FASTCHUNKS_BIT (2U)
2310
2311#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2312#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2313#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2314
2315/*
2316 Set value of max_fast.
2317 Use impossibly small value if 0.
2318*/
2319
2320#define set_max_fast(M, s) \
2321 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2322 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2323
2324#define get_max_fast(M) \
2325 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
2326
2327
2328/*
2329 morecore_properties is a status word holding dynamically discovered
2330 or controlled properties of the morecore function
2331*/
2332
2333#define MORECORE_CONTIGUOUS_BIT (1U)
2334
2335#define contiguous(M) \
2336 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2337#define noncontiguous(M) \
2338 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2339#define set_contiguous(M) \
2340 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2341#define set_noncontiguous(M) \
2342 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2343
2344#define MORECORE_32BIT_BIT (2U)
2345
2346#define morecore32bit(M) \
2347 (((M)->morecore_properties & MORECORE_32BIT_BIT))
2348#define nonmorecore32bit(M) \
2349 (((M)->morecore_properties & MORECORE_32BIT_BIT) == 0)
2350#define set_morecore32bit(M) \
2351 ((M)->morecore_properties |= MORECORE_32BIT_BIT)
2352#define set_nonmorecore32bit(M) \
2353 ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2354
2355
2356
2357/* ----------------- dnmalloc -------------------- */
2358
2359/* size of pages */
2360#define PGSIZE malloc_getpagesize
2361/* pointer size */
2362#define PTRSIZE sizeof(long)
2363
2364
2365
2366/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2367 address space: the hashtable has granularity of 16*8, set it to something
2368 closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2369 page)
2370*/
2371
2372/* The maximum heap size that dnmalloc can operate with
2373 * represented in hex to avoid annoying gcc warning
2374 *
2375 * Avoid integer overflow, cover complete 32bit address
2376 * space for portability. With deferred allocation, the
2377 * hashtable size is a non-issue.
2378 */
2379#define HEAPMAXSIZE_HALF 0x80000000UL
2380
2381/* How many elements are stored in the linked list */
2382#define LINKEDLSTELS 8
2383
2384/* Minimum size of a chunk */
2385
[172]2386#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2387# define MINCHUNKSIZE 32
2388#else
2389# define MINCHUNKSIZE 16
2390#endif
2391
2392
2393/* The number of hashtable entries for each page:
2394page size divided by the number of elements in the linked lists,
2395divided by the minimum chunk size
2396*/
2397#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2398
2399/* The number of hashtable entries needed to manage the memory:
2400maximum heap size divided by page size, multiplied by the number
2401of chunkinfos per page
2402*/
2403#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2404
2405/* Initial size of the map for the hashtable:
2406number of entries multiplied by the pointer size
2407*/
2408#define HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
2409
2410/* Number of free chunkinfos that each chunkinfo region provides */
2411#define NUMBER_FREE_CHUNKS 32768
2412
2413/* Initial size of the chunk info region,
2414also used when growing the region */
2415#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
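/* Worked example (illustrative), assuming PGSIZE == 4096 on a 32bit
 * system (MINCHUNKSIZE == 16, PTRSIZE == 4):
 *   CHUNKINFOPAGE = 4096 / 8 / 16                 = 32 entries per page
 *   AMOUNTHASH    = (0x80000000 / 4096) * 32 * 2  = 33554432 entries
 *   HASHTABLESIZE = 33554432 * 4                  = 128MB of address space,
 *                   reserved lazily and mostly never touched
 *   CIREGSIZE     = 32768 * sizeof(struct chunkinfo)
 */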
2416
2417/* Start address of the heap */
2418char *startheap;
2419
2420/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2421chunkinfoptr *hashtable;
2422
2423/* Current chunkinfo region */
2424struct cireginfo *currciinfo = 0;
2425struct cireginfo *firstciinfo = 0;
2426
2427unsigned long totalcictr = 0;
2428
2429
2430/* Initialize the area for chunkinfos and the hashtable and protect
2431 * it with non-writable pages
2432 */
2433static void
2434dnmalloc_init ()
2435{
2436 void *hashtb;
2437 int mprot;
2438 int flags = MAP_PRIVATE;
2439
2440 /* Allocate the malloc_state struct */
2441 malloc_mmap_state();
2442
2443 /* Use MAP_NORESERVE if available (Solaris, HP-UX); most other
2444 * systems use deferred allocation anyway.
2445 */
2446#ifdef MAP_NORESERVE
2447 flags |= MAP_NORESERVE;
2448#endif
2449
2450 /* Always start at 0, hashtable covers whole 32bit address space
2451 */
2452#define STARTHEAP_IS_ZERO
2453 startheap = 0;
2454
2455 /* Map space for the hashtable */
2456#if PARANOIA > 1
2457 hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
2458#else
2459 hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
2460#endif
2461
2462#ifdef NDEBUG
2463 if (hashtb == MAP_FAILED) {
2464 fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
2465 abort ();
2466 }
2467#else
2468 assert(hashtb != MAP_FAILED);
2469#endif
2470
2471 /* Protect the hashtable with a non-writable guard page in front */
2472 mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
2473#ifdef NDEBUG
2474 if (mprot == -1) {
2475 fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
2476 strerror (errno));
2477 abort ();
2478 }
2479#else
2480 assert(mprot != -1);
2481#endif
2482
2483 /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
2484 hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);
2485
2486 /* Protect the hashtable with a non-writable guard page at the end */
2487#if PARANOIA > 1
2488 mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
2489#ifdef NDEBUG
2490 if (mprot == -1) {
2491 fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
2492 strerror (errno));
2493 abort ();
2494 }
2495#else
2496 assert(mprot != -1);
2497#endif
2498#endif
2499}
2500
2501
2502
2503/* Extend the region for chunk infos by mapping more memory before the region */
2504static void
2505cireg_extend ()
2506{
2507 void *newcireg;
2508 int mprot;
2509 struct cireginfo *tempciinfo = 0;
2510
2511#if PARANOIA > 1
2512 newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
2513#else
2514 newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
2515#endif
2516
2517#ifdef NDEBUG
2518 if (newcireg == MAP_FAILED)
2519 {
2520 fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
2521 strerror (errno));
2522 abort ();
2523 }
2524#else
2525 assert(newcireg != MAP_FAILED);
2526#endif
2527 mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
2528#ifdef NDEBUG
2529 if (mprot == -1) {
2530 fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
2531 strerror (errno));
2532 abort ();
2533 }
2534#else
2535 assert(mprot != -1);
2536#endif
2537 newcireg = ((char*)newcireg)+PGSIZE;
2538
2539#if PARANOIA > 1
2540 mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
2541#ifdef NDEBUG
2542 if (mprot == -1) {
2543 fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
2544 strerror (errno));
2545 abort ();
2546 }
2547#else
2548 assert(mprot != -1);
2549#endif
2550#endif
2551
2552 tempciinfo = currciinfo;
2553 currciinfo = (struct cireginfo *) newcireg;
2554 if (tempciinfo)
2555 tempciinfo->next = currciinfo;
2556 currciinfo->position = 1;
2557 currciinfo->freecounter = NUMBER_FREE_CHUNKS;
2558 if (!firstciinfo)
2559 firstciinfo = currciinfo;
2560 totalcictr++;
[172]2561 VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
[171]2562}
2563
2564
2565/* Get a free chunkinfo */
2566static chunkinfoptr
2567cireg_getfree ()
2568{
2569 chunkinfoptr freeci;
2570 chunkinfoptr freelst = 0;
2571 struct cireginfo *newciinfo = firstciinfo;
2572
2573 if (newciinfo) {
2574 freelst = newciinfo->freelist;
2575
2576 if (!freelst && newciinfo->next) {
2577 do {
2578 newciinfo = newciinfo->next;
2579 freelst = newciinfo->freelist;
2580 } while (!freelst && newciinfo->next);
2581 }
2582 }
2583
2584 /* Check if there are any free chunkinfos on the list of free chunkinfos */
2585 if (freelst)
2586 {
[172]2587 freeci = freelst;
2588 newciinfo->freecounter--;
2589 newciinfo->freelist = freelst->fd;
2590
2591 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2592 sizeof(struct chunkinfo));
2593
2594 freeci->prev_size = 0;
2595 freeci->size = 0;
2596 freeci->req = 0;
2597 freeci->hash_next = NULL;
2598 freeci->fd = NULL;
2599 freeci->bk = NULL;
2600 freeci->chunk = NULL;
2601 return (freeci);
[171]2602 }
2603 else
2604 {
2605 /* No free chunkinfos; check whether the chunkinfo region still has
2606 * room for a chunkinfo. If not, extend the region.
2607 */
2608 if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2609 cireg_extend ();
2610 /* Get a chunkinfo from the chunkinfo region */
2611 freeci = (chunkinfoptr) currciinfo + currciinfo->position;
2612 currciinfo->freecounter--;
2613 currciinfo->position++;
[172]2614
2615 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2616 sizeof(struct chunkinfo));
2617
[171]2618 return (freeci);
2619 }
2620}
2621
2622static void freeciregion(struct cireginfo *freeme) {
2623 /* free the chunkinfo region */
2624 struct cireginfo *newciinfo = firstciinfo;
2625 struct cireginfo *prevciinfo = firstciinfo;
2626 void *unmapme;
[172]2627
[171]2628 while (newciinfo && newciinfo != freeme) {
2629 prevciinfo = newciinfo;
2630 newciinfo = newciinfo->next;
2631 }
2632 assert(freeme == newciinfo); /* rw */
2633 assert(newciinfo != NULL); /* rw */
2634 if (newciinfo)
2635 prevciinfo->next = newciinfo->next;
2636 unmapme = (void *) ((char*)freeme - PGSIZE);
[172]2637 VALGRIND_DESTROY_MEMPOOL((char*)freeme);
[171]2638#if PARANOIA > 1
2639 munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2640#else
2641 munmap(unmapme, CIREGSIZE+PGSIZE);
2642#endif
2643}
2644
2645
2646static void freecilst_add(chunkinfoptr p) {
2647
2648 struct cireginfo *newciinfo;
[172]2649
[171]2650 newciinfo = currciinfo;
2651 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2652 p->fd = newciinfo->freelist;
2653 newciinfo->freelist = p;
2654 newciinfo->freecounter++;
[172]2655 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2656 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2657 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2658 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2659 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2660 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2661 } else {
2662 newciinfo = firstciinfo;
2663 if (newciinfo) {
2664 do {
2665 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2666 p->fd = newciinfo->freelist;
2667 newciinfo->freelist = p;
2668 newciinfo->freecounter++;
[172]2669 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2670 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2671 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2672 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2673 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2674 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2675 if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2676 freeciregion(newciinfo);
2677 break;
2678 }
2679 newciinfo = newciinfo->next;
2680 } while (newciinfo);
2681 }
2682 }
2683}
2684
2685/* Calculate the hash table entry for a chunk */
2686#ifdef STARTHEAP_IS_ZERO
2687#define hash(p) (((unsigned long) p) >> 7)
2688#else
2689#define hash(p) (((unsigned long) p - (unsigned long) startheap) >> 7)
2690#endif
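/* Worked example (illustrative): hash(p) discards the low 7 bits, so one
 * hashtable entry covers 128 bytes = LINKEDLSTELS * MINCHUNKSIZE
 * (8 * 16 on 32bit). For instance, chunks starting anywhere in
 * 0x08048400..0x0804847f all hash to 0x08048400 >> 7 = 0x100908 and are
 * kept in that entry's hash_next list, sorted by address.
 */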
2691
2692static void
2693hashtable_add (chunkinfoptr ci)
2694{
2695 chunkinfoptr temp, next;
2696 unsigned long hashval;
2697 mchunkptr cic = chunk (ci);
2698
2699 hashval = hash (cic);
2700
2701 if (hashval < AMOUNTHASH) {
2702
2703 temp = hashtable[hashval];
2704
2705#ifdef DNMALLOC_DEBUG
2706 fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
2707#endif
2708
2709 /* If no pointer to a chunk info list is stored at this location
2710 * in the hashtable or if the chunk's address is smaller than the
2711 * one present, add the chunk to the front of the linked list
2712 */
2713 if (temp == 0 || chunk (temp) > cic)
2714 {
2715 ci->hash_next = temp;
2716 hashtable[hashval] = ci;
2717 if (!temp) /* more likely case */
2718 goto out;
2719 temp->prev_size = chunksize(ci);
2720 return;
2721 }
2722 else
2723 {
2724 /* We must place the chunk in the linked list for this hashentry
2725 * Loop to end of list or to a position where temp's chunk's address
2726 * is larger than the new chunkinfo's chunk's address
2727 */
2728 if (!temp->hash_next || (chunk (temp->hash_next) > cic))
2729 {
2730 ci->hash_next = temp->hash_next;
2731 temp->hash_next = ci;
2732 }
2733 else
2734 {
2735 while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
2736 {
2737 temp = temp->hash_next;
2738 }
2739 /* Place in linked list if not already there */
2740 if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
2741 {
2742 ci->hash_next = temp->hash_next;
2743 temp->hash_next = ci;
2744 }
2745 }
2746 }
2747 }
2748 else {
2749#ifdef DNMALLOC_CHECKS
2750 if (hashval >= AMOUNTHASH) {
2751 fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
2752 abort();
2753 }
2754#else
2755 assert(hashval < AMOUNTHASH);
2756#endif
2757 }
2758
2759 out:
2760 next = next_chunkinfo(ci);
2761 if (!next)
2762 return;
2763 next->prev_size = chunksize(ci);
2764}
2765
2766static void
2767hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2768{
2769 chunkinfoptr next;
2770
2771#ifdef DNMALLOC_DEBUG
2772 fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert),
2773 (unsigned long)hash(chunk(ci_insert)));
2774#endif
2775
2776 if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2777 hashtable_add(ci_insert);
2778 }
2779 else {
[172]2780
[171]2781 ci_insert->hash_next = ci_orig->hash_next;
2782 ci_orig->hash_next = ci_insert;
2783
2784 /* added for prevsize */
2785 if (!(ci_insert->hash_next))
2786 next = next_chunkinfo(ci_insert);
2787 else
2788 next = ci_insert->hash_next;
2789
2790 if (!next)
2791 {
2792 ci_insert->prev_size = chunksize(ci_orig);
2793 }
2794 else
2795 {
2796 next->prev_size = chunksize(ci_insert);
2797 ci_insert->prev_size = chunksize(ci_orig);
2798 }
2799 }
2800}
2801
2802static void
2803hashtable_remove (mchunkptr p)
2804{
2805 chunkinfoptr prevtemp, temp;
2806 unsigned long hashval;
2807
2808 hashval = hash (p);
2809#ifdef DNMALLOC_DEBUG
2810 fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
2811#endif
2812 assert(hashval < AMOUNTHASH); /* rw */
2813 prevtemp = temp = hashtable[hashval];
2814 if (chunk (temp) == p) {
2815 hashtable[hashval] = temp->hash_next;
2816 }
2817 else
2818 {
2819 if (temp && chunk (temp) != p) {
2820 do
2821 {
2822 prevtemp = temp;
2823 temp = temp->hash_next;
2824 } while (temp && chunk (temp) != p);
2825 }
2826#ifdef DNMALLOC_CHECKS
2827 if (!temp) {
2828 fprintf (stderr,
2829 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2830 p, hashval);
2831 abort();
2832 }
2833#else
2834 assert(temp != NULL);
2835#endif
2836 prevtemp->hash_next = temp->hash_next;
2837 }
2838}
2839
[172]2840/* mmapped chunks are multiples of pagesize, no hash_nexts,
2841 * just remove from the hashtable
2842 */
[171]2843#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2844
2845static void
2846hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
2847{
2848 unsigned long hashval;
2849 chunkinfoptr next;
2850
2851#ifdef DNMALLOC_DEBUG
[172]2852 fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
[171]2853#endif
2854
2855 if (ci_orig->hash_next != ci_todelete) {
2856 hashval = hash(chunk(ci_todelete));
2857 assert(hashval < AMOUNTHASH); /* rw */
2858#ifdef DNMALLOC_CHECKS
2859 if (hashtable[hashval] != ci_todelete ) {
2860 fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
2861 }
2862#else
2863 assert(hashtable[hashval] == ci_todelete);
2864#endif
2865 hashtable[hashval] = ci_todelete->hash_next;
2866 }
2867
2868 else {
2869 ci_orig->hash_next = ci_todelete->hash_next;
2870 if (!ci_orig->hash_next) {
2871 next = next_chunkinfo(ci_orig);
2872 } else {
2873 next = ci_orig->hash_next;
2874 }
2875 if (next)
2876 next->prev_size = chunksize(ci_orig);
2877
2878 }
2879}
2880
2881
2882static chunkinfoptr
2883hashtable_lookup (mchunkptr p)
2884{
2885 chunkinfoptr ci;
2886 unsigned long hashval;
2887
2888 /* if we were called wrongly
2889 * if ((char *) p < startheap) return 0;
2890 */
2891 if ((char *) p >= startheap)
2892 {
2893 hashval = hash (p);
2894 assert(hashval < AMOUNTHASH); /* rw */
2895 ci = hashtable[hashval];
2896 if (ci && chunk (ci) == p)
2897 return ci;
2898
2899 if (ci) {
2900 do {
2901 ci = ci->hash_next;
2902 } while (ci && chunk (ci) != p);
2903 }
2904#ifdef DNMALLOC_CHECKS
2905 /* This should never occur but if it does, we'd like to know */
2906 if (!ci) {
2907 fprintf (stderr,
2908 "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2909 p, hashval);
2910 abort();
2911 }
2912#else
2913 assert(ci != NULL);
2914#endif
2915 return ci;
2916 }
2917 return 0;
2918}
2919
2920
2921
2922/*
2923 ----------- Internal state representation and initialization -----------
2924*/
2925
2926struct malloc_state {
2927
2928 /* The maximum chunk size to be eligible for fastbin */
2929 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2930
2931 /* Fastbins */
2932 mfastbinptr fastbins[NFASTBINS];
2933
2934 /* Base of the topmost chunk -- not otherwise kept in a bin */
2935 chunkinfoptr top;
2936
2937 /* The remainder from the most recent split of a small request */
2938 chunkinfoptr last_remainder;
2939
2940 /* Normal bins */
2941 struct chunkinfo bins[NBINS];
2942
2943 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2944 unsigned int binmap[BINMAPSIZE+1];
2945
2946 /* Tunable parameters */
2947 CHUNK_SIZE_T trim_threshold;
2948 INTERNAL_SIZE_T top_pad;
2949 INTERNAL_SIZE_T mmap_threshold;
2950
2951 /* Memory map support */
2952 int n_mmaps;
2953 int n_mmaps_max;
2954 int max_n_mmaps;
2955
2956 /* Cache malloc_getpagesize */
2957 unsigned int pagesize;
2958
2959 /* Canary */
2960 char guard_stored[GUARD_SIZE];
2961
2962 /* Track properties of MORECORE */
2963 unsigned int morecore_properties;
2964
2965 /* Statistics */
2966 INTERNAL_SIZE_T mmapped_mem;
2967 INTERNAL_SIZE_T sbrked_mem;
2968 INTERNAL_SIZE_T max_sbrked_mem;
2969 INTERNAL_SIZE_T max_mmapped_mem;
2970 INTERNAL_SIZE_T max_total_mem;
2971};
2972
2973typedef struct malloc_state *mstate;
2974
2975/*
2976 There is exactly one instance of this struct in this malloc.
2977 If you are adapting this malloc in a way that does NOT use a static
2978 malloc_state, you MUST explicitly zero-fill it before using. This
2979 malloc relies on the property that malloc_state is initialized to
2980 all zeroes (as is true of C statics).
2981*/
2982
2983static struct malloc_state * av_ = NULL; /* never directly referenced */
2984
2985/*
2986 All uses of av_ are via get_malloc_state().
2987 At most one "call" to get_malloc_state is made per invocation of
2988 the public versions of malloc and free, but other routines
2989 that in turn invoke malloc and/or free may call it more than once.
2990 Also, it is called in check* routines if DEBUG is set.
2991*/
2992
2993#define get_malloc_state() (av_)
2994
2995/*
2996 Initialize a malloc_state struct.
2997
2998 This is called only from within malloc_consolidate, which needs
2999 to be called in the same contexts anyway. It is never called directly
3000 outside of malloc_consolidate because some optimizing compilers try
3001 to inline it at all call points, which turns out not to be an
3002 optimization at all. (Inlining it in malloc_consolidate is fine though.)
3003*/
3004
3005#if __STD_C
3006static void malloc_mmap_state(void)
3007#else
3008static void malloc_mmap_state()
3009#endif
3010{
3011 int mprot;
3012 unsigned long pagesize = malloc_getpagesize;
3013 size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3014
3015 void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3016
3017
3018#ifdef NDEBUG
3019 if (foo == MAP_FAILED) {
3020 fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3021 abort ();
3022 }
3023#else
3024 assert(foo != MAP_FAILED);
3025#endif
3026
3027 mprot = mprotect(foo, pagesize, PROT_NONE);
3028#ifdef NDEBUG
3029 if (mprot == -1) {
3030 fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3031 strerror (errno));
3032 abort ();
3033 }
3034#else
3035 assert(mprot != -1);
3036#endif
3037
3038 av_ = (struct malloc_state *) ((char*)foo + pagesize);
3039
3040 MALLOC_ZERO(av_, sizeof(struct malloc_state));
3041
3042 mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3043#ifdef NDEBUG
3044 if (mprot == -1) {
3045 fprintf (stderr,
3046 "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3047 strerror (errno));
3048 abort ();
3049 }
3050#else
3051 assert(mprot != -1);
3052#endif
3053}
3054
3055#if __STD_C
3056static void malloc_init_state(mstate av)
3057#else
3058static void malloc_init_state(av) mstate av;
3059#endif
3060{
3061 int i;
3062 mbinptr bin;
3063
3064 void * morecore_test = MORECORE(0);
3065 unsigned long hashval;
3066
3067 /* Test morecore function
3068 */
3069 set_morecore32bit(av);
3070
3071 if (morecore_test == MORECORE_FAILURE)
3072 {
3073 set_nonmorecore32bit(av);
3074 }
3075 else
3076 {
3077 /* On 64bit systems, the heap may be located above the
3078 * 32bit address space. Since mmap() probably still can be
3079 * convinced to map within 32bit, we don't use sbrk().
3080 */
3081 hashval = hash (morecore_test);
3082 if (hashval >= AMOUNTHASH)
3083 {
3084 set_nonmorecore32bit(av);
3085 }
3086 }
3087
3088
3089 /* Establish circular links for normal bins */
3090 for (i = 1; i < NBINS; ++i) {
3091 bin = bin_at(av,i);
3092 bin->fd = bin->bk = bin;
3093 }
3094
3095 av->top_pad = DEFAULT_TOP_PAD;
3096 av->n_mmaps_max = DEFAULT_MMAP_MAX;
3097 av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3098 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
3099
3100#if MORECORE_CONTIGUOUS
3101 set_contiguous(av);
3102#else
3103 set_noncontiguous(av);
3104#endif
3105
3106 set_max_fast(av, DEFAULT_MXFAST);
3107
3108 av->top = cireg_getfree ();
3109 av->top->chunk = (mchunkptr) startheap;
3110 av->top->size = 0;
3111 set_previnuse(av->top);
3112 clear_inuse(av->top);
3113 hashtable[0] = av->top;
3114 av->pagesize = malloc_getpagesize;
3115
3116 memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);
3117
3118}
3119
3120/*
3121 Other internal utilities operating on mstates
3122*/
3123
3124#if __STD_C
3125static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
3126static int sYSTRIm(size_t, mstate);
3127static void malloc_consolidate(mstate);
3128#else
3129static Void_t* sYSMALLOc();
3130static int sYSTRIm();
3131static void malloc_consolidate();
3132#endif
3133
3134/* dnmalloc functions */
3135/* needs mstate so moved here */
3136
3137static chunkinfoptr
3138next_chunkinfo (chunkinfoptr ci)
3139{
3140 mchunkptr nextp;
3141 unsigned long hashval;
3142 chunkinfoptr cinfonextp;
3143 mstate av = get_malloc_state();
3144
3145 /* If ci has a hash_next, it is not the last element in the linked
3146 list and we simply return that next chunkinfo (else branch below).
3147 */
3148 if (!ci->hash_next)
3149 {
3150 /* ci is the last element, find the next chunkinfo by
3151 * looking up the chunkinfo for the chunk that is after ci's chunk
3152 */
3153 nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));
3154
3155 if (!(nextp == av->top->chunk))
3156 {
3157 hashval = hash (nextp);
3158 /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
3159 cinfonextp = hashtable[hashval];
3160 if (cinfonextp && chunk (cinfonextp) == nextp)
3161 return cinfonextp;
3162
3163#ifdef DNMALLOC_CHECKS_EXTRA
3164 /* This seems bogus; a chunkinfo may legally have no nextp if
3165 * it's the last one allocated (?)
3166 */
3167 else {
3168 if (cinfonextp)
3169 fprintf (stderr,
3170 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3171 chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
3172 else
3173 fprintf (stderr,
3174 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3175 chunk(ci), hashval, "null", "null", nextp);
3176 }
3177#endif
3178
3179 return NULL;
3180 }
3181 else
3182 {
3183 return av->top;
3184 }
3185
3186 }
3187 else
3188 {
3189 return (ci->hash_next);
3190 }
3191}
3192
3193static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3194 mchunkptr nextp;
3195 if (oldp->hash_next == newp)
3196 return 1;
3197 nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3198 if (nextp == chunk(newp))
3199 return 1;
3200 return 0;
3201}
3202
3203
3204
3205/* Get the chunkinfo of the physically previous chunk */
3206/* Since we disposed of prev_size, we need this function to find the previous */
3207
3208static chunkinfoptr
3209prev_chunkinfo (chunkinfoptr ci)
3210{
3211 unsigned int i;
3212 chunkinfoptr prev;
3213 mchunkptr prevchunk = 0;
3214 /* chunkinfoptr temp; */
3215
3216 /* Get the hashtable location of the chunkinfo */
3217 i = hash (chunk (ci));
3218 assert(i < AMOUNTHASH); /* rw */
3219
3220 /* Get the first element of the linked list of chunkinfos that contains ci */
3221 prev = hashtable[i];
3222
3223 if (ci == prev) {
3224 prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3225 i = hash(prevchunk);
3226 assert(i < AMOUNTHASH); /* rw */
3227 /* Loop over the linked list until we reach the last element */
3228 for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3229 } else {
3230 /* ci is not the first element in the linked list, we can just
3231 loop over the list and return the previous
3232 */
3233 for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3234 }
3235
3236 return prev;
3237}
3238
3239
3240/*
3241 Debugging support
3242 Dnmalloc broke dlmalloc's debugging functions; they should be fixed at
3243 some point. For now they are defined as no-ops.
3244*/
3245
3246#define check_chunk(P)
3247#define check_free_chunk(P)
3248#define check_inuse_chunk(P)
3249#define check_remalloced_chunk(P,N)
3250#define check_malloced_chunk(P,N)
3251#define check_malloc_state()
3252
3253
3254/* ----------- Routines dealing with system allocation -------------- */
3255
3256/*
3257 sysmalloc handles malloc cases requiring more memory from the system.
3258 On entry, it is assumed that av->top does not have enough
3259 space to service request for nb bytes, thus requiring that av->top
3260 be extended or replaced.
3261*/
3262
3263#if __STD_C
3264static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3265#else
3266static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3267#endif
3268{
3269 chunkinfoptr old_top; /* incoming value of av->top */
3270 INTERNAL_SIZE_T old_size; /* its size */
3271 char* old_end; /* its end address */
3272
3273 long size; /* arg to first MORECORE or mmap call */
3274 char* brk; /* return value from MORECORE */
3275
3276 long correction; /* arg to 2nd MORECORE call */
3277 char* snd_brk; /* 2nd return val */
3278
3279 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3280 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3281 char* aligned_brk; /* aligned offset into brk */
3282
3283 chunkinfoptr p; /* the allocated/returned chunk */
3284 chunkinfoptr remainder; /* remainder from allocation */
3285 chunkinfoptr fencepost; /* fencepost */
3286 CHUNK_SIZE_T remainder_size; /* its size */
3287
3288 CHUNK_SIZE_T sum; /* for updating stats */
3289
3290 size_t pagemask = av->pagesize - 1;
3291
3292#ifdef DNMALLOC_DEBUG
3293 fprintf(stderr, "Enter sysmalloc\n");
3294#endif
3295 /*
3296 If there is space available in fastbins, consolidate and retry
3297 malloc from scratch rather than getting memory from system. This
3298 can occur only if nb is in smallbin range so we didn't consolidate
3299 upon entry to malloc. It is much easier to handle this case here
3300 than in malloc proper.
3301 */
3302
3303
3304 if (have_fastchunks(av)) {
[172]3305 Void_t * retval;
[171]3306 assert(in_smallbin_range(nb));
3307 malloc_consolidate(av);
3308#ifdef DNMALLOC_DEBUG
3309 fprintf(stderr, "Return sysmalloc have_fastchunks\n");
3310#endif
[172]3311 retval = mALLOc(nb - MALLOC_ALIGN_MASK);
3312 VALGRIND_FREELIKE_BLOCK(retval, 0);
3313 return retval;
[171]3314 }
3315
3316
3317 /*
3318 If have mmap, and the request size meets the mmap threshold, and
3319 the system supports mmap, and there are few enough currently
3320 allocated mmapped regions, try to directly map this request
3321 rather than expanding top.
3322 */
3323
3324 if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3325 (av->n_mmaps < av->n_mmaps_max))) {
3326
3327 char* mm; /* return value from mmap call*/
3328
3329 /*
3330 Round up size to nearest page. For mmapped chunks, the overhead
3331 is one SIZE_SZ unit larger than for normal chunks, because there
3332 is no following chunk whose prev_size field could be used.
3333 */
3334 size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3335
3336 /* Don't try if size wraps around 0 */
3337 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3338
3339
3340 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3341
3342 if (mm != (char*)(MORECORE_FAILURE)) {
3343
[172]3344 VALGRIND_MAKE_MEM_NOACCESS(mm,size);
3345
[171]3346 /*
3347 The offset to the start of the mmapped region is stored
3348 in the prev_size field of the chunk. This allows us to adjust
3349 returned start address to meet alignment requirements here
3350 and in memalign(), and still be able to compute proper
3351 address argument for later munmap in free() and realloc().
3352 */
3353
3354 front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
3355 p = cireg_getfree();
3356
3357 if (front_misalign > 0) {
3358 correction = MALLOC_ALIGNMENT - front_misalign;
3359 p->chunk = (mchunkptr)(mm + correction);
3360 p->hash_next = (chunkinfoptr) correction;
3361 set_head(p, (size - correction) |INUSE|IS_MMAPPED);
3362 }
3363 else {
3364 p->chunk = (mchunkptr)mm;
3365 p->hash_next = 0;
3366 set_head(p, size|INUSE|IS_MMAPPED);
3367 }
3368 hashtable_add(p);
3369 /* update statistics */
3370
3371 if (++av->n_mmaps > av->max_n_mmaps)
3372 av->max_n_mmaps = av->n_mmaps;
3373
3374 sum = av->mmapped_mem += size;
3375 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3376 av->max_mmapped_mem = sum;
3377 sum += av->sbrked_mem;
3378 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3379 av->max_total_mem = sum;
3380
3381 check_chunk(p);
3382
3383#ifdef DNMALLOC_DEBUG
3384 fprintf(stderr, "Return mmapped (%lu, total %lu)\n",
3385 size, (unsigned long)/* size_t */av->max_total_mem );
3386#endif
3387 return chunk(p);
3388 }
3389 }
3390 }
3391
3392 /* Record incoming configuration of top */
3393
3394 old_top = av->top;
3395 old_size = chunksize(old_top);
3396 old_end = (char*)(chunk_at_offset(chunk(old_top), old_size));
3397
3398 brk = snd_brk = (char*)(MORECORE_FAILURE);
3399
3400 /*
3401 If not the first time through, we require old_size to be
3402 at least MINSIZE and to have prev_inuse set.
3403 */
3404
3405 /* assert((old_top == initial_top(av) && old_size == 0) ||
3406 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3407 prev_inuse(old_top))); */
3408
3409 /* Precondition: not enough current space to satisfy nb request */
3410 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3411
3412 /* Precondition: all fastbins are consolidated */
3413 assert(!have_fastchunks(av));
3414
3415 /* Request enough space for nb + pad + overhead */
3416 size = nb + av->top_pad + MINSIZE;
3417
3418 /*
3419 If contiguous, we can subtract out existing space that we hope to
3420 combine with new space. We add it back later only if
3421 we don't actually get contiguous space.
3422 */
3423 if (contiguous(av))
3424 size -= old_size;
3425
3426 /*
3427 Round to a multiple of page size.
3428 If MORECORE is not contiguous, this ensures that we only call it
3429 with whole-page arguments. And if MORECORE is contiguous and
3430 this is not first time through, this preserves page-alignment of
3431 previous calls. Otherwise, we correct to page-align below.
3432 */
3433
3434 size = (size + pagemask) & ~pagemask;
3435
3436 /*
3437 Don't try to call MORECORE if argument is so big as to appear
3438 negative. Note that since mmap takes size_t arg, it may succeed
3439 below even if we cannot call MORECORE.
3440 */
3441 if (size > 0 && morecore32bit(av))
3442 brk = (char*)(MORECORE(size));
3443
3444 /*
3445 If have mmap, try using it as a backup when MORECORE fails or
3446 cannot be used. This is worth doing on systems that have "holes" in
3447 address space, so sbrk cannot extend to give contiguous space, but
3448 space is available elsewhere. Note that we ignore mmap max count
3449 and threshold limits, since the space will not be used as a
3450 segregated mmap region.
3451 */
3452 if (brk != (char*)(MORECORE_FAILURE)) {
3453 av->sbrked_mem += size;
[172]3454 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
[171]3455 }
3456
3457 else {
3458
3459#ifdef DNMALLOC_DEBUG
3460 fprintf(stderr, "Morecore failure in sysmalloc\n");
3461#endif
3462
3463 /* Cannot merge with old top, so add its size back in */
3464 if (contiguous(av))
3465 size = (size + old_size + pagemask) & ~pagemask;
3466
3467 /* If we are relying on mmap as backup, then use larger units */
3468 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3469 size = MMAP_AS_MORECORE_SIZE;
3470
3471 /* Don't try if size wraps around 0 */
3472 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3473
3474#ifdef DNMALLOC_DEBUG
3475 fprintf(stderr, "Try mmap in sysmalloc\n");
3476#endif
3477 brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3478
3479 if (brk != (char*)(MORECORE_FAILURE)) {
3480
[172]3481 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3482
[171]3483 av->mmapped_mem += size;
3484#ifdef DNMALLOC_DEBUG
3485 fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
3486#endif
3487
3488 /* We do not need, and cannot use, another sbrk call to find end */
3489 snd_brk = brk + size;
3490
3491 /*
3492 Record that we no longer have a contiguous sbrk region.
3493 After the first time mmap is used as backup, we do not
3494 ever rely on contiguous space since this could incorrectly
3495 bridge regions.
3496 */
3497 set_noncontiguous(av);
3498 }
3499 }
3500 }
3501
3502 if (brk != (char*)(MORECORE_FAILURE)) {
3503#ifdef DNMALLOC_DEBUG
3504 fprintf(stderr, "Success path %lu allocated, sbrked %lu\n",
3505 size, (unsigned long)av->sbrked_mem);
3506#endif
3507 /* av->sbrked_mem += size; moved up */
3508
3509 /*
3510 If MORECORE extends previous space, we can likewise extend top size.
3511 */
3512
3513 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3514 set_head(old_top, (size + old_size) | PREV_INUSE);
3515#ifdef DNMALLOC_DEBUG
3516 fprintf(stderr, "Previous space extended\n");
3517#endif
3518 }
3519
3520 /*
3521 Otherwise, make adjustments:
3522
3523 * If the first time through or noncontiguous, we need to call sbrk
3524 just to find out where the end of memory lies.
3525
3526 * We need to ensure that all returned chunks from malloc will meet
3527 MALLOC_ALIGNMENT
3528
3529 * If there was an intervening foreign sbrk, we need to adjust sbrk
3530 request size to account for fact that we will not be able to
3531 combine new space with existing space in old_top.
3532
3533 * Almost all systems internally allocate whole pages at a time, in
3534 which case we might as well use the whole last page of request.
3535 So we allocate enough more memory to hit a page boundary now,
3536 which in turn causes future contiguous calls to page-align.
3537 */
3538
3539 else {
3540 front_misalign = 0;
3541 end_misalign = 0;
3542 correction = 0;
3543 aligned_brk = brk;
3544
3545 /*
3546 If MORECORE returns an address lower than we have seen before,
3547 we know it isn't really contiguous. This and some subsequent
3548 checks help cope with non-conforming MORECORE functions and
3549 the presence of "foreign" calls to MORECORE from outside of
3550 malloc or by other threads. We cannot guarantee to detect
3551 these in all cases, but cope with the ones we do detect.
3552 */
3553 if (contiguous(av) && old_size != 0 && brk < old_end) {
3554 set_noncontiguous(av);
3555 }
3556
3557 /* handle contiguous cases */
3558 if (contiguous(av)) {
3559
3560#ifdef DNMALLOC_DEBUG
3561 fprintf(stderr, "Handle contiguous cases\n");
3562#endif
3563 /*
3564 We can tolerate forward non-contiguities here (usually due
3565 to foreign calls) but treat them as part of our space for
3566 stats reporting.
3567 */
3568 if (old_size != 0)
3569 av->sbrked_mem += brk - old_end;
3570
3571 /* Guarantee alignment of first new chunk made from this space */
3572
3573 front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
3574 if (front_misalign > 0) {
3575
3576 /*
3577 Skip over some bytes to arrive at an aligned position.
3578 We don't need to specially mark these wasted front bytes.
3579 They will never be accessed anyway because
3580 prev_inuse of av->top (and any chunk created from its start)
3581 is always true after initialization.
3582 */
3583
3584 correction = MALLOC_ALIGNMENT - front_misalign;
3585 aligned_brk += correction;
3586 }
3587
3588 /*
3589 If this isn't adjacent to existing space, then we will not
3590 be able to merge with old_top space, so must add to 2nd request.
3591 */
3592
3593 correction += old_size;
3594
3595 /* Extend the end address to hit a page boundary */
3596 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3597 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3598
3599 assert(correction >= 0);
3600 snd_brk = (char*)(MORECORE(correction));
3601
3602 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3603 /*
3604 If can't allocate correction, try to at least find out current
3605 brk. It might be enough to proceed without failing.
3606 */
3607 correction = 0;
3608 snd_brk = (char*)(MORECORE(0));
3609 }
3610 else if (snd_brk < brk) {
3611 /*
3612 If the second call gives noncontiguous space even though
3613 it says it won't, the only course of action is to ignore
3614 results of second call, and conservatively estimate where
3615 the first call left us. Also set noncontiguous, so this
3616 won't happen again, leaving at most one hole.
3617
3618 Note that this check is intrinsically incomplete. Because
3619 MORECORE is allowed to give more space than we ask for,
3620 there is no reliable way to detect a noncontiguity
3621 producing a forward gap for the second call.
3622 */
3623 snd_brk = brk + size;
3624 correction = 0;
3625 set_noncontiguous(av);
3626 }
[172]3627 else {
3628 VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
3629 }
[171]3630
3631 }
3632
3633 /* handle non-contiguous cases */
3634 else {
3635
3636#ifdef DNMALLOC_DEBUG
3637 fprintf(stderr, "Handle non-contiguous cases\n");
3638#endif
3639
3640 /* MORECORE/mmap must correctly align */
3641 assert(aligned_OK(brk));
3642
3643 /* Find out current end of memory */
3644 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3645 snd_brk = (char*)(MORECORE(0));
3646 av->sbrked_mem += snd_brk - brk - size;
3647 }
3648#ifdef DNMALLOC_DEBUG
3649 fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
3650#endif
3651 }
3652
3653 /* Adjust top based on results of second sbrk.
3654 *
3655 * If mmap() has been used as backup for failed morecore(),
3656 * we end up in this branch as well.
3657 */
3658 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3659#ifdef DNMALLOC_DEBUG
3660 fprintf(stderr, "Adjust top, correction %lu\n", correction);
3661#endif
3662 /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
3663 av->top = cireg_getfree();
3664 av->top->chunk = (mchunkptr)aligned_brk;
3665 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3666#ifdef DNMALLOC_DEBUG
3667 fprintf(stderr, "Adjust top, top %p size %lu\n",
3668 av->top, (unsigned long)chunksize(av->top));
3669#endif
3670 hashtable_add(av->top);
3671 av->sbrked_mem += correction;
3672
3673 /*
3674 If not the first time through, we either have a
3675 gap due to foreign sbrk or a non-contiguous region. Insert a
3676 double fencepost at old_top to prevent consolidation with space
3677 we don't own. These fenceposts are artificial chunks that are
3678 marked as inuse. Original dlmalloc had two of these, but they were too
3679 small to use. To ensure that the linked lists contain a maximum
3680 of 8 elements we use only one. Inuse is determined by the
3681 current rather than the next chunk anyway.
3682 */
3683
3684 if (old_size != 0) {
3685#ifdef DNMALLOC_DEBUG
3686 fprintf(stderr, "Shrink old_top to insert fenceposts\n");
3687#endif
3688 /*
3689 Shrink old_top to insert fenceposts, keeping size a
3690 multiple of MALLOC_ALIGNMENT. We know there is at least
3691 enough space in old_top to do this.
3692 */
3693#ifdef DNMALLOC_DEBUG
3694 fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n",
3695 old_top, (unsigned long)old_size);
3696#endif
3697 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3698 set_head(old_top, old_size | PREV_INUSE);
3699#ifdef DNMALLOC_DEBUG
3700 fprintf(stderr, "Adjust top, old_size after %lu\n",
3701 (unsigned long)old_size);
3702#endif
3703
3704 /*
3705 Note that the following assignments completely overwrite
3706 old_top when old_size was previously MINSIZE. This is
3707 intentional. We need the fencepost, even if old_top otherwise gets
3708 lost.
3709 */
3710 /* In dnmalloc the fencepost needs to be 16 bytes; however, since
3711 it is marked inuse it will never be coalesced
3712 */
3713 fencepost = cireg_getfree();
3714 fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top),
3715 old_size);
3716 fencepost->size = 16|INUSE|PREV_INUSE;
3717 hashtable_add(fencepost);
3718 /*
3719 If possible, release the rest, suppressing trimming.
3720 */
3721 if (old_size >= MINSIZE) {
3722 INTERNAL_SIZE_T tt = av->trim_threshold;
3723#ifdef DNMALLOC_DEBUG
3724 fprintf(stderr, "Release\n");
3725#endif
3726 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3727 set_head(old_top, old_size | PREV_INUSE | INUSE);
3728 guard_set(av->guard_stored, old_top, 0, old_size);
[172]3729 VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
[171]3730 fREe(chunk(old_top));
3731 av->trim_threshold = tt;
3732#ifdef DNMALLOC_DEBUG
3733 fprintf(stderr, "Release done\n");
3734#endif
3735 }
3736
3737#ifdef DNMALLOC_DEBUG
3738 fprintf(stderr, "Adjust top, size %lu\n",
3739 (unsigned long)chunksize(av->top));
3740#endif
3741
3742 } /* fenceposts */
3743 } /* adjust top */
3744 } /* not extended previous region */
3745
3746 /* Update statistics */ /* FIXME check this */
3747 sum = av->sbrked_mem;
3748 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3749 av->max_sbrked_mem = sum;
3750
3751 sum += av->mmapped_mem;
3752 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3753 av->max_total_mem = sum;
3754
3755 check_malloc_state();
3756
3757 /* finally, do the allocation */
3758
3759 p = av->top;
3760 size = chunksize(p);
3761
3762#ifdef DNMALLOC_DEBUG
3763 fprintf(stderr, "Size: %lu nb+MINSIZE: %lu\n",
3764 (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
3765#endif
3766
3767 /* check that one of the above allocation paths succeeded */
3768 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3769 remainder_size = size - nb;
3770 remainder = cireg_getfree();
3771 remainder->chunk = chunk_at_offset(chunk(p), nb);
3772 av->top = remainder;
3773 set_head(p, nb | PREV_INUSE | INUSE);
3774 set_head(remainder, remainder_size | PREV_INUSE);
3775 hashtable_insert (p, av->top);
3776 check_malloced_chunk(p, nb);
3777#ifdef DNMALLOC_DEBUG
3778 fprintf(stderr, "Return any (total %lu)\n",
3779 (unsigned long)/* size_t */av->max_total_mem );
3780#endif
3781 return chunk(p);
3782 }
3783
3784 }
3785
3786#ifdef DNMALLOC_DEBUG
3787 fprintf(stderr, "Return failed (total %lu)\n",
3788 (unsigned long)/* size_t */av->max_total_mem );
3789#endif
3790
3791 /* catch all failure paths */
3792 MALLOC_FAILURE_ACTION;
3793 return 0;
3794}
3795
3796
3797
3798
3799/*
3800 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3801 to the system (via negative arguments to sbrk) if there is unused
3802 memory at the `high' end of the malloc pool. It is called
3803 automatically by free() when top space exceeds the trim
3804 threshold. It is also called by the public malloc_trim routine. It
3805 returns 1 if it actually released any memory, else 0.
3806*/
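/*
  Worked example (illustrative numbers only; assumes a 4 KB page size and
  that MINSIZE is much smaller than a page): with pad == 0 and 204800
  bytes (200 KB) free in av->top,

      extra = ((204800 - 0 - MINSIZE + 4095) / 4096 - 1) * 4096
            = 49 * 4096 = 200704

  so 49 whole pages are handed back via MORECORE(-extra) and one page
  stays in av->top.
*/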
3807
3808#if __STD_C
3809static int sYSTRIm(size_t pad, mstate av)
3810#else
3811static int sYSTRIm(pad, av) size_t pad; mstate av;
3812#endif
3813{
3814 long top_size; /* Amount of top-most memory */
3815 long extra; /* Amount to release */
3816 long released; /* Amount actually released */
3817 char* current_brk; /* address returned by pre-check sbrk call */
3818 char* new_brk; /* address returned by post-check sbrk call */
3819 size_t pagesz;
3820
3821 pagesz = av->pagesize;
3822 top_size = chunksize(av->top);
3823
3824 /* Release in pagesize units, keeping at least one page */
3825 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3826
3827 if (extra > 0) {
3828
3829 /*
3830 Only proceed if end of memory is where we last set it.
3831 This avoids problems if there were foreign sbrk calls.
3832 */
3833 current_brk = (char*)(MORECORE(0));
3834 if (current_brk == (char*)(av->top) + top_size) {
3835
3836 /*
3837 Attempt to release memory. We ignore MORECORE return value,
3838 and instead call again to find out where new end of memory is.
3839 This avoids problems if the first call releases less than we asked,
3840 or if failure somehow altered the brk value. (We could still
3841 encounter problems if it altered brk in some very bad way,
3842 but the only thing we can do is adjust anyway, which will cause
3843 some downstream failure.)
3844 */
3845
3846 MORECORE(-extra);
3847 new_brk = (char*)(MORECORE(0));
3848
3849 if (new_brk != (char*)MORECORE_FAILURE) {
3850 released = (long)(current_brk - new_brk);
3851
3852 if (released != 0) {
3853 /* Success. Adjust top. */
3854 av->sbrked_mem -= released;
3855 set_head(av->top, (top_size - released) | PREV_INUSE);
3856 check_malloc_state();
3857 return 1;
3858 }
3859 }
3860 }
3861 }
3862 return 0;
3863}
3864
3865/*
3866 ------------------------------ malloc ------------------------------
3867*/
3868
3869
3870#if __STD_C
3871DL_STATIC Void_t* mALLOc(size_t bytes)
3872#else
3873DL_STATIC Void_t* mALLOc(bytes) size_t bytes;
3874#endif
3875{
3876 mstate av = get_malloc_state();
3877
3878 INTERNAL_SIZE_T nb; /* normalized request size */
3879 unsigned int idx; /* associated bin index */
3880 mbinptr bin; /* associated bin */
3881 mfastbinptr* fb; /* associated fastbin */
3882
3883 chunkinfoptr victim; /* inspected/selected chunk */
3884 INTERNAL_SIZE_T size; /* its size */
3885 int victim_index; /* its bin index */
3886
3887 chunkinfoptr remainder; /* remainder from a split */
3888 CHUNK_SIZE_T remainder_size; /* its size */
3889
3890 unsigned int block; /* bit map traverser */
3891 unsigned int bit; /* bit map traverser */
3892 unsigned int map; /* current word of binmap */
3893
3894 chunkinfoptr fwd; /* misc temp for linking */
3895 chunkinfoptr bck; /* misc temp for linking */
3896
3897 Void_t* retval;
3898
3899 /* chunkinfoptr next; */
3900
3901
3902 /*
3903 Convert request size to internal form by adding SIZE_SZ bytes
3904 overhead plus possibly more to obtain necessary alignment and/or
3905 to obtain a size of at least MINSIZE, the smallest allocatable
3906 size. Also, checked_request2size traps (returning 0) request sizes
3907 that are so large that they wrap around zero when padded and
3908 aligned.
3909 */
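  /*
    Illustration (hedged: the authoritative definition is the
    checked_request2size macro earlier in this file): assuming the typical
    32-bit values SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8, a request of 13
    bytes is padded to nb == (13 + 4 + 7) & ~7 == 24, and anything smaller
    than MINSIZE is rounded up to MINSIZE.
  */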
3910#if defined(SH_CUTEST)
3911 extern int malloc_count;
3912 ++malloc_count;
3913#endif
3914
3915 checked_request2size(bytes, nb);
3916
3917 /*
3918 Bypass search if no frees yet
3919 */
3920 if (av && have_anychunks(av)) {
3921 goto av_initialized;
3922 }
3923 else {
3924 if (!av || av->max_fast == 0) { /* initialization check */
3925 malloc_consolidate(av);
3926 av = get_malloc_state();
3927 }
3928 goto use_top;
3929 }
3930
3931 av_initialized:
3932
3933 /*
3934 If the size qualifies as a fastbin, first check corresponding bin.
3935 */
3936 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3937 fb = &(av->fastbins[(fastbin_index(nb))]);
3938 if ( (victim = *fb) != 0) {
3939 *fb = victim->fd;
3940 check_remalloced_chunk(victim, nb);
3941 guard_set(av->guard_stored, victim, bytes, nb);
[172]3942 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3943 return chunk(victim);
3944 }
3945 }
3946
3947 /*
3948 If a small request, check regular bin. Since these "smallbins"
3949 hold one size each, no searching within bins is necessary.
3950 (For a large request, we need to wait until unsorted chunks are
3951 processed to find best fit. But for small ones, fits are exact
3952 anyway, so we can check now, which is faster.)
3953 */
3954
3955 if (in_smallbin_range(nb)) {
3956 idx = smallbin_index(nb);
3957 bin = bin_at(av,idx);
3958
3959 if ((victim = last(bin)) != bin) {
3960 bck = victim->bk;
3961 bin->bk = bck;
3962 bck->fd = bin;
3963
3964 set_all_inuse(victim);
3965
3966 check_malloced_chunk(victim, nb);
3967 guard_set(av->guard_stored, victim, bytes, nb);
[172]3968 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3969 return chunk(victim);
3970 }
3971 }
3972
3973 /*
3974 If this is a large request, consolidate fastbins before continuing.
3975 While it might look excessive to kill all fastbins before
3976 even seeing if there is space available, this avoids
3977 fragmentation problems normally associated with fastbins.
3978 Also, in practice, programs tend to have runs of either small or
3979 large requests, but less often mixtures, so consolidation is not
3980 invoked all that often in most programs. And the programs in which
3981 it is called frequently would otherwise tend to fragment.
3982 */
3983
3984 else {
3985 idx = largebin_index(nb);
3986 if (have_fastchunks(av))
3987 malloc_consolidate(av);
3988 }
3989
3990 /*
3991 Process recently freed or remaindered chunks, taking one only if
3992 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3993 the most recent non-exact fit. Place other traversed chunks in
3994 bins. Note that this step is the only place in any routine where
3995 chunks are placed in bins.
3996 */
3997
3998 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3999 bck = victim->bk;
4000 size = chunksize(victim);
4001
4002 /*
4003 If a small request, try to use last remainder if it is the
4004 only chunk in unsorted bin. This helps promote locality for
4005 runs of consecutive small requests. This is the only
4006 exception to best-fit, and applies only when there is
4007 no exact fit for a small chunk.
4008 */
4009
4010 if (UNLIKELY(in_smallbin_range(nb) &&
4011 bck == unsorted_chunks(av) &&
4012 victim == av->last_remainder &&
4013 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4014
4015 /* split and reattach remainder */
4016 remainder_size = size - nb;
4017 remainder = cireg_getfree();
4018 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4019 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4020 av->last_remainder = remainder;
4021 remainder->bk = remainder->fd = unsorted_chunks(av);
4022
4023 set_head(victim, nb | PREV_INUSE|INUSE);
4024 set_head(remainder, remainder_size | PREV_INUSE);
4025 hashtable_insert(victim, remainder);
4026
4027 check_malloced_chunk(victim, nb);
4028 guard_set(av->guard_stored, victim, bytes, nb);
[172]4029 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4030 return chunk(victim);
4031 }
4032
4033 /* remove from unsorted list */
4034 unsorted_chunks(av)->bk = bck;
4035 bck->fd = unsorted_chunks(av);
4036
4037 /* Take now instead of binning if exact fit */
4038
4039 if (UNLIKELY(size == nb)) {
4040      set_all_inuse(victim);
4041 check_malloced_chunk(victim, nb);
4042 guard_set(av->guard_stored, victim, bytes, nb);
[172]4043 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4044 return chunk(victim);
4045 }
4046
4047 /* place chunk in bin */
4048
4049 if (in_smallbin_range(size)) {
4050
4051 victim_index = smallbin_index(size);
4052 bck = bin_at(av, victim_index);
4053 fwd = bck->fd;
4054 }
4055 else {
4056 victim_index = largebin_index(size);
4057 bck = bin_at(av, victim_index);
4058 fwd = bck->fd;
4059
4060 if (UNLIKELY(fwd != bck)) {
4061 /* if smaller than smallest, place first */
4062 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4063 fwd = bck;
4064 bck = bck->bk;
4065 }
4066 else if ((CHUNK_SIZE_T)(size) >=
4067 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4068
4069 /* maintain large bins in sorted order */
4070 size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4071 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
4072 fwd = fwd->fd;
4073 bck = fwd->bk;
4074 }
4075 }
4076 }
4077
4078 mark_bin(av, victim_index);
4079 victim->bk = bck;
4080 victim->fd = fwd;
4081 fwd->bk = victim;
4082 bck->fd = victim;
4083 }
4084
4085 /*
4086 If a large request, scan through the chunks of current bin to
4087 find one that fits. (This will be the smallest that fits unless
4088 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
4089 the only step where an unbounded number of chunks might be
4090 scanned without doing anything useful with them. However the
4091 lists tend to be short.
4092 */
4093
4094 if (!in_smallbin_range(nb)) {
4095 bin = bin_at(av, idx);
4096
4097 victim = last(bin);
4098
4099 if (UNLIKELY(victim != bin)) {
4100
4101 do {
4102 size = chunksize(victim);
4103
4104 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4105 remainder_size = size - nb;
4106 unlink(victim, bck, fwd);
4107
4108 /* Split */
4109 if (remainder_size >= MINSIZE) {
4110 remainder = cireg_getfree();
4111 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4112 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4113 remainder->bk = remainder->fd = unsorted_chunks(av);
4114 set_head(victim, nb | PREV_INUSE | INUSE);
4115 set_head(remainder, remainder_size | PREV_INUSE);
4116 hashtable_insert(victim, remainder);
4117 check_malloced_chunk(victim, nb);
4118 guard_set(av->guard_stored, victim, bytes, nb);
[172]4119 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4120 return chunk(victim);
4121 }
4122 /* Exhaust */
4123 else {
4124 set_all_inuse(victim);
4125 check_malloced_chunk(victim, nb);
4126 guard_set(av->guard_stored, victim, bytes, nb);
[172]4127 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4128 return chunk(victim);
4129 }
4130 }
4131 victim = victim->bk;
4132 } while(victim != bin);
4133 }
4134 }
4135
4136 /*
4137 Search for a chunk by scanning bins, starting with next largest
4138 bin. This search is strictly by best-fit; i.e., the smallest
4139 (with ties going to approximately the least recently used) chunk
4140 that fits is selected.
4141
4142 The bitmap avoids needing to check that most blocks are nonempty.
4143 */
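  /*
    Sketch of the bookkeeping used below: each bin index maps to one bit of
    av->binmap (idx2block selects the word, idx2bit the bit within it). A
    set bit means the bin *may* be non-empty; bits are cleared lazily
    ("write through") only when a bin is found empty during this scan, so
    the map is a conservative filter, not an exact census.
  */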
4144
4145
4146 ++idx;
4147 bin = bin_at(av,idx);
4148 block = idx2block(idx);
4149 map = av->binmap[block];
4150 bit = idx2bit(idx);
4151
4152 for (;;) {
4153
4154 /* Skip rest of block if there are no more set bits in this block. */
4155 if (bit > map || bit == 0) {
4156 do {
4157 if (++block >= BINMAPSIZE) /* out of bins */
4158 goto use_top;
4159 } while ( (map = av->binmap[block]) == 0);
4160
4161 bin = bin_at(av, (block << BINMAPSHIFT));
4162 bit = 1;
4163 }
4164
4165 /* Advance to bin with set bit. There must be one. */
4166 while ((bit & map) == 0) {
4167 bin = next_bin(bin);
4168 bit <<= 1;
4169 assert(bit != 0);
4170 }
4171
4172 /* Inspect the bin. It is likely to be non-empty */
4173 victim = last(bin);
4174
4175 /* If a false alarm (empty bin), clear the bit. */
4176 if (victim == bin) {
4177 av->binmap[block] = map &= ~bit; /* Write through */
4178 bin = next_bin(bin);
4179 bit <<= 1;
4180 }
4181
4182 else {
4183 size = chunksize(victim);
4184
4185 /* We know the first chunk in this bin is big enough to use. */
4186 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4187
4188 remainder_size = size - nb;
4189
4190 /* unlink */
4191 bck = victim->bk;
4192 bin->bk = bck;
4193 bck->fd = bin;
4194
4195 /* Split */
4196 if (remainder_size >= MINSIZE) {
4197 remainder = cireg_getfree();
4198 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4199
4200 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4201 remainder->bk = remainder->fd = unsorted_chunks(av);
4202 /* advertise as last remainder */
[172]4203 if (in_smallbin_range(nb))
4204 av->last_remainder = remainder;
[171]4205
4206 set_head(victim, nb | PREV_INUSE | INUSE);
4207 set_head(remainder, remainder_size | PREV_INUSE);
4208 hashtable_insert(victim, remainder);
4209 check_malloced_chunk(victim, nb);
4210 guard_set(av->guard_stored, victim, bytes, nb);
[172]4211 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4212 return chunk(victim);
4213 }
4214 /* Exhaust */
4215 else {
4216 set_all_inuse(victim);
4217 check_malloced_chunk(victim, nb);
4218 guard_set(av->guard_stored, victim, bytes, nb);
[172]4219 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4220 return chunk(victim);
4221 }
4222
4223 }
4224 }
4225
4226 use_top:
4227
4228
4229 /*
4230 If large enough, split off the chunk bordering the end of memory
4231 (held in av->top). Note that this is in accord with the best-fit
4232 search rule. In effect, av->top is treated as larger (and thus
4233 less well fitting) than any other available chunk since it can
4234 be extended to be as large as necessary (up to system
4235 limitations).
4236
4237 We require that av->top always exists (i.e., has size >=
4238 MINSIZE) after initialization, so if it would otherwise be
4239 exhausted by the current request, it is replenished. (The main
4240 reason for ensuring it exists is that we may need MINSIZE space
4241 to put in fenceposts in sysmalloc.)
4242 */
4243
4244 victim = av->top;
4245 size = chunksize(victim);
4246
4247 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
[172]4248 remainder = cireg_getfree();
[171]4249 remainder_size = size - nb;
4250 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4251 av->top = remainder;
4252 set_head(victim, nb | PREV_INUSE | INUSE);
4253 set_head(remainder, remainder_size | PREV_INUSE);
4254 hashtable_insert(victim, remainder);
4255 check_malloced_chunk(victim, nb);
4256 guard_set(av->guard_stored, victim, bytes, nb);
[172]4257 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4258 return chunk(victim);
4259 }
4260
4261 /*
4262 If no space in top, relay to handle system-dependent cases
4263 */
4264 retval = sYSMALLOc(nb, av);
4265 if (retval) {
4266 victim = mem2chunk(retval);
4267 guard_set(av->guard_stored, victim, bytes, nb);
4268 }
[172]4269 VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
[171]4270 return retval;
4271}
4272
4273/*
4274 ------------------------------ free ------------------------------
4275*/
4276
4277#if __STD_C
4278DL_STATIC void fREe(Void_t* mem)
4279#else
4280DL_STATIC void fREe(mem) Void_t* mem;
4281#endif
4282{
4283 mstate av = get_malloc_state();
4284
4285 chunkinfoptr p; /* chunk corresponding to mem */
4286 INTERNAL_SIZE_T size; /* its size */
4287 mfastbinptr* fb; /* associated fastbin */
4288 chunkinfoptr prevchunk; /* previous physical chunk */
4289 chunkinfoptr nextchunk; /* next contiguous chunk */
4290 INTERNAL_SIZE_T nextsize; /* its size */
4291 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4292 chunkinfoptr bck; /* misc temp for linking */
4293 chunkinfoptr fwd; /* misc temp for linking */
4294 chunkinfoptr next;
4295#if defined(SH_CUTEST)
4296 extern int malloc_count;
4297 --malloc_count;
4298#endif
4299
4300 /* free(0) has no effect */
4301 if (mem != 0) {
4302 p = hashtable_lookup(mem);
4303 /* check that memory is managed by us
4304 * and is inuse
4305 */
4306 if (UNLIKELY(!p || !inuse(p)))
4307 {
4308#ifdef DNMALLOC_CHECKS
4309 if (p) {
4310 fprintf(stderr, "Attempt to free memory not in use\n");
4311 abort();
4312 } else {
4313 fprintf(stderr, "Attempt to free memory not allocated\n");
4314 abort();
4315 }
4316#endif
4317 assert(p && inuse(p));
4318 return;
4319 }
4320
[172]4321 VALGRIND_FREELIKE_BLOCK(mem, 0);
4322
[171]4323 guard_check(av->guard_stored, p);
[172]4324
[171]4325 size = chunksize(p);
4326
4327 check_inuse_chunk(p);
4328
4329 /*
4330 If eligible, place chunk on a fastbin so it can be found
4331 and used quickly in malloc.
4332 */
4333
4334 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
4335
4336#if TRIM_FASTBINS
4337 /*
4338 If TRIM_FASTBINS set, don't place chunks
4339 bordering top into fastbins
4340 */
4341 && (chunk_at_offset(chunk(p), size) != av->top)
4342#endif
4343 ) {
4344
4345 set_fastchunks(av);
4346 fb = &(av->fastbins[fastbin_index(size)]);
4347 p->fd = *fb;
4348 *fb = p;
4349 }
4350
4351 /*
4352 Consolidate other non-mmapped chunks as they arrive.
4353 */
4354
4355 else if (!chunk_is_mmapped(p)) {
4356 set_anychunks(av);
4357
4358 nextchunk = next_chunkinfo(p);
4359 if (nextchunk)
4360 nextsize = chunksize(nextchunk);
4361 else
4362 nextsize = 0;/* gcc doesn't notice that it's only used if (nextchunk)*/
4363
4364 /* consolidate backward */
4365 if (UNLIKELY(!prev_inuse(p))) {
4366 prevchunk = prev_chunkinfo(p);
4367 prevsize = chunksize(prevchunk);
4368#ifdef DNMALLOC_CHECKS
4369 if (inuse(prevchunk)) {
4370 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
4371 abort();
4372 }
4373#else
4374 assert(!inuse(prevchunk));
4375#endif
4376 size += prevsize;
4377 unlink(prevchunk, bck, fwd);
4378 set_head(p, size | PREV_INUSE);
4379 hashtable_skiprm(prevchunk,p);
4380 /* This chunk no longer exists in any form: release the chunkinfoptr
4381 */
4382 freecilst_add(p);
4383 p = prevchunk;
4384 }
4385
4386 if (nextchunk) {
4387 if (nextchunk != av->top) {
4388 /* get and clear inuse bit */
4389 clear_previnuse(nextchunk);
4390
4391 /* consolidate forward */
4392 if (!inuse(nextchunk)) {
4393 unlink(nextchunk, bck, fwd);
4394 size += nextsize;
4395 set_head(p, size | PREV_INUSE);
4396 hashtable_skiprm(p, nextchunk);
4397 freecilst_add (nextchunk);
4398 }
4399
4400 set_head(p, size | PREV_INUSE);
4401 next = next_chunkinfo(p);
4402 if (next)
4403 next->prev_size = size;
4404
4405 /*
4406 Place the chunk in unsorted chunk list. Chunks are
4407 not placed into regular bins until after they have
4408 been given one chance to be used in malloc.
4409 */
4410
4411 bck = unsorted_chunks(av);
4412 fwd = bck->fd;
4413 p->bk = bck;
4414 p->fd = fwd;
4415 bck->fd = p;
4416 fwd->bk = p;
4417
4418 nextchunk = next_chunkinfo(p);
4419 if (nextchunk)
4420 nextchunk->prev_size = chunksize(p);
4421
4422 check_free_chunk(p);
4423 }
4424
4425 /*
4426 If the chunk borders the current high end of memory,
4427 consolidate into top
4428 */
4429
4430 else {
4431 size += nextsize;
4432 set_head(p, size | PREV_INUSE);
4433 hashtable_remove(chunk(av->top));
4434 freecilst_add(av->top);
4435 av->top = p;
4436 check_chunk(p);
4437 }
4438 } /* if (nextchunk) */
4439
4440 /*
4441 If freeing a large space, consolidate possibly-surrounding
4442 chunks. Then, if the total unused topmost memory exceeds trim
4443 threshold, ask malloc_trim to reduce top.
4444
4445 Unless max_fast is 0, we don't know if there are fastbins
4446 bordering top, so we cannot tell for sure whether threshold
4447 has been reached unless fastbins are consolidated. But we
4448 don't want to consolidate on each free. As a compromise,
4449 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4450 is reached.
4451 */
4452
4453 if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) {
4454 if (have_fastchunks(av))
4455 malloc_consolidate(av);
4456
4457#ifndef MORECORE_CANNOT_TRIM
4458 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
4459 (CHUNK_SIZE_T)(av->trim_threshold))
4460 {
4461 if (morecore32bit(av))
4462 {
4463#ifdef DNMALLOC_DEBUG
4464 fprintf(stderr, "Calling systrim from free()\n");
4465#endif
4466 sYSTRIm(av->top_pad, av);
4467#ifdef DNMALLOC_DEBUG
4468 fprintf(stderr, "Systrim done\n");
4469#endif
4470 }
4471 }
4472#endif
4473 }
4474
4475 }
4476 /*
4477 If the chunk was allocated via mmap, release via munmap()
4478 Note that if HAVE_MMAP is false but chunk_is_mmapped is
4479 true, then the user must have overwritten memory. There's nothing
4480 we can do to catch this error unless DEBUG is set, in which case
4481 check_inuse_chunk (above) will have triggered an error.
4482 */
4483
4484 else {
4485 int ret;
4486 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
4487 av->n_mmaps--;
4488 av->mmapped_mem -= (size + offset);
4489 ret = munmap((char*) chunk(p) - offset, size + offset);
4490 hashtable_remove_mmapped(chunk(p));
4491 freecilst_add(p);
4492 /* munmap returns non-zero on failure */
4493 assert(ret == 0);
4494 }
4495 }
4496}
4497
4498/*
4499 ------------------------- malloc_consolidate -------------------------
4500
4501 malloc_consolidate is a specialized version of free() that tears
4502 down chunks held in fastbins. Free itself cannot be used for this
4503 purpose since, among other things, it might place chunks back onto
4504 fastbins. So, instead, we need to use a minor variant of the same
4505 code.
4506
4507 Also, because this routine needs to be called the first time through
4508 malloc anyway, it turns out to be the perfect place to trigger
4509 initialization code.
4510*/
4511
4512#if __STD_C
4513static void malloc_consolidate(mstate av)
4514#else
4515static void malloc_consolidate(av) mstate av;
4516#endif
4517{
4518 mfastbinptr* fb; /* current fastbin being consolidated */
4519 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4520 chunkinfoptr p; /* current chunk being consolidated */
4521 chunkinfoptr nextp; /* next chunk to consolidate */
4522 chunkinfoptr prevp;
4523 chunkinfoptr unsorted_bin; /* bin header */
4524 chunkinfoptr first_unsorted; /* chunk to link to */
4525
4526 /* These have same use as in free() */
4527 chunkinfoptr nextchunk;
4528 INTERNAL_SIZE_T size;
4529 INTERNAL_SIZE_T nextsize;
4530 INTERNAL_SIZE_T prevsize;
4531 chunkinfoptr bck;
4532 chunkinfoptr fwd;
4533 chunkinfoptr next;
4534
4535 /*
4536 If max_fast is 0, we know that av hasn't
4537 yet been initialized, in which case do so below
4538 */
4539 if (av && av->max_fast != 0) {
[172]4540
[171]4541 clear_fastchunks(av);
4542
4543 unsorted_bin = unsorted_chunks(av);
4544
4545 /*
4546 Remove each chunk from fast bin and consolidate it, placing it
4547 then in unsorted bin. Among other reasons for doing this,
4548 placing in unsorted bin avoids needing to calculate actual bins
4549 until malloc is sure that chunks aren't immediately going to be
4550 reused anyway.
4551 */
4552
4553 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4554 fb = &(av->fastbins[0]);
4555 do {
4556 if ( UNLIKELY((p = *fb) != 0)) {
4557 *fb = 0;
4558 do {
4559 check_inuse_chunk(p);
4560 nextp = p->fd;
4561
4562 /*
4563 * Slightly streamlined version of consolidation code in free()
4564 */
4565
4566 size = chunksize(p);
4567 nextchunk = next_chunkinfo(p);
4568
4569 /* gcc doesn't notice that it's only used if (nextchunk) */
4570 if (nextchunk)
4571 nextsize = chunksize(nextchunk);
4572 else
4573 nextsize = 0;
4574
4575 if (!prev_inuse(p)) {
4576 prevp = prev_chunkinfo(p);
4577 prevsize = chunksize(prevp);
4578 size += prevsize;
4579#ifdef DNMALLOC_CHECKS
4580 if (inuse(prevp)) {
4581 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
4582 abort();
4583 }
4584#else
4585 assert(!inuse(prevp));
4586#endif
4587 unlink(prevp, bck, fwd);
4588 set_head(p, size | PREV_INUSE);
4589 hashtable_skiprm(prevp,p);
4590 freecilst_add(p);
4591 p=prevp;
4592 }
4593
4594 if (nextchunk) {
4595 if (nextchunk != av->top) {
4596
4597 clear_previnuse(nextchunk);
4598
[172]4599 /* if mmap is used instead of sbrk, we may have a
4600 * chunk with !nextchunk->fd && !nextchunk->bk
4601 */
[171]4602 if (!inuse(nextchunk)) {
[172]4603 if( nextchunk->fd && nextchunk->bk) {
4604 size += nextsize;
4605 unlink(nextchunk, bck, fwd);
4606 set_head(p, size | PREV_INUSE);
4607 hashtable_skiprm(p,nextchunk);
4608 freecilst_add(nextchunk);
4609 }
[171]4610 }
4611
4612 first_unsorted = unsorted_bin->fd;
4613 unsorted_bin->fd = p;
4614 first_unsorted->bk = p;
4615
4616 set_head(p, size | PREV_INUSE);
4617 p->bk = unsorted_bin;
4618 p->fd = first_unsorted;
4619 next = next_chunkinfo(p);
4620 if (next)
4621 next->prev_size = size;
4622
4623
4624 }
4625
[172]4626 else if (nextchunk == av->top) {
[171]4627 size += nextsize;
4628 set_head(p, size | PREV_INUSE);
4629 hashtable_remove(chunk(av->top));
4630 freecilst_add(av->top);
4631 av->top = p;
4632 }
[172]4633 } /* if (nextchunk) */
[171]4634
4635 } while ( (p = nextp) != 0);
4636
4637 }
4638 } while (fb++ != maxfb);
4639 }
4640 else {
4641 // Initialize dnmalloc
4642 dnmalloc_init();
4643 malloc_init_state(get_malloc_state());
4644 check_malloc_state();
4645 }
4646}
4647
4648/*
4649 ------------------------------ realloc ------------------------------
4650*/
4651
4652
4653#if __STD_C
4654DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4655#else
4656DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4657#endif
4658{
4659 mstate av = get_malloc_state();
4660
4661 INTERNAL_SIZE_T nb; /* padded request size */
4662
4663 chunkinfoptr oldp; /* chunk corresponding to oldmem */
4664 INTERNAL_SIZE_T oldsize; /* its size */
4665
4666 chunkinfoptr newp; /* chunk to return */
4667 INTERNAL_SIZE_T newsize; /* its size */
4668 Void_t* newmem; /* corresponding user mem */
4669
4670 chunkinfoptr next; /* next contiguous chunk after oldp */
4671
4672 chunkinfoptr remainder; /* extra space at end of newp */
4673 CHUNK_SIZE_T remainder_size; /* its size */
4674
4675 chunkinfoptr bck; /* misc temp for linking */
4676 chunkinfoptr fwd; /* misc temp for linking */
4677
4678 CHUNK_SIZE_T copysize; /* bytes to copy */
4679 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4680 INTERNAL_SIZE_T* s; /* copy source */
4681 INTERNAL_SIZE_T* d; /* copy destination */
4682
4683
4684#ifdef REALLOC_ZERO_BYTES_FREES
4685 if (UNLIKELY(bytes == 0)) {
4686 fREe(oldmem);
4687 return 0;
4688 }
4689#endif
4690
4691 if (UNLIKELY(!av || av->max_fast == 0)) {
4692 malloc_consolidate(av);
4693 av = get_malloc_state();
4694 }
4695
4696 /* realloc of null is supposed to be same as malloc */
4697 if (UNLIKELY(oldmem == 0))
4698 return mALLOc(bytes);
4699
4700 checked_request2size(bytes, nb);
4701
4702 oldp = hashtable_lookup(oldmem);
4703
4704 if (UNLIKELY(!oldp || !inuse(oldp))){
4705 /* attempt to either realloc memory not managed by us
4706 * or memory that is not in use
4707 */
4708#ifdef DNMALLOC_CHECKS
4709 if (oldp) {
4710	fprintf(stderr, "Attempt to realloc memory not in use\n");
4711 abort();
4712 } else {
4713	fprintf(stderr, "Attempt to realloc memory not allocated\n");
4714 abort();
4715 }
4716#endif
4717 assert(oldp && inuse(oldp));
4718 return 0;
4719 }
4720
[172]4721 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
[171]4722 guard_check(av->guard_stored, oldp);
4723
4724 oldsize = chunksize(oldp);
4725
4726 check_inuse_chunk(oldp);
4727
4728 if (!chunk_is_mmapped(oldp)) {
4729
4730 if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
4731 /* already big enough; split below */
4732 newp = oldp;
4733 newsize = oldsize;
4734 }
4735
4736 else {
4737 next = next_chunkinfo(oldp);
4738 if (next)
4739 next->prev_size = oldsize;
4740 /* Try to expand forward into top */
4741 if (next && next == av->top &&
4742 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4743 (CHUNK_SIZE_T)(nb + MINSIZE)) {
4744 set_head_size(oldp, nb);
4745 hashtable_remove(chunk(av->top));
4746 av->top->chunk = chunk_at_offset(chunk(oldp), nb);
4747 set_head(av->top, (newsize - nb) | PREV_INUSE);
4748	/* av->top->chunk has been moved; update its entry in the hashtable */
4749 hashtable_insert(oldp, av->top);
4750 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4751 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4752 return chunk(oldp);
4753 }
4754
4755 /* Try to expand forward into next chunk; split off remainder below */
4756 else if (next && next != av->top &&
4757 !inuse(next) &&
4758 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4759 (CHUNK_SIZE_T)(nb)) {
4760 newp = oldp;
4761 unlink(next, bck, fwd);
4762 hashtable_remove(chunk(next));
4763 freecilst_add(next);
4764 next = next_chunkinfo(oldp);
4765 if (next)
4766 next->prev_size = newsize;
4767 }
4768
4769 /* allocate, copy, free */
4770 else {
4771
4772 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
[172]4773 if (newmem == 0)
[171]4774 return 0; /* propagate failure */
4775
4776 newp = hashtable_lookup(newmem);
4777 newsize = chunksize(newp);
4778
[172]4779
[171]4780 /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
4781 /*
4782 Avoid copy if newp is next chunk after oldp.
4783 */
4784 if (UNLIKELY(is_next_chunk(oldp, newp))) {
4785 newsize += oldsize;
4786 set_head_size(oldp, newsize);
4787 hashtable_skiprm(oldp, newp);
4788 freecilst_add(newp);
4789 newp = oldp;
4790 }
4791 else {
4792 /*
4793 Unroll copy of <= 40 bytes (80 if 8byte sizes)
4794 We know that contents have an even number of
4795 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
4796 */
4797
[172]4798 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);
4799
[171]4800 copysize = oldsize;
4801 s = (INTERNAL_SIZE_T*)(oldmem);
4802 d = (INTERNAL_SIZE_T*)(newmem);
4803 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4804 assert(ncopies >= 2);
4805
4806 if (ncopies > 10)
4807 MALLOC_COPY(d, s, copysize);
4808
4809 else {
4810 *(d+0) = *(s+0);
4811 *(d+1) = *(s+1);
4812 if (ncopies > 2) {
4813 *(d+2) = *(s+2);
4814 *(d+3) = *(s+3);
4815 if (ncopies > 4) {
4816 *(d+4) = *(s+4);
4817 *(d+5) = *(s+5);
4818 if (ncopies > 6) {
4819 *(d+6) = *(s+6);
4820 *(d+7) = *(s+7);
4821 if (ncopies > 8) {
4822 *(d+8) = *(s+8);
4823 *(d+9) = *(s+9);
4824 }
4825 }
4826 }
4827 }
4828 }
4829
4830 fREe(oldmem);
4831 check_inuse_chunk(newp);
4832 guard_set(av->guard_stored, newp, bytes, nb);
4833 return chunk(newp);
4834 }
4835 }
4836 }
4837
4838 /* If possible, free extra space in old or extended chunk */
4839
4840 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4841
4842 remainder_size = newsize - nb;
4843
4844 if (remainder_size >= MINSIZE) { /* split remainder */
4845 remainder = cireg_getfree();
4846 remainder->chunk = chunk_at_offset(chunk(newp), nb);
4847 set_head_size(newp, nb);
4848 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
4849 remainder->prev_size = nb;
4850 hashtable_insert(newp, remainder);
4851 /* Mark remainder as inuse so free() won't complain */
4852 set_all_inuse(remainder);
4853 guard_set(av->guard_stored, remainder, 0, remainder_size);
[172]4854 VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
[171]4855 fREe(chunk(remainder));
4856 }
4857 else { /* not enough extra to split off */
4858 set_head_size(newp, newsize);
4859 set_all_inuse(newp);
4860 }
4861
4862 check_inuse_chunk(newp);
4863 guard_set(av->guard_stored, newp, bytes, nb);
[172]4864 VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
[171]4865 return chunk(newp);
4866 }
4867
4868 /*
4869 Handle mmap cases
4870 */
4871
4872 else {
4873
4874#if HAVE_MREMAP
4875 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
4876 size_t pagemask = av->pagesize - 1;
4877 char *cp;
4878 CHUNK_SIZE_T sum;
4879
[172]4880
[171]4881    /* Note: the extra SIZE_SZ overhead of the commented-out line below is not added here */
4882 //newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4883 newsize = (nb + offset + pagemask) & ~pagemask;
4884
4885 /* don't need to remap if still within same page */
4886 if (oldsize == newsize - offset)
4887 {
4888 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4889 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4890 VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
[171]4891 return oldmem;
4892 }
4893
4894 cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);
4895
4896 if (cp != (char*)MORECORE_FAILURE) {
4897
4898 hashtable_remove_mmapped(chunk(oldp));
4899
4900 oldp->chunk = (mchunkptr)(cp + offset);
4901 set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);
4902
4903 hashtable_add(oldp);
4904
4905 assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
4906 assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));
4907
4908 /* update statistics */
4909 sum = av->mmapped_mem += newsize - oldsize;
4910 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4911 av->max_mmapped_mem = sum;
4912 sum += av->sbrked_mem;
4913 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4914 av->max_total_mem = sum;
4915
4916 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4917 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4918 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4919 return chunk(oldp);
4920 }
4921#endif /* have MREMAP */
4922
4923 /* Note the extra SIZE_SZ overhead. */
4924 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
4925 newmem = oldmem; /* do nothing */
4926 else {
4927 /* Must alloc, copy, free. */
4928 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4929 if (newmem != 0) {
4930 MALLOC_COPY(newmem, oldmem, oldsize);
4931 fREe(oldmem);
4932 }
4933 }
[172]4934 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
[171]4935 guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
4936 return newmem;
4937 }
4938}
4939
4940/*
4941 ---------------------------posix_memalign ----------------------------
4942*/
4943
4944#if __STD_C
4945DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4946#else
4947DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4948#endif
4949{
4950 mstate av;
4951
4952 if (alignment % sizeof(void *) != 0)
4953 return EINVAL;
4954 if ((alignment & (alignment - 1)) != 0)
4955 return EINVAL;
4956
4957 av = get_malloc_state();
4958 if (!av || av->max_fast == 0) malloc_consolidate(av);
4959 *memptr = mEMALIGn(alignment, bytes);
4960
4961 return (*memptr != NULL ? 0 : ENOMEM);
4962}
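/*
  Typical caller-side use of the POSIX interface above (sketch; error
  handling abbreviated, and the public name posix_memalign is assumed to
  be mapped onto posix_mEMALIGn by the naming macros used in this file):

      void *buf = NULL;
      int rc = posix_memalign(&buf, 64, 4096);   // 64-byte aligned, 4 KB
      if (rc == 0) {
          ... use buf ...
          free(buf);
      }

  alignment must be a power of two and a multiple of sizeof(void*),
  otherwise EINVAL is returned, as checked above.
*/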
4963
4964/*
4965 ------------------------------ memalign ------------------------------
4966*/
4967
4968#if __STD_C
4969DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
4970#else
4971DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
4972#endif
4973{
4974 INTERNAL_SIZE_T nb; /* padded request size */
4975 char* m; /* memory returned by malloc call */
4976 chunkinfoptr p; /* corresponding chunk */
4977 char* brk; /* alignment point within p */
4978 chunkinfoptr newp; /* chunk to return */
4979 INTERNAL_SIZE_T newsize; /* its size */
4980 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4981 chunkinfoptr remainder; /* spare room at end to split off */
4982 CHUNK_SIZE_T remainder_size; /* its size */
4983 INTERNAL_SIZE_T size;
4984 mstate av;
4985
4986 /* If need less alignment than we give anyway, just relay to malloc */
4987
4988 if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);
4989
4990 /* Otherwise, ensure that it is at least a minimum chunk size */
4991
4992 if (alignment < MINSIZE) alignment = MINSIZE;
4993
4994 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4995 if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
4996 size_t a = MALLOC_ALIGNMENT * 2;
4997 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
4998 alignment = a;
4999 }
5000
5001 checked_request2size(bytes, nb);
5002
5003 /*
5004 Strategy: find a spot within that chunk that meets the alignment
5005 request, and then possibly free the leading and trailing space.
5006 */
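  /*
    Why nb + alignment + MINSIZE is enough (sketch): any window of
    'alignment' consecutive bytes contains an aligned address, so at most
    alignment - 1 leading bytes are skipped; if that leader would be
    smaller than MINSIZE we advance one more alignment unit, so the leader
    is always below alignment + MINSIZE, which the extra space requested
    below still covers.
  */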
5007
5008
5009 /* Call malloc with worst case padding to hit alignment. */
5010
5011 m = (char*)(mALLOc(nb + alignment + MINSIZE));
5012
5013 if (m == 0) return 0; /* propagate failure */
5014
5015 av = get_malloc_state();
5016
5017 p = hashtable_lookup((mchunkptr) m);
5018
5019 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
5020
5021 /*
5022 Find an aligned spot inside chunk. Since we need to give back
5023 leading space in a chunk of at least MINSIZE, if the first
5024 calculation places us at a spot with less than MINSIZE leader,
5025 we can move to the next aligned spot -- we've allocated enough
5026 total room so that this is always possible.
5027 */
5028
5029 brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
5030 -((signed long) alignment)));
5031 if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
5032 brk += alignment;
5033
5034 newp = cireg_getfree();
5035 newp->chunk = (mchunkptr)brk;
5036 leadsize = brk - (char*)(chunk(p));
5037 newsize = chunksize(p) - leadsize;
5038
5039 /* For mmapped chunks, just adjust offset */
5040 if (UNLIKELY(chunk_is_mmapped(p))) {
5041 newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
5042 set_head(newp, newsize|IS_MMAPPED|INUSE);
5043 hashtable_remove_mmapped(chunk(p));
5044 freecilst_add(p);
5045 hashtable_add(newp);
5046 guard_set(av->guard_stored, newp, bytes, nb);
5047 return chunk(newp);
5048 }
5049
5050 /* Otherwise, give back leader, use the rest */
5051 set_head(newp, newsize | PREV_INUSE | INUSE);
5052 set_head_size(p, leadsize);
5053 set_all_inuse(newp);
5054 hashtable_add(newp); /* 20.05.2008 rw */
5055 guard_set(av->guard_stored, p, 0, leadsize);
5056 fREe(chunk(p));
5057 p = newp;
5058
5059 assert (newsize >= nb &&
5060 (((PTR_UINT)(chunk(p))) % alignment) == 0);
5061 }
5062
5063 /* Also give back spare room at the end */
5064 if (!chunk_is_mmapped(p)) {
5065 size = chunksize(p);
5066 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
5067 remainder = cireg_getfree();
5068 remainder_size = size - nb;
5069 remainder->chunk = chunk_at_offset(chunk(p), nb);
5070 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
5071 set_head_size(p, nb);
5072 hashtable_add(remainder); /* 20.05.2008 rw */
5073 guard_set(av->guard_stored, remainder, 0, remainder_size);
5074 fREe(chunk(remainder));
5075 }
5076 }
5077
5078 check_inuse_chunk(p);
5079 guard_set(av->guard_stored, p, bytes, nb);
5080 return chunk(p);
5081}
5082
5083/*
5084 ------------------------------ calloc ------------------------------
5085*/
5086
5087#if __STD_C
5088DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5089#else
5090DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5091#endif
5092{
5093 chunkinfoptr p;
5094 CHUNK_SIZE_T clearsize;
5095 CHUNK_SIZE_T nclears;
5096 INTERNAL_SIZE_T* d;
5097 Void_t* mem;
5098
5099
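  /* Observation on the call below: the n_elements * elem_size product is
     passed to mALLOc unchecked, so a product that overflows size_t yields
     a smaller allocation than the caller expects. */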
5100 mem = mALLOc(n_elements * elem_size);
5101
5102 if (mem != 0) {
5103 p = hashtable_lookup(mem);
5104
5105 if (!chunk_is_mmapped(p))
5106 {
5107 /*
5108 Unroll clear of <= 40 bytes (80 if 8byte sizes)
5109 We know that contents have an even number of
5110 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5111 */
5112
5113 d = (INTERNAL_SIZE_T*)mem;
5114 clearsize = chunksize(p);
5115 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5116 assert(nclears >= 2);
5117
5118 if (nclears > 10) {
5119 MALLOC_ZERO(d, clearsize);
5120 }
5121
5122 else {
5123 *(d+0) = 0;
5124 *(d+1) = 0;
5125 if (nclears > 2) {
5126 *(d+2) = 0;
5127 *(d+3) = 0;
5128 if (nclears > 4) {
5129 *(d+4) = 0;
5130 *(d+5) = 0;
5131 if (nclears > 6) {
5132 *(d+6) = 0;
5133 *(d+7) = 0;
5134 if (nclears > 8) {
5135 *(d+8) = 0;
5136 *(d+9) = 0;
5137 }
5138 }
5139 }
5140 }
5141 }
5142 }
5143#if ! MMAP_CLEARS
5144 else
5145 {
5146 d = (INTERNAL_SIZE_T*)mem;
5147 clearsize = chunksize(p);
5148 MALLOC_ZERO(d, clearsize);
5149 }
5150#endif
5151 /* Set guard again, since we just cleared it
5152 */
5153 guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5154 }
5155
5156 return mem;
5157}
5158
5159/*
5160 ------------------------------ valloc ------------------------------
5161*/
5162
5163#if __STD_C
5164DL_STATIC Void_t* vALLOc(size_t bytes)
5165#else
5166DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5167#endif
5168{
5169 /* Ensure initialization */
5170 mstate av = get_malloc_state();
5171 if (!av || av->max_fast == 0) {
5172 malloc_consolidate(av);
5173 av = get_malloc_state();
5174 }
5175 return mEMALIGn(av->pagesize, bytes);
5176}
5177
5178/*
5179 ------------------------------ pvalloc ------------------------------
5180*/
5181
5182
5183#if __STD_C
5184DL_STATIC Void_t* pVALLOc(size_t bytes)
5185#else
5186DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5187#endif
5188{
5189 mstate av = get_malloc_state();
5190 size_t pagesz;
5191
5192 /* Ensure initialization */
5193 if (!av || av->max_fast == 0) {
5194 malloc_consolidate(av);
5195 av = get_malloc_state();
5196 }
5197 pagesz = av->pagesize;
5198 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5199}
5200
5201
5202/*
5203 ------------------------------ malloc_trim ------------------------------
5204*/
5205
5206#if __STD_C
5207DL_STATIC int mTRIm(size_t pad)
5208#else
5209DL_STATIC int mTRIm(pad) size_t pad;
5210#endif
5211{
5212 mstate av = get_malloc_state();
5213 /* Ensure initialization/consolidation */
5214 malloc_consolidate(av);
5215 av = get_malloc_state();
5216#ifndef MORECORE_CANNOT_TRIM
5217 if (morecore32bit(av))
5218 return sYSTRIm(pad, av);
5219 else
5220 return 0;
5221#else
5222 return 0;
5223#endif
5224}
5225
5226
5227
5228/*
5229 ------------------------- malloc_usable_size -------------------------
5230*/
5231
5232#if __STD_C
5233DL_STATIC size_t mUSABLe(Void_t* mem)
5234#else
5235DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5236#endif
5237{
5238 chunkinfoptr p;
5239 if (mem != 0) {
5240 p = hashtable_lookup(mem);
5241 if (p && inuse(p)) return chunksize(p);
5242 }
5243 return 0;
5244}
5245
5246/*
5247 ------------------------------ mallinfo ------------------------------
5248*/
5249
5250DL_STATIC struct mallinfo mALLINFo()
5251{
5252 mstate av = get_malloc_state();
5253 struct mallinfo mi;
5254 unsigned int i;
5255 mbinptr b;
5256 chunkinfoptr p;
5257 INTERNAL_SIZE_T avail;
5258 INTERNAL_SIZE_T fastavail;
5259 int nblocks;
5260 int nfastblocks;
5261
5262 /* Ensure initialization */
5263 if (!av || av->top == 0) {
5264 malloc_consolidate(av);
5265 av = get_malloc_state();
5266 }
5267 check_malloc_state();
5268
5269 /* Account for top */
5270 avail = chunksize(av->top);
5271 nblocks = 1; /* top always exists */
5272
5273 /* traverse fastbins */
5274 nfastblocks = 0;
5275 fastavail = 0;
5276
5277 for (i = 0; i < NFASTBINS; ++i) {
5278 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5279 ++nfastblocks;
5280 fastavail += chunksize(p);
5281 }
5282 }
5283
5284 avail += fastavail;
5285
5286 /* traverse regular bins */
5287 for (i = 1; i < NBINS; ++i) {
5288 b = bin_at(av, i);
5289 for (p = last(b); p != b; p = p->bk) {
5290 ++nblocks;
5291 avail += chunksize(p);
5292 }
5293 }
5294
5295 mi.smblks = nfastblocks;
5296 mi.ordblks = nblocks;
5297 mi.fordblks = avail;
5298 mi.uordblks = av->sbrked_mem - avail;
5299 mi.arena = av->sbrked_mem;
5300 mi.hblks = av->n_mmaps;
5301 mi.hblkhd = av->mmapped_mem;
5302 mi.fsmblks = fastavail;
5303 mi.keepcost = chunksize(av->top);
5304 mi.usmblks = av->max_total_mem;
5305 return mi;
5306}
5307
5308/*
5309 ------------------------------ malloc_stats ------------------------------
5310*/
5311
5312DL_STATIC void mSTATs()
5313{
5314 struct mallinfo mi = mALLINFo();
5315
5316 fprintf(stderr, "hashtable = %10lu MB\n",
5317 (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5318 fprintf(stderr, "max system bytes = %10lu\n",
5319 (CHUNK_SIZE_T)(mi.usmblks));
5320 fprintf(stderr, "system bytes = %10lu (%10lu sbrked, %10lu mmaped)\n",
5321 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5322 (CHUNK_SIZE_T)(mi.arena),
5323 (CHUNK_SIZE_T)(mi.hblkhd));
5324 fprintf(stderr, "in use bytes = %10lu\n",
5325 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5326
5327}
5328
5329
5330/*
5331 ------------------------------ mallopt ------------------------------
5332*/
5333
5334#if __STD_C
5335DL_STATIC int mALLOPt(int param_number, int value)
5336#else
5337DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5338#endif
5339{
5340 mstate av = get_malloc_state();
5341 /* Ensure initialization/consolidation */
5342 malloc_consolidate(av);
5343 av = get_malloc_state();
5344
5345 switch(param_number) {
5346 case M_MXFAST:
5347 if (value >= 0 && value <= MAX_FAST_SIZE) {
5348 set_max_fast(av, value);
5349 return 1;
5350 }
5351 else
5352 return 0;
5353
5354 case M_TRIM_THRESHOLD:
5355 av->trim_threshold = value;
5356 return 1;
5357
5358 case M_TOP_PAD:
5359 av->top_pad = value;
5360 return 1;
5361
5362 case M_MMAP_THRESHOLD:
5363 av->mmap_threshold = value;
5364 return 1;
5365
5366 case M_MMAP_MAX:
5367 if (value != 0)
5368 return 0;
5369 av->n_mmaps_max = value;
5370 return 1;
5371
5372 default:
5373 return 0;
5374 }
5375}
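/*
  Example of tuning the knobs handled above (sketch; the public name
  mallopt is assumed to be mapped onto mALLOPt by the naming macros used
  in this file):

      mallopt(M_TOP_PAD,        64 * 1024);    // extra slack requested from the system
      mallopt(M_TRIM_THRESHOLD, 256 * 1024);   // trim top when it exceeds 256 KB
      mallopt(M_MXFAST,         64);           // fastbin cutoff, must be <= MAX_FAST_SIZE

  Each call returns 1 if the value was accepted and 0 if it was rejected.
*/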
5376
5377
5378/* $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $ */
5379
5380/*
5381 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5382 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5383 *
5384 * Permission to use, copy, modify, and distribute this software for any
5385 * purpose with or without fee is hereby granted, provided that the above
5386 * copyright notice and this permission notice appear in all copies.
5387 *
5388 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5389 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5390 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5391 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5392 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5393 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5394 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5395 */
5396
5397/*
5398 * Arc4 random number generator for OpenBSD.
5399 *
5400 * This code is derived from section 17.1 of Applied Cryptography,
5401 * second edition, which describes a stream cipher allegedly
5402 * compatible with RSA Labs "RC4" cipher (the actual description of
5403 * which is a trade secret). The same algorithm is used as a stream
5404 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5405 *
5406 * Here the stream cipher has been modified always to include the time
5407 * when initializing the state. That makes it impossible to
5408 * regenerate the same random sequence twice, so this can't be used
5409 * for encryption, but will generate good random numbers.
5410 *
5411 * RC4 is a registered trademark of RSA Laboratories.
5412 */
5413
5414/* Moved u_int8_t -> unsigned char (portability)
5415 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5416 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5417 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5418 * This is arc4random(3) using urandom.
5419 */
5420
5421#include <fcntl.h>
5422#include <limits.h>
5423#include <stdlib.h>
5424#include <sys/param.h>
5425#include <sys/time.h>
5426
5427struct arc4_stream {
5428 unsigned char i;
5429 unsigned char j;
5430 unsigned char s[256];
5431};
5432
5433static int rs_initialized;
5434static struct arc4_stream rs;
5435static pid_t arc4_stir_pid;
5436static int arc4_count;
5437
5438static unsigned char arc4_getbyte(void);
5439
5440static void
5441arc4_init(void)
5442{
5443 int n;
5444
5445 for (n = 0; n < 256; n++)
5446 rs.s[n] = n;
5447 rs.i = 0;
5448 rs.j = 0;
5449}
5450
5451static inline void
5452arc4_addrandom(unsigned char *dat, int datlen)
5453{
5454 int n;
5455 unsigned char si;
5456
5457 rs.i--;
5458 for (n = 0; n < 256; n++) {
5459 rs.i = (rs.i + 1);
5460 si = rs.s[rs.i];
5461 rs.j = (rs.j + si + dat[n % datlen]);
5462 rs.s[rs.i] = rs.s[rs.j];
5463 rs.s[rs.j] = si;
5464 }
5465 rs.j = rs.i;
5466}
5467
5468#ifdef HAVE_SCHED_H
5469#include <sched.h>
5470#endif
5471
5472static void
5473arc4_stir(void)
5474{
5475 int i;
5476 struct {
5477 struct timeval tv1;
5478 struct timeval tv2;
5479 u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
5480 } rdat;
5481#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5482 size_t sz = 0;
5483 int fd;
5484#endif
5485
5486 gettimeofday(&rdat.tv1, NULL);
5487
5488
5489 if (!rs_initialized) {
5490 arc4_init();
5491 rs_initialized = 1;
5492 }
5493
5494#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5495
5496#ifdef HAVE_SCHED_YIELD
5497 /* Yield the processor to introduce some random delay. */
5498 (void) sched_yield();
5499#endif
5500
5501 /*
5502 * Pthread problem in multithreaded code on *BSD.
5503 */
5504 fd = open("/dev/urandom", O_RDONLY);
5505 if (fd != -1) {
5506 sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
5507 close(fd);
5508 }
5509 if (sz > sizeof (rdat.rnd))
5510 sz = 0;
5511 #endif
5512
5513 arc4_stir_pid = getpid();
5514 gettimeofday(&rdat.tv2, NULL);
5515
5516 arc4_addrandom((void *)&rdat, sizeof(rdat));
5517
5518 /*
5519 * Discard early keystream, as per recommendations in:
5520 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
5521 */
5522 for (i = 0; i < 256; i++)
5523 (void)arc4_getbyte();
5524 arc4_count = 1600000;
5525}
5526
5527static unsigned char
5528arc4_getbyte(void)
5529{
5530 unsigned char si, sj;
5531
5532 rs.i = (rs.i + 1);
5533 si = rs.s[rs.i];
5534 rs.j = (rs.j + si);
5535 sj = rs.s[rs.j];
5536 rs.s[rs.i] = sj;
5537 rs.s[rs.j] = si;
5538 return (rs.s[(si + sj) & 0xff]);
5539}
5540
5541
5542 /* Changed to return char* */
5543static char *
5544dnmalloc_arc4random(void)
5545{
5546 static char val[4];
5547
5548 /* We only call this once, hence no need for locking. */
5549
5550 /* _ARC4_LOCK(); */
5551 arc4_count -= 4;
5552 if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5553 arc4_stir();
5554
5555 val[0] = (char) arc4_getbyte();
5556 val[1] = (char) arc4_getbyte();
5557 val[2] = (char) arc4_getbyte();
5558 val[3] = (char) arc4_getbyte();
5559
5560 arc4_stir();
5561 /* _ARC4_UNLOCK(); */
5562 return val;
5563}
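/*
  Note on the function above: it returns a pointer to a 4-byte static
  buffer and, per the comment inside, is meant to be called only once
  (hence no locking); it is not a general-purpose, thread-safe random
  number generator.
*/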
5564
5565#else
5566int dnmalloc_pthread_init() { return 0; }
5567#endif /* ! USE_SYSTEM_MALLOC */