source: trunk/src/dnmalloc.c@ 183

Last change on this file since 183 was 180, checked in by katerina, 16 years ago

Process lookup (Linux) for open ports (ticket #117). Also fix for minor dnmalloc bug (ticket #119).

[171]1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
[172]2 Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3 and Rainer Wichmann
4
[171]5 The authors can be contacted by:
6 Email: dnmalloc@fort-knox.org
7 Address:
8 Yves Younan
9 Celestijnenlaan 200A
10 B-3001 Heverlee
11 Belgium
12
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 2.1 of the License, or (at your option) any later version.
17
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26
27*/
28
[172]29/* Current version: dnmalloc 1.0 */
[171]30/* Includes arc4random from OpenBSD, which is under the BSD license */
31
32/* Versions:
33 0.1-0.5:
34 Proof of concept implementation by Hans Van den Eynden and Yves Younan
35 0.6-0.7:
36 Bug fixes by Yves Younan
37 0.8-1.0.beta4:
38 Reimplementation from scratch by Yves Younan
39 1.0.beta4:
40 Public release
41 1.0.beta5:
42 Prev_chunkinfo speeded up, was really slow because of the way we did lookups
43 A freechunkinfo region is now freed when it is completely empty and
44 not the current one
45
46 1.0 (Rainer Wichmann [support at la dash samhna dot org]):
47 ---------------------
48
49 Compiler warnings fixed
50 Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
51 (and what the standard says)
52 Removed unused code
53 Fix assert(aligned_OK(chunk(newp)));
54 -> assert(aligned_OK(chunk(oldp)));
55 Fix statistics in sYSMALLOc
56 Fix overwrite of av->top in sYSMALLOc
57 Provide own assert(), glibc assert() doesn't work (calls malloc)
58 Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
59 Remove cfree, independent_cmalloc, independent_comalloc (untested
60 public functions not covered by any standard)
61 Provide posix_memalign (that one is in the standard)
62 Move the malloc_state struct to mmapped memory protected by guard pages
63 Add arc4random function to initialize random canary on startup
64 Implement random canary at end of (re|m)alloced/memaligned buffer,
65 check at free/realloc
66 Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
67 Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
68
69 Profiling: Reorder branches in hashtable_add, next_chunkinfo,
70 prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
71 checked_request2size (gcc predicts if{} branch to be taken).
72 Use UNLIKELY macro (gcc __builtin_expect()) where branch
73 reordering would make the code awkward.
74
75 Portability: Hashtable always covers full 32bit address space to
76 avoid assumptions about memory layout.
77 Portability: Try hard to enforce mapping of mmapped memory into
78 32bit address space, even on 64bit systems.
79 Portability: Provide a dnmalloc_pthread_init() function, since
80 pthread locking on HP-UX only works if initialized
81 after the application has entered main().
82 Portability: On *BSD, pthread_mutex_lock is unusable since it
83 calls malloc, use spinlocks instead.
84 Portability: Dynamically detect whether the heap is within
85 32bit address range (e.g. on Linux x86_64, it isn't).
86 Don't use sbrk() if the heap is mapped to an address
87 outside the 32bit range, since this doesn't work with
88 the hashtable. New macro morecore32bit.
89
90 Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
91 FreeBSD/pthread, and Solaris 10 i386/pthread.
92 Fail on: OpenBSD/pthread (in _thread_machdep_save_float_state),
93 might be related to OpenBSD pthread internals (??).
94 Non-threaded version (#undef USE_MALLOC_LOCK)
95 works on OpenBSD.
96
[172]97 further to 1.0:
98 Valgrind client requests inserted (#define USE_VALGRIND)
[173]99 Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
[172]100 Portability: minsize = 32 bit on 64bit architecture
101 Minor cleanups
[173]102 Fix: eliminate prototypes for memset, memcpy (they're in string.h)
[172]103
[171]104 There may be some bugs left in this version. Please use with caution.
105*/
106
107
108
109/* Please read the following papers for documentation:
110
111 Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
112 Countermeasures against Current and Future Code Injection Attacks,
113 Proceedings of the Third IEEE International Information Assurance
114 Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
115 IEEE, IEEE Press.
116 http://www.fort-knox.org/younany_countermeasures.pdf
117
118 Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
119 Eynden. Security of Memory Allocators for C and C++. Technical Report
120 CW419, Departement Computerwetenschappen, Katholieke Universiteit
121 Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
122
123 */
124
125/* Compile:
126 gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
127 "Link":
128 Dynamic:
129 gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
130 Static:
131 ar -rv libdnmalloc.a dnmalloc-portable.o
132
133*/
134
135/*
136 dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
137 dlmalloc was released as public domain and contained the following license:
138
139 "This is a version (aka dlmalloc) of malloc/free/realloc written by
140 Doug Lea and released to the public domain. Use, modify, and
141 redistribute this code without permission or acknowledgement in any
142 way you wish. Send questions, comments, complaints, performance
143 data, etc to dl@cs.oswego.edu
144
145 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
146
147 Note: There may be an updated version of this malloc obtainable at
148 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
149 Check before installing!"
150
151*/
152
153/* The following preprocessor macros are tested,
154 * and hence should have #define directives:
155 *
156 * HAVE_CONFIG_H Define to #include "config.h" (autoconf-generated)
157 *
158 * HAVE_UNISTD_H Define to #include <unistd.h>
159 *
160 * HAVE_SYS_UIO_H Define to #include <sys/uio.h> (for writev)
161 * HAVE_WRITEV Define if the 'writev' function is available
162 *
163 * HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
164 *
165 * HAVE_MALLOC_H Define to #include <malloc.h> (for struct mallinfo)
166 *
167 * HAVE_FCNTL_H Define to #include <fcntl.h>
168 *
169 * HAVE_SYS_MMAN_H Define to #include <sys/mman.h>
170 * HAVE_MMAP Define if the 'mmap' function is available.
171 *
172 * HAVE_SCHED_H Define to #include <sched.h>
173 * HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
174 */
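/*
   Illustrative sketch only: with autoconf these macros normally end up in
   config.h (pulled in via -DHAVE_CONFIG_H). On a typical Linux/glibc host
   a hand-written equivalent might contain (values are an assumption, not
   shipped with this source):

     #define HAVE_UNISTD_H 1
     #define HAVE_SYS_UIO_H 1
     #define HAVE_WRITEV 1
     #define HAVE_SYS_PARAM_H 1
     #define HAVE_MALLOC_H 1
     #define HAVE_FCNTL_H 1
     #define HAVE_SYS_MMAN_H 1
     #define HAVE_MMAP 1
     #define HAVE_SCHED_H 1
     #define HAVE_SCHED_YIELD 1
*/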
175
176
177/*
178 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
179 compiler, or a C compiler sufficiently close to ANSI to get away
180 with it.
181*/
182
183#ifdef HAVE_CONFIG_H
184#include "config.h"
185#endif
186
[172]187#ifdef USE_VALGRIND
188#include <valgrind/memcheck.h>
189#else
190#define VALGRIND_FREELIKE_BLOCK(a,b) ((void)0)
191#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
192#define VALGRIND_CREATE_MEMPOOL(a,b,c) ((void)0)
193#define VALGRIND_MEMPOOL_ALLOC(a,b,c) ((void)0)
194#define VALGRIND_MEMPOOL_FREE(a,b) ((void)0)
195#define VALGRIND_DESTROY_MEMPOOL(a) ((void)0)
196#define VALGRIND_MAKE_MEM_DEFINED(a,b) ((void)0)
197#define VALGRIND_MAKE_MEM_UNDEFINED(a,b) ((void)0)
198#define VALGRIND_MAKE_MEM_NOACCESS(a,b) ((void)0)
199#endif
200
[171]201#if defined (__GNUC__) && __GNUC__ > 2
202# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
203# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
204# define __attribute_malloc__ __attribute__ ((__malloc__))
205#else
206# define LIKELY(x) (x)
207# define UNLIKELY(x) (x)
208# define __attribute_malloc__ /* Ignore */
209#endif
210
211/*
212 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
213 large blocks. This is currently only possible on Linux with
214 kernel versions newer than 1.3.77.
215*/
216
217#ifndef HAVE_MREMAP
218#ifdef linux
219#define HAVE_MREMAP 1
220#define _GNU_SOURCE
221#else
222#define HAVE_MREMAP 0
223#endif
224#endif /* HAVE_MREMAP */
225
226
227
228#ifndef __STD_C
229#if defined(__STDC__) || defined(__cplusplus)
230#define __STD_C 1
231#else
232#define __STD_C 0
233#endif
234#endif /*__STD_C*/
235
236
237/*
238 Void_t* is the pointer type that malloc should say it returns
239*/
240
241#ifndef Void_t
242#if (__STD_C || defined(WIN32))
243#define Void_t void
244#else
245#define Void_t char
246#endif
247#endif /*Void_t*/
248
249#if __STD_C
250#include <stddef.h> /* for size_t */
251#else
252#include <sys/types.h>
253#endif
254
255#if !defined(USE_SYSTEM_MALLOC)
256
257#ifdef __cplusplus
258extern "C" {
259#endif
260
261/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
262
263#ifdef HAVE_UNISTD_H
264#include <unistd.h>
265#endif
266
267#ifdef HAVE_SYS_UIO_H
268#include <sys/uio.h>
269#endif
270
271#include <stdio.h> /* needed for malloc_stats */
272#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
273
274#include <string.h>
275#include <stdlib.h>
276
277#include <sys/resource.h>
278
279extern int errno;
280
281 /* 0: lazy,
282 * 1: medium (assertions compiled in),
283 * 2: high (guard pages at end of hash table and ciregions)
284 * 3: paranoid (guards at end of each allocated chunk, check at free)
285 */
286#ifndef PARANOIA
287#define PARANOIA 9
288#endif
289
290 /* Using assert() with multithreading will cause the code
291 * to deadlock since glibc __assert_fail will call malloc().
292 * We need our very own assert().
293 */
294typedef void assert_handler_tp(const char * error, const char *file, int line);
295
296#if PARANOIA > 0
297
298#ifdef NDEBUG
299#undef NDEBUG
300#endif
301
302static void default_assert_handler(const char *error,
303 const char *file, int line)
304{
305#ifdef HAVE_WRITEV
306 struct iovec iov[5];
307 char * i1 = "assertion failed (";
308 char * i3 = "): ";
309 char * i5 = "\n";
310
311 iov[0].iov_base = i1; iov[0].iov_len = strlen(i1);
312 iov[1].iov_base = (char*) file; iov[1].iov_len = strlen(file);
313 iov[2].iov_base = i3; iov[2].iov_len = strlen(i3);
314 iov[3].iov_base = (char*) error; iov[3].iov_len = strlen(error);
315 iov[4].iov_base = i5; iov[4].iov_len = strlen(i5);
316 writev(STDERR_FILENO, iov, 5);
317#else
318 fputs("assertion failed (", stderr);
319 fputs(file, stderr);
320 fputs("): ", stderr);
321 fputs(error, stderr);
322 fputc('\n', stderr);
323#endif
[172]324 (void) line;
[171]325 abort();
326}
327static assert_handler_tp *assert_handler = default_assert_handler;
328
[172]329
[171]330#define assert(x) \
331 do { \
332 if (UNLIKELY(!(x))) { \
333 assert_handler(#x, __FILE__, __LINE__); \
334 } \
335 } while (0)
336
337#else
338
339static assert_handler_tp *assert_handler = NULL;
340#define NDEBUG
341#define assert(x) ((void)0)
342
343#endif
344
345assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *new)
346{
347 assert_handler_tp *old = assert_handler;
348 assert_handler = new;
349 return old;
350}
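/*
   Usage sketch: installing a custom assert handler via the
   dnmalloc_set_handler() function above. The handler must match
   assert_handler_tp; like default_assert_handler it should avoid calling
   malloc (hence fputs instead of fprintf). my_assert_handler is a
   hypothetical application function.

     static void my_assert_handler(const char *error, const char *file,
                                   int line)
     {
       (void) line;
       fputs("dnmalloc assertion failed: ", stderr);
       fputs(error, stderr);
       fputs(" in ", stderr);
       fputs(file, stderr);
       fputc('\n', stderr);
       abort();
     }

     (void) dnmalloc_set_handler(my_assert_handler);
*/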
351
352
353#include <stdarg.h>
354
355 /* define for debugging */
356 /* #define DNMALLOC_DEBUG */
357
[172]358 /* Do some extra checks? If not, they are covered by assert()s */
[171]359 /* #define DNMALLOC_CHECKS */
360
361 /*
362 The unsigned integer type used for comparing any two chunk sizes.
363 This should be at least as wide as size_t, but should not be signed.
364 */
365
366#ifndef CHUNK_SIZE_T
367#define CHUNK_SIZE_T unsigned long
368#endif
369
370/*
371 The unsigned integer type used to hold addresses when they are
372 manipulated as integers. intptr_t would suffice, except that it is
373 not defined on all systems.
374*/
375#ifndef PTR_UINT
376#define PTR_UINT unsigned long
377#endif
378
379
380/*
381 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
382 of chunk sizes.
383
384 The default version is the same as size_t.
385
386 While not strictly necessary, it is best to define this as an
387 unsigned type, even if size_t is a signed type. This may avoid some
388 artificial size limitations on some systems.
389
390 On a 64-bit machine, you may be able to reduce malloc overhead by
391 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
392 expense of not being able to handle more than 2^32 of malloced
393 space. If this limitation is acceptable, you are encouraged to set
394 this unless you are on a platform requiring 16byte alignments. In
395 this case the alignment requirements turn out to negate any
396 potential advantages of decreasing size_t word size.
397
398 Implementors: Beware of the possible combinations of:
399 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
400 and might be the same width as int or as long
401 - size_t might have different width and signedness as INTERNAL_SIZE_T
402 - int and long might be 32 or 64 bits, and might be the same width
403 To deal with this, most comparisons and difference computations
404 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
405 aware of the fact that casting an unsigned int to a wider long does
406 not sign-extend. (This also makes checking for negative numbers
407 awkward.) Some of these casts result in harmless compiler warnings
408 on some systems.
409*/
410
411#ifndef INTERNAL_SIZE_T
412#define INTERNAL_SIZE_T size_t
413#endif
414
415/* The corresponding word size */
416#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
417
418
419
420/*
421 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
422 It must be a power of two at least 2 * SIZE_SZ, even on machines
423 for which smaller alignments would suffice. It may be defined as
424 larger than this though. Note however that code and data structures
425 are optimized for the case of 8-byte alignment.
426*/
427
428
429#ifndef MALLOC_ALIGNMENT
430#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
431#endif
432
433/* The corresponding bit mask value */
434#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
435
436
437
438/*
439 REALLOC_ZERO_BYTES_FREES should be set if a call to
440 realloc with zero bytes should be the same as a call to free.
441 Some people think it should. Otherwise, since this malloc
442 returns a unique pointer for malloc(0), so does realloc(p, 0).
443*/
444
445#define REALLOC_ZERO_BYTES_FREES
446
447/*
448 TRIM_FASTBINS controls whether free() of a very small chunk can
449 immediately lead to trimming. Setting to true (1) can reduce memory
450 footprint, but will almost always slow down programs that use a lot
451 of small chunks.
452
453 Define this only if you are willing to give up some speed to more
454 aggressively reduce system-level memory footprint when releasing
455 memory in programs that use many small chunks. You can get
456 essentially the same effect by setting MXFAST to 0, but this can
457 lead to even greater slowdowns in programs using many small chunks.
458 TRIM_FASTBINS is an in-between compile-time option, that disables
459 only those chunks bordering topmost memory from being placed in
460 fastbins.
461*/
462
463#ifndef TRIM_FASTBINS
464#define TRIM_FASTBINS 0
465#endif
466
467
468/*
469 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
470 This is necessary when you only want to use this malloc in one part
471 of a program, using your regular system malloc elsewhere.
472*/
473
474/* #define USE_DL_PREFIX */
475
476
477/*
478 USE_MALLOC_LOCK causes wrapper functions to surround each
479 callable routine with pthread mutex lock/unlock.
480
481 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
482*/
483
484/* #define USE_MALLOC_LOCK */
485
486
487/*
488 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
489 actually a wrapper function that first calls MALLOC_PREACTION, then
490 calls the internal routine, and follows it with
491 MALLOC_POSTACTION. This is needed for locking, but you can also use
492 this, without USE_MALLOC_LOCK, for purposes of interception,
493 instrumentation, etc. It is a sad fact that using wrappers often
494 noticeably degrades performance of malloc-intensive programs.
495*/
496
497
498#ifdef USE_MALLOC_LOCK
499#define USE_PUBLIC_MALLOC_WRAPPERS
500#else
501/* #define USE_PUBLIC_MALLOC_WRAPPERS */
502#endif
503
504
505/*
506 Two-phase name translation.
507 All of the actual routines are given mangled names.
508 When wrappers are used, they become the public callable versions.
509 When DL_PREFIX is used, the callable names are prefixed.
510*/
511
512#ifndef USE_PUBLIC_MALLOC_WRAPPERS
513#define cALLOc public_cALLOc
514#define fREe public_fREe
515#define mALLOc public_mALLOc
516#define mEMALIGn public_mEMALIGn
517#define posix_mEMALIGn public_posix_mEMALIGn
518#define rEALLOc public_rEALLOc
519#define vALLOc public_vALLOc
520#define pVALLOc public_pVALLOc
521#define mALLINFo public_mALLINFo
522#define mALLOPt public_mALLOPt
523#define mTRIm public_mTRIm
524#define mSTATs public_mSTATs
525#define mUSABLe public_mUSABLe
526#endif
527
528#ifdef USE_DL_PREFIX
529#define public_cALLOc dlcalloc
530#define public_fREe dlfree
531#define public_mALLOc dlmalloc
532#define public_mEMALIGn dlmemalign
533#define public_posix_mEMALIGn dlposix_memalign
534#define public_rEALLOc dlrealloc
535#define public_vALLOc dlvalloc
536#define public_pVALLOc dlpvalloc
537#define public_mALLINFo dlmallinfo
538#define public_mALLOPt dlmallopt
539#define public_mTRIm dlmalloc_trim
540#define public_mSTATs dlmalloc_stats
541#define public_mUSABLe dlmalloc_usable_size
542#else /* USE_DL_PREFIX */
543#define public_cALLOc calloc
544#define public_fREe free
545#define public_mALLOc malloc
546#define public_mEMALIGn memalign
547#define public_posix_mEMALIGn posix_memalign
548#define public_rEALLOc realloc
549#define public_vALLOc valloc
550#define public_pVALLOc pvalloc
551#define public_mALLINFo mallinfo
552#define public_mALLOPt mallopt
553#define public_mTRIm malloc_trim
554#define public_mSTATs malloc_stats
555#define public_mUSABLe malloc_usable_size
556#endif /* USE_DL_PREFIX */
557
558
559/*
560 HAVE_MEMCPY should be defined if you are not otherwise using
561 ANSI STD C, but still have memcpy and memset in your C library
562 and want to use them in calloc and realloc. Otherwise simple
563 macro versions are defined below.
564
565 USE_MEMCPY should be defined as 1 if you actually want to
566 have memset and memcpy called. People report that the macro
567 versions are faster than libc versions on some systems.
568
569 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
570 (of <= 36 bytes) are manually unrolled in realloc and calloc.
571*/
572
573#ifndef HAVE_MEMCPY
574#define HAVE_MEMCPY
575#endif
576
577#ifndef USE_MEMCPY
578#ifdef HAVE_MEMCPY
579#define USE_MEMCPY 1
580#else
581#define USE_MEMCPY 0
582#endif
583#endif
584
585
586#if (__STD_C || defined(HAVE_MEMCPY))
587
588#ifdef WIN32
[173]589 /* On Win32 memset and memcpy are already declared in windows.h */
[171]590#else
591#if __STD_C
[173]592 /* Defined in string.h */
[171]593#else
594Void_t* memset();
595Void_t* memcpy();
596#endif
597#endif
598#endif
599
600/*
601 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
602 malloc fails to be able to return memory, either because memory is
603 exhausted or because of illegal arguments.
604
605 By default, sets errno if running on STD_C platform, else does nothing.
606*/
607
608#ifndef MALLOC_FAILURE_ACTION
609#if __STD_C
610#define MALLOC_FAILURE_ACTION \
611 errno = ENOMEM;
612
613#else
614#define MALLOC_FAILURE_ACTION
615#endif
616#endif
617
618/*
619 MORECORE-related declarations. By default, rely on sbrk
620*/
621
622
623#if !defined(HAVE_UNISTD_H)
624#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
625#if __STD_C
626extern Void_t* sbrk(ptrdiff_t);
627#else
628extern Void_t* sbrk();
629#endif
630#endif
631#endif
632
633/*
634 MORECORE_FAILURE is the value returned upon failure of MORECORE
635 as well as mmap. Since it cannot be an otherwise valid memory address,
636 and must reflect values of standard sys calls, you probably ought not
637 try to redefine it.
638*/
639
640#ifndef MORECORE_FAILURE
641#define MORECORE_FAILURE ((void*)(-1UL))
642#endif
643
644/*
645 MORECORE is the name of the routine to call to obtain more memory
646 from the system. See below for general guidance on writing
647 alternative MORECORE functions, as well as a version for WIN32 and a
648 sample version for pre-OSX macos.
649*/
650
651#ifndef MORECORE
652#define MORECORE sbrk
653#endif
654
655
656/*
657 If MORECORE_CONTIGUOUS is true, take advantage of fact that
658 consecutive calls to MORECORE with positive arguments always return
659 contiguous increasing addresses. This is true of unix sbrk. Even
660 if not defined, when regions happen to be contiguous, malloc will
661 permit allocations spanning regions obtained from different
662 calls. But defining this when applicable enables some stronger
663 consistency checks and space efficiencies.
664*/
665
666#ifndef MORECORE_CONTIGUOUS
667#define MORECORE_CONTIGUOUS 1
668#endif
669
670/*
671 Define MORECORE_CANNOT_TRIM if your version of MORECORE
672 cannot release space back to the system when given negative
673 arguments. This is generally necessary only if you are using
674 a hand-crafted MORECORE function that cannot handle negative arguments.
675*/
676
677/* #define MORECORE_CANNOT_TRIM */
678
679
680/*
681 This malloc requires mmap for heap management data. It is an error
682 if mmap is not available.
683
684 Additionally, mmap will be used to satisfy large requests.
685*/
686
687#ifndef HAVE_MMAP
[172]688# error HAVE_MMAP not defined, has your operating system mmap?
[171]689#endif
690
691/*
692 Standard unix mmap using /dev/zero clears memory so calloc doesn't
693 need to.
694*/
695
696#ifndef MMAP_CLEARS
697#define MMAP_CLEARS 1
698#endif
699
700
701/*
702 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
703 sbrk fails, and mmap is used as a backup (which is done only if
704 HAVE_MMAP). The value must be a multiple of page size. This
705 backup strategy generally applies only when systems have "holes" in
706 address space, so sbrk cannot perform contiguous expansion, but
707 there is still space available on system. On systems for which
708 this is known to be useful (i.e. most linux kernels), this occurs
709 only when programs allocate huge amounts of memory. Between this,
710 and the fact that mmap regions tend to be limited, the size should
711 be large, to avoid too many mmap calls and thus avoid running out
712 of kernel resources.
713*/
714
715#ifndef MMAP_AS_MORECORE_SIZE
716#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
717#endif
718
719
720/*
721 The system page size. To the extent possible, this malloc manages
722 memory from the system in page-size units. Note that this value is
723 cached during initialization into a field of malloc_state. So even
724 if malloc_getpagesize is a function, it is only called once.
725
726 The following mechanics for getpagesize were adapted from bsd/gnu
727 getpagesize.h. If none of the system-probes here apply, a value of
728 4096 is used, which should be OK: If they don't apply, then using
729 the actual value probably doesn't impact performance.
730*/
731
732
733#ifndef malloc_getpagesize
734
735# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
736# ifndef _SC_PAGE_SIZE
737# define _SC_PAGE_SIZE _SC_PAGESIZE
738# endif
739# endif
740
741# ifdef _SC_PAGE_SIZE
742# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
743# else
744# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
745 extern size_t getpagesize();
746# define malloc_getpagesize getpagesize()
747# else
748# ifdef WIN32 /* use supplied emulation of getpagesize */
749# define malloc_getpagesize getpagesize()
750# else
751# if defined(HAVE_SYS_PARAM_H)
752# include <sys/param.h>
753# endif
754# ifdef EXEC_PAGESIZE
755# define malloc_getpagesize EXEC_PAGESIZE
756# else
757# ifdef NBPG
758# ifndef CLSIZE
759# define malloc_getpagesize NBPG
760# else
761# define malloc_getpagesize (NBPG * CLSIZE)
762# endif
763# else
764# ifdef NBPC
765# define malloc_getpagesize NBPC
766# else
767# ifdef PAGESIZE
768# define malloc_getpagesize PAGESIZE
769# else /* just guess */
770# define malloc_getpagesize (4096)
771# endif
772# endif
773# endif
774# endif
775# endif
776# endif
777# endif
778#endif
779
780/*
781 This version of malloc supports the standard SVID/XPG mallinfo
782 routine that returns a struct containing usage properties and
783 statistics. It should work on any SVID/XPG compliant system that has
784 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
785 install such a thing yourself, cut out the preliminary declarations
786 as described above and below and save them in a malloc.h file. But
787 there's no compelling reason to bother to do this.)
788
789 The main declaration needed is the mallinfo struct that is returned
790 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
791 bunch of fields that are not even meaningful in this version of
792 malloc. These fields are instead filled by mallinfo() with
793 other numbers that might be of interest.
794
795 HAVE_MALLOC_H should be set if you have a
796 /usr/include/malloc.h file that includes a declaration of struct
797 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
798 version is declared below. These must be precisely the same for
799 mallinfo() to work. The original SVID version of this struct,
800 defined on most systems with mallinfo, declares all fields as
801 ints. But some others define them as unsigned long. If your system
802 defines the fields using a type of different width than listed here,
803 you must #include your system version and #define
804 HAVE_MALLOC_H.
805*/
806
807/* #define HAVE_MALLOC_H */
808
809/* On *BSD, malloc.h is deprecated, and on some *BSD including
810 * it may actually raise an error.
811 */
812#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
813#include <malloc.h>
814#else
815
816/* SVID2/XPG mallinfo structure */
817
818struct mallinfo {
819 int arena; /* non-mmapped space allocated from system */
820 int ordblks; /* number of free chunks */
821 int smblks; /* number of fastbin blocks */
822 int hblks; /* number of mmapped regions */
823 int hblkhd; /* space in mmapped regions */
824 int usmblks; /* maximum total allocated space */
825 int fsmblks; /* space available in freed fastbin blocks */
826 int uordblks; /* total allocated space */
827 int fordblks; /* total free space */
828 int keepcost; /* top-most, releasable (via malloc_trim) space */
829};
830
831/*
832 SVID/XPG defines four standard parameter numbers for mallopt,
833 normally defined in malloc.h. Only one of these (M_MXFAST) is used
834 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
835 so setting them has no effect. But this malloc also supports other
836 options in mallopt described below.
837*/
838#endif
839
840
841/* ---------- description of public routines ------------ */
842
843/*
844 malloc(size_t n)
845 Returns a pointer to a newly allocated chunk of at least n bytes, or null
846 if no space is available. Additionally, on failure, errno is
847 set to ENOMEM on ANSI C systems.
848
849 If n is zero, malloc returns a minimum-sized chunk. (The minimum
850 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
851 systems.) On most systems, size_t is an unsigned type, so calls
852 with negative arguments are interpreted as requests for huge amounts
853 of space, which will often fail. The maximum supported value of n
854 differs across systems, but is in all cases less than the maximum
855 representable value of a size_t.
856*/
857#if __STD_C
858Void_t* public_mALLOc(size_t) __attribute_malloc__;
859#else
860Void_t* public_mALLOc();
861#endif
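/*
   Usage sketch: check the result for NULL; on failure this malloc also
   sets errno to ENOMEM on ANSI C systems, so perror() can report it.

     char *buf = malloc(4096);
     if (buf == NULL)
       perror("malloc");
*/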
862
863/*
864 free(Void_t* p)
865 Releases the chunk of memory pointed to by p, that had been previously
866 allocated using malloc or a related routine such as realloc.
867 It has no effect if p is null. It can have arbitrary (i.e., bad!)
868 effects if p has already been freed.
869
870 Unless disabled (using mallopt), freeing very large spaces will
871 when possible, automatically trigger operations that give
872 back unused memory to the system, thus reducing program footprint.
873*/
874#if __STD_C
875void public_fREe(Void_t*);
876#else
877void public_fREe();
878#endif
879
880/*
881 calloc(size_t n_elements, size_t element_size);
882 Returns a pointer to n_elements * element_size bytes, with all locations
883 set to zero.
884*/
885#if __STD_C
886Void_t* public_cALLOc(size_t, size_t) __attribute_malloc__;
887#else
888Void_t* public_cALLOc();
889#endif
890
891/*
892 realloc(Void_t* p, size_t n)
893 Returns a pointer to a chunk of size n that contains the same data
894 as does chunk p up to the minimum of (n, p's size) bytes, or null
895 if no space is available.
896
897 The returned pointer may or may not be the same as p. The algorithm
898 prefers extending p when possible, otherwise it employs the
899 equivalent of a malloc-copy-free sequence.
900
901 If p is null, realloc is equivalent to malloc.
902
903 If space is not available, realloc returns null, errno is set (if on
904 ANSI) and p is NOT freed.
905
906 if n is for fewer bytes than already held by p, the newly unused
907 space is lopped off and freed if possible. Unless the #define
908 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
909 zero (re)allocates a minimum-sized chunk.
910
911 Large chunks that were internally obtained via mmap will always
912 be reallocated using malloc-copy-free sequences unless
913 the system supports MREMAP (currently only linux).
914
915 The old unix realloc convention of allowing the last-free'd chunk
916 to be used as an argument to realloc is not supported.
917*/
918#if __STD_C
919Void_t* public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
920#else
921Void_t* public_rEALLOc();
922#endif
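/*
   Usage sketch: since a failed realloc returns null but does NOT free p,
   assign the result to a temporary first so the original block is not
   leaked. handle_out_of_memory is a hypothetical error path.

     char *tmp = realloc(p, new_size);
     if (tmp == NULL)
       handle_out_of_memory();
     else
       p = tmp;
*/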
923
924/*
925 memalign(size_t alignment, size_t n);
926 Returns a pointer to a newly allocated chunk of n bytes, aligned
927 in accord with the alignment argument.
928
929 The alignment argument should be a power of two. If the argument is
930 not a power of two, the nearest greater power is used.
931 8-byte alignment is guaranteed by normal malloc calls, so don't
932 bother calling memalign with an argument of 8 or less.
933
934 Overreliance on memalign is a sure way to fragment space.
935*/
936#if __STD_C
937Void_t* public_mEMALIGn(size_t, size_t) __attribute_malloc__;
938#else
939Void_t* public_mEMALIGn();
940#endif
941
942/*
943 posix_memalign(void** memptr, size_t alignment, size_t n);
944 Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
945 in accord with the alignment argument. Returns 0 on success, otherwise
946 an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
947
948 The alignment must be a power of two, and a multiple of sizeof(void *).
949*/
950#if __STD_C
951int public_posix_mEMALIGn(Void_t**, size_t, size_t);
952#else
953int public_posix_mEMALIGn();
954#endif
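/*
   Usage sketch: posix_memalign reports failure through its return value
   (EINVAL for a bad alignment, ENOMEM when out of memory) rather than
   through errno, and memory obtained this way is released with free().
   handle_error is a hypothetical error path.

     void *p = NULL;
     int rc = posix_memalign(&p, 64, 1024);
     if (rc != 0)
       handle_error(rc);
     else
       free(p);
*/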
955
956/*
957 valloc(size_t n);
958 Equivalent to memalign(pagesize, n), where pagesize is the page
959 size of the system. If the pagesize is unknown, 4096 is used.
960*/
961#if __STD_C
962Void_t* public_vALLOc(size_t) __attribute_malloc__;
963#else
964Void_t* public_vALLOc();
965#endif
966
967
968
969/*
970 mallopt(int parameter_number, int parameter_value)
971 Sets tunable parameters. The format is to provide a
972 (parameter-number, parameter-value) pair. mallopt then sets the
973 corresponding parameter to the argument value if it can (i.e., so
974 long as the value is meaningful), and returns 1 if successful else
975 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
976 normally defined in malloc.h. Only one of these (M_MXFAST) is used
977 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
978 so setting them has no effect. But this malloc also supports four
979 other options in mallopt. See below for details. Briefly, supported
980 parameters are as follows (listed defaults are for "typical"
981 configurations).
982
983 Symbol param # default allowed param values
984 M_MXFAST 1 64 0-80 (0 disables fastbins)
985 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
986 M_TOP_PAD -2 0 any
987 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
988 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
989*/
990#if __STD_C
991int public_mALLOPt(int, int);
992#else
993int public_mALLOPt();
994#endif
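/*
   Usage sketch with the parameters listed above: the first call disables
   fastbins entirely, the second makes requests of 128K and larger be
   served via mmap. mallopt returns 1 if the parameter was accepted and
   0 otherwise.

     mallopt(M_MXFAST, 0);
     mallopt(M_MMAP_THRESHOLD, 128 * 1024);
*/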
995
996
997/*
998 mallinfo()
999 Returns (by copy) a struct containing various summary statistics:
1000
1001 arena: current total non-mmapped bytes allocated from system
1002 ordblks: the number of free chunks
1003 smblks: the number of fastbin blocks (i.e., small chunks that
1004 have been freed but not yet reused or consolidated)
1005 hblks: current number of mmapped regions
1006 hblkhd: total bytes held in mmapped regions
1007 usmblks: the maximum total allocated space. This will be greater
1008 than current total if trimming has occurred.
1009 fsmblks: total bytes held in fastbin blocks
1010 uordblks: current total allocated space (normal or mmapped)
1011 fordblks: total free space
1012 keepcost: the maximum number of bytes that could ideally be released
1013 back to system via malloc_trim. ("ideally" means that
1014 it ignores page restrictions etc.)
1015
1016 Because these fields are ints, but internal bookkeeping may
1017 be kept as longs, the reported values may wrap around zero and
1018 thus be inaccurate.
1019*/
1020#if __STD_C
1021struct mallinfo public_mALLINFo(void);
1022#else
1023struct mallinfo public_mALLINFo();
1024#endif
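/*
   Usage sketch: the struct is returned by copy, so the caller owns it and
   no cleanup is needed. Keep in mind that the fields are ints and may
   wrap for very large heaps, as noted above.

     struct mallinfo mi = mallinfo();
     printf("in use: %d  free: %d  mmapped: %d\n",
            mi.uordblks, mi.fordblks, mi.hblkhd);
*/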
1025
1026/*
1027 pvalloc(size_t n);
1028 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1029 round up n to nearest pagesize.
1030 */
1031#if __STD_C
1032Void_t* public_pVALLOc(size_t) __attribute_malloc__;
1033#else
1034Void_t* public_pVALLOc();
1035#endif
1036
1037/*
1038 malloc_trim(size_t pad);
1039
1040 If possible, gives memory back to the system (via negative
1041 arguments to sbrk) if there is unused memory at the `high' end of
1042 the malloc pool. You can call this after freeing large blocks of
1043 memory to potentially reduce the system-level memory requirements
1044 of a program. However, it cannot guarantee to reduce memory. Under
1045 some allocation patterns, some large free blocks of memory will be
1046 locked between two used chunks, so they cannot be given back to
1047 the system.
1048
1049 The `pad' argument to malloc_trim represents the amount of free
1050 trailing space to leave untrimmed. If this argument is zero,
1051 only the minimum amount of memory to maintain internal data
1052 structures will be left (one page or less). Non-zero arguments
1053 can be supplied to maintain enough trailing space to service
1054 future expected allocations without having to re-obtain memory
1055 from the system.
1056
1057 Malloc_trim returns 1 if it actually released any memory, else 0.
1058 On systems that do not support "negative sbrks", it will always
1059 return 0.
1060*/
1061#if __STD_C
1062int public_mTRIm(size_t);
1063#else
1064int public_mTRIm();
1065#endif
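/*
   Usage sketch: after freeing a large block (big_buffer stands for such an
   allocation), ask the allocator to return unused top-of-heap memory to
   the system while keeping roughly 64K of slack for future requests.

     free(big_buffer);
     if (malloc_trim(64 * 1024) == 1)
       fputs("memory returned to the system\n", stderr);
*/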
1066
1067/*
1068 malloc_usable_size(Void_t* p);
1069
1070 Returns the number of bytes you can actually use in
1071 an allocated chunk, which may be more than you requested (although
1072 often not) due to alignment and minimum size constraints.
1073 You can use this many bytes without worrying about
1074 overwriting other allocated objects. This is not a particularly great
1075 programming practice. malloc_usable_size can be more useful in
1076 debugging and assertions, for example:
1077
1078 p = malloc(n);
1079 assert(malloc_usable_size(p) >= 256);
1080
1081*/
1082#if __STD_C
1083size_t public_mUSABLe(Void_t*);
1084#else
1085size_t public_mUSABLe();
1086#endif
1087
1088/*
1089 malloc_stats();
1090 Prints on stderr the amount of space obtained from the system (both
1091 via sbrk and mmap), the maximum amount (which may be more than
1092 current if malloc_trim and/or munmap got called), and the current
1093 number of bytes allocated via malloc (or realloc, etc) but not yet
1094 freed. Note that this is the number of bytes allocated, not the
1095 number requested. It will be larger than the number requested
1096 because of alignment and bookkeeping overhead. Because it includes
1097 alignment wastage as being in use, this figure may be greater than
1098 zero even when no user-level chunks are allocated.
1099
1100 The reported current and maximum system memory can be inaccurate if
1101 a program makes other calls to system memory allocation functions
1102 (normally sbrk) outside of malloc.
1103
1104 malloc_stats prints only the most commonly interesting statistics.
1105 More information can be obtained by calling mallinfo.
1106
1107*/
1108#if __STD_C
1109void public_mSTATs();
1110#else
1111void public_mSTATs();
1112#endif
1113
1114/* mallopt tuning options */
1115
1116/*
1117 M_MXFAST is the maximum request size used for "fastbins", special bins
1118 that hold returned chunks without consolidating their spaces. This
1119 enables future requests for chunks of the same size to be handled
1120 very quickly, but can increase fragmentation, and thus increase the
1121 overall memory footprint of a program.
1122
1123 This malloc manages fastbins very conservatively yet still
1124 efficiently, so fragmentation is rarely a problem for values less
1125 than or equal to the default. The maximum supported value of MXFAST
1126 is 80. You wouldn't want it any higher than this anyway. Fastbins
1127 are designed especially for use with many small structs, objects or
1128 strings -- the default handles structs/objects/arrays with sizes up
1129 to 16 4byte fields, or small strings representing words, tokens,
1130 etc. Using fastbins for larger objects normally worsens
1131 fragmentation without improving speed.
1132
1133 M_MXFAST is set in REQUEST size units. It is internally used in
1134 chunksize units, which adds padding and alignment. You can reduce
1135 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1136 algorithm to be a closer approximation of fifo-best-fit in all cases,
1137 not just for larger requests, but will generally cause it to be
1138 slower.
1139*/
1140
1141
1142/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1143#ifndef M_MXFAST
1144#define M_MXFAST 1
1145#endif
1146
1147#ifndef DEFAULT_MXFAST
1148#define DEFAULT_MXFAST 64
1149#endif
1150
1151
1152/*
1153 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1154 to keep before releasing via malloc_trim in free().
1155
1156 Automatic trimming is mainly useful in long-lived programs.
1157 Because trimming via sbrk can be slow on some systems, and can
1158 sometimes be wasteful (in cases where programs immediately
1159 afterward allocate more large chunks) the value should be high
1160 enough so that your overall system performance would improve by
1161 releasing this much memory.
1162
1163 The trim threshold and the mmap control parameters (see below)
1164 can be traded off with one another. Trimming and mmapping are
1165 two different ways of releasing unused memory back to the
1166 system. Between these two, it is often possible to keep
1167 system-level demands of a long-lived program down to a bare
1168 minimum. For example, in one test suite of sessions measuring
1169 the XF86 X server on Linux, using a trim threshold of 128K and a
1170 mmap threshold of 192K led to near-minimal long term resource
1171 consumption.
1172
1173 If you are using this malloc in a long-lived program, it should
1174 pay to experiment with these values. As a rough guide, you
1175 might set to a value close to the average size of a process
1176 (program) running on your system. Releasing this much memory
1177 would allow such a process to run in memory. Generally, it's
1178 worth it to tune for trimming rather than memory mapping when a
1179 program undergoes phases where several large chunks are
1180 allocated and released in ways that can reuse each other's
1181 storage, perhaps mixed with phases where there are no such
1182 chunks at all. And in well-behaved long-lived programs,
1183 controlling release of large blocks via trimming versus mapping
1184 is usually faster.
1185
1186 However, in most programs, these parameters serve mainly as
1187 protection against the system-level effects of carrying around
1188 massive amounts of unneeded memory. Since frequent calls to
1189 sbrk, mmap, and munmap otherwise degrade performance, the default
1190 parameters are set to relatively high values that serve only as
1191 safeguards.
1192
1193 The trim value must be greater than page size to have any useful
1194 effect. To disable trimming completely, you can set to
1195 (unsigned long)(-1)
1196
1197 Trim settings interact with fastbin (MXFAST) settings: Unless
1198 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1199 freeing a chunk with size less than or equal to MXFAST. Trimming is
1200 instead delayed until subsequent freeing of larger chunks. However,
1201 you can still force an attempted trim by calling malloc_trim.
1202
1203 Also, trimming is not generally possible in cases where
1204 the main arena is obtained via mmap.
1205
1206 Note that the trick some people use of mallocing a huge space and
1207 then freeing it at program startup, in an attempt to reserve system
1208 memory, doesn't have the intended effect under automatic trimming,
1209 since that memory will immediately be returned to the system.
1210*/
1211
1212#define M_TRIM_THRESHOLD -1
1213
1214#ifndef DEFAULT_TRIM_THRESHOLD
1215#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1216#endif
1217
1218/*
1219 M_TOP_PAD is the amount of extra `padding' space to allocate or
1220 retain whenever sbrk is called. It is used in two ways internally:
1221
1222 * When sbrk is called to extend the top of the arena to satisfy
1223 a new malloc request, this much padding is added to the sbrk
1224 request.
1225
1226 * When malloc_trim is called automatically from free(),
1227 it is used as the `pad' argument.
1228
1229 In both cases, the actual amount of padding is rounded
1230 so that the end of the arena is always a system page boundary.
1231
1232 The main reason for using padding is to avoid calling sbrk so
1233 often. Having even a small pad greatly reduces the likelihood
1234 that nearly every malloc request during program start-up (or
1235 after trimming) will invoke sbrk, which needlessly wastes
1236 time.
1237
1238 Automatic rounding-up to page-size units is normally sufficient
1239 to avoid measurable overhead, so the default is 0. However, in
1240 systems where sbrk is relatively slow, it can pay to increase
1241 this value, at the expense of carrying around more memory than
1242 the program needs.
1243*/
1244
1245#define M_TOP_PAD -2
1246
1247#ifndef DEFAULT_TOP_PAD
1248#define DEFAULT_TOP_PAD (0)
1249#endif
1250
1251/*
1252 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1253 to service a request. Requests of at least this size that cannot
1254 be allocated using already-existing space will be serviced via mmap.
1255 (If enough normal freed space already exists it is used instead.)
1256
1257 Using mmap segregates relatively large chunks of memory so that
1258 they can be individually obtained and released from the host
1259 system. A request serviced through mmap is never reused by any
1260 other request (at least not directly; the system may just so
1261 happen to remap successive requests to the same locations).
1262
1263 Segregating space in this way has the benefits that:
1264
1265 1. Mmapped space can ALWAYS be individually released back
1266 to the system, which helps keep the system level memory
1267 demands of a long-lived program low.
1268 2. Mapped memory can never become `locked' between
1269 other chunks, as can happen with normally allocated chunks, which
1270 means that even trimming via malloc_trim would not release them.
1271 3. On some systems with "holes" in address spaces, mmap can obtain
1272 memory that sbrk cannot.
1273
1274 However, it has the disadvantages that:
1275
1276 1. The space cannot be reclaimed, consolidated, and then
1277 used to service later requests, as happens with normal chunks.
1278 2. It can lead to more wastage because of mmap page alignment
1279 requirements
1280 3. It causes malloc performance to be more dependent on host
1281 system memory management support routines which may vary in
1282 implementation quality and may impose arbitrary
1283 limitations. Generally, servicing a request via normal
1284 malloc steps is faster than going through a system's mmap.
1285
1286 The advantages of mmap nearly always outweigh disadvantages for
1287 "large" chunks, but the value of "large" varies across systems. The
1288 default is an empirically derived value that works well in most
1289 systems.
1290*/
1291
1292#define M_MMAP_THRESHOLD -3
1293
1294#ifndef DEFAULT_MMAP_THRESHOLD
1295#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1296#endif
1297
1298/*
1299 M_MMAP_MAX is the maximum number of requests to simultaneously
1300 service using mmap. This parameter exists because some
1301 systems have a limited number of internal tables for
1302 use by mmap, and using more than a few of them may degrade
1303 performance.
1304
1305 The default is set to a value that serves only as a safeguard.
1306 Setting to 0 disables use of mmap for servicing large requests. If
1307 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1308 to non-zero values in mallopt will fail.
1309*/
1310
1311#define M_MMAP_MAX -4
1312
1313#ifndef DEFAULT_MMAP_MAX
1314#define DEFAULT_MMAP_MAX (65536)
1315#endif
1316
1317#ifdef __cplusplus
1318}; /* end of extern "C" */
1319#endif
1320
1321/*
1322 ========================================================================
1323 To make a fully customizable malloc.h header file, cut everything
1324 above this line, put into file malloc.h, edit to suit, and #include it
1325 on the next line, as well as in programs that use this malloc.
1326 ========================================================================
1327*/
1328
1329/* #include "malloc.h" */
1330
1331/* --------------------- public wrappers ---------------------- */
1332
1333#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1334
1335/* DL_STATIC used to make functions (deep down) consistent
1336 * with prototypes (otherwise the prototypes are static
1337 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1338 * The gcc compiler doesn't care, but the HP-UX compiler does.
1339 */
1340#define DL_STATIC static
1341
1342/* Declare all routines as internal */
1343#if __STD_C
1344static Void_t* mALLOc(size_t) __attribute_malloc__;
1345static void fREe(Void_t*);
1346static Void_t* rEALLOc(Void_t*, size_t) __attribute_malloc__;
1347static Void_t* mEMALIGn(size_t, size_t) __attribute_malloc__;
1348static int posix_mEMALIGn(Void_t**, size_t, size_t);
1349static Void_t* vALLOc(size_t) __attribute_malloc__;
1350static Void_t* pVALLOc(size_t) __attribute_malloc__;
1351static Void_t* cALLOc(size_t, size_t) __attribute_malloc__;
1352static int mTRIm(size_t);
1353static size_t mUSABLe(Void_t*);
1354static void mSTATs();
1355static int mALLOPt(int, int);
1356static struct mallinfo mALLINFo(void);
1357#else
1358static Void_t* mALLOc();
1359static void fREe();
1360static Void_t* rEALLOc();
1361static Void_t* mEMALIGn();
1362static int posix_mEMALIGn();
1363static Void_t* vALLOc();
1364static Void_t* pVALLOc();
1365static Void_t* cALLOc();
1366static int mTRIm();
1367static size_t mUSABLe();
1368static void mSTATs();
1369static int mALLOPt();
1370static struct mallinfo mALLINFo();
1371#endif
1372
1373/*
1374 MALLOC_PREACTION and MALLOC_POSTACTION should be
1375 defined to return 0 on success, and nonzero on failure.
1376 The return value of MALLOC_POSTACTION is currently ignored
1377 in wrapper functions since there is no reasonable default
1378 action to take on failure.
1379*/
1380
1381
1382#ifdef USE_MALLOC_LOCK
1383
1384# ifdef WIN32
1385
1386static int mALLOC_MUTEx;
1387#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1388#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1389int dnmalloc_pthread_init(void) { return 0; }
1390
1391# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1392
1393# if defined(__NetBSD__)
1394#include <reentrant.h>
1395extern int __isthreaded;
1396static mutex_t thread_lock = MUTEX_INITIALIZER;
1397#define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock)
1398#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1399void _malloc_prefork(void) { _MALLOC_LOCK(); }
1400void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1401# endif
1402
1403# if defined(__OpenBSD__)
1404extern int __isthreaded;
1405void _thread_malloc_lock(void);
1406void _thread_malloc_unlock(void);
1407#define _MALLOC_LOCK() if (__isthreaded) _thread_malloc_lock()
1408#define _MALLOC_UNLOCK() if (__isthreaded) _thread_malloc_unlock()
1409# endif
1410
1411# if defined(__FreeBSD__)
1412extern int __isthreaded;
1413struct _spinlock {
1414 volatile long access_lock;
1415 volatile long lock_owner;
1416 volatile char *fname;
1417 volatile int lineno;
1418};
1419typedef struct _spinlock spinlock_t;
1420#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
1421void _spinlock(spinlock_t *);
1422void _spinunlock(spinlock_t *);
1423/* # include "/usr/src/lib/libc/include/spinlock.h" */
1424static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
1425spinlock_t *__malloc_lock = &thread_lock;
1426#define _MALLOC_LOCK() if (__isthreaded) _spinlock(&thread_lock)
1427#define _MALLOC_UNLOCK() if (__isthreaded) _spinunlock(&thread_lock)
1428# endif
1429
1430/* Common for all three *BSD
1431 */
1432static int malloc_active = 0;
1433static int dnmalloc_mutex_lock()
1434{
1435 _MALLOC_LOCK();
1436 if (!malloc_active)
1437 {
1438 ++malloc_active;
1439 return 0;
1440 }
1441 assert(malloc_active == 0);
1442 _MALLOC_UNLOCK();
1443 errno = EDEADLK;
1444 return 1;
1445}
1446static int dnmalloc_mutex_unlock()
1447{
1448 --malloc_active;
1449 _MALLOC_UNLOCK();
1450 return 0;
1451}
1452#define MALLOC_PREACTION dnmalloc_mutex_lock()
1453#define MALLOC_POSTACTION dnmalloc_mutex_unlock()
1454int dnmalloc_pthread_init(void) { return 0; }
1455
1456# else
1457
1458/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1459 *
1460 * Works fine on linux (no malloc in pthread_mutex_lock)
1461 * Works on HP-UX if initialized after entering main()
1462 */
1463#include <pthread.h>
1464static int malloc_active = 0;
1465void dnmalloc_fork_prepare(void);
1466void dnmalloc_fork_parent(void);
1467void dnmalloc_fork_child(void);
1468
1469#if !defined(__linux__)
1470
1471static pthread_mutex_t mALLOC_MUTEx;
1472pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1473static int dnmalloc_use_mutex = 0;
1474static void dnmalloc_pthread_init_int(void)
1475{
1476 pthread_mutexattr_t mta;
1477 pthread_mutexattr_init(&mta);
1478 pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
1479 pthread_mutex_init(&(mALLOC_MUTEx), &mta);
1480 pthread_mutexattr_destroy(&mta);
1481 pthread_atfork(dnmalloc_fork_prepare,
1482 dnmalloc_fork_parent,
1483 dnmalloc_fork_child);
1484 dnmalloc_use_mutex = 1;
1485}
1486int dnmalloc_pthread_init(void)
1487{
1488 return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
1489}
1490
1491#else
1492
1493static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1494static int dnmalloc_use_mutex = 1;
1495int dnmalloc_pthread_init(void) {
1496 return pthread_atfork(dnmalloc_fork_prepare,
1497 dnmalloc_fork_parent,
1498 dnmalloc_fork_child);
1499}
1500#endif /* !defined(__linux__) */
1501
1502void dnmalloc_fork_prepare(void) {
1503 if (dnmalloc_use_mutex)
1504 pthread_mutex_lock(&mALLOC_MUTEx);
1505}
1506void dnmalloc_fork_parent(void) {
1507 if (dnmalloc_use_mutex)
1508 pthread_mutex_unlock(&mALLOC_MUTEx);
1509}
[174]1510void dnmalloc_fork_child(void) {
[180]1511 int rc = 0;
[171]1512#ifdef __GLIBC__
1513 if (dnmalloc_use_mutex)
[174]1514 {
1515 pthread_mutex_unlock (&mALLOC_MUTEx);
1516 pthread_mutex_destroy(&mALLOC_MUTEx);
1517 rc = pthread_mutex_init(&mALLOC_MUTEx, NULL);
1518 }
[171]1519#else
1520 if (dnmalloc_use_mutex)
[174]1521 rc = pthread_mutex_unlock(&mALLOC_MUTEx);
[171]1522#endif
[174]1523 if (rc != 0)
1524 {
1525 fputs("fork_child failed", stderr);
1526 _exit(EXIT_FAILURE);
1527 }
[171]1528}
1529static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
1530{
1531 if (dnmalloc_use_mutex)
1532 {
1533 int rc = pthread_mutex_lock(mutex);
1534 if (rc == 0)
1535 {
1536 if (!malloc_active)
1537 {
1538 ++malloc_active;
1539 return 0;
1540 }
1541 assert(malloc_active == 0);
1542 (void) pthread_mutex_unlock(mutex);
1543 errno = EDEADLK;
1544 return 1;
1545 }
1546 return rc;
1547 }
1548 return 0;
1549}
1550static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
1551{
1552 if (dnmalloc_use_mutex)
1553 {
1554 --malloc_active;
1555 return pthread_mutex_unlock(mutex);
1556 }
1557 return 0;
1558}
1559# define MALLOC_PREACTION dnmalloc_mutex_lock(&mALLOC_MUTEx)
1560# define MALLOC_POSTACTION dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1561
1562# endif
1563
1564#else
1565
1566/* Substitute anything you like for these */
1567
1568# define MALLOC_PREACTION (0)
1569# define MALLOC_POSTACTION (0)
1570int dnmalloc_pthread_init(void) { return 0; }
1571
1572#endif /* USE_MALLOC_LOCK */
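/*
   Usage sketch: as noted in the changelog above, pthread locking on HP-UX
   only works if it is initialized after the application has entered
   main(), so a threaded program should call dnmalloc_pthread_init() once,
   early in main() and before creating any threads. run_application is a
   hypothetical stand-in for the program's real work.

     int main(int argc, char **argv)
     {
       (void) dnmalloc_pthread_init();
       return run_application(argc, argv);
     }
*/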
1573
1574Void_t* public_mALLOc(size_t bytes) {
1575 Void_t* m;
1576 if (MALLOC_PREACTION == 0) {
1577 m = mALLOc(bytes);
1578 (void) MALLOC_POSTACTION;
1579 return m;
1580 }
1581 return 0;
1582}
1583
1584void public_fREe(Void_t* m) {
1585 if (MALLOC_PREACTION == 0) {
1586 fREe(m);
1587 (void) MALLOC_POSTACTION;
1588 }
1589}
1590
1591Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1592 if (MALLOC_PREACTION == 0) {
1593 m = rEALLOc(m, bytes);
1594 (void) MALLOC_POSTACTION;
1595 return m;
1596 }
1597 return 0;
1598}
1599
1600Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1601 Void_t* m;
1602 if (MALLOC_PREACTION == 0) {
1603 m = mEMALIGn(alignment, bytes);
1604 (void) MALLOC_POSTACTION;
1605 return m;
1606 }
1607 return 0;
1608}
1609
1610int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1611 int m, ret;
1612 if ((ret = MALLOC_PREACTION) == 0) {
1613 m = posix_mEMALIGn(memptr, alignment, bytes);
1614 (void) MALLOC_POSTACTION;
1615 return m;
1616 }
1617 return ret;
1618}
1619
1620Void_t* public_vALLOc(size_t bytes) {
1621 Void_t* m;
1622 if (MALLOC_PREACTION == 0) {
1623 m = vALLOc(bytes);
1624 (void) MALLOC_POSTACTION;
1625 return m;
1626 }
1627 return 0;
1628}
1629
1630Void_t* public_pVALLOc(size_t bytes) {
1631 Void_t* m;
1632 if (MALLOC_PREACTION == 0) {
1633 m = pVALLOc(bytes);
1634 (void) MALLOC_POSTACTION;
1635 return m;
1636 }
1637 return 0;
1638}
1639
1640Void_t* public_cALLOc(size_t n, size_t elem_size) {
1641 Void_t* m;
1642 if (MALLOC_PREACTION == 0) {
1643 m = cALLOc(n, elem_size);
1644 (void) MALLOC_POSTACTION;
1645 return m;
1646 }
1647 return 0;
1648}
1649
1650int public_mTRIm(size_t s) {
1651 int result;
1652 if (MALLOC_PREACTION == 0) {
1653 result = mTRIm(s);
1654 (void) MALLOC_POSTACTION;
1655 return result;
1656 }
1657 return 0;
1658}
1659
1660size_t public_mUSABLe(Void_t* m) {
1661 size_t result;
1662 if (MALLOC_PREACTION == 0) {
1663 result = mUSABLe(m);
1664 (void) MALLOC_POSTACTION;
1665 return result;
1666 }
1667 return 0;
1668}
1669
1670void public_mSTATs() {
1671 if (MALLOC_PREACTION == 0) {
1672 mSTATs();
1673 (void) MALLOC_POSTACTION;
1674 }
1675}
1676
1677struct mallinfo public_mALLINFo() {
1678 struct mallinfo m;
1679 if (MALLOC_PREACTION == 0) {
1680 m = mALLINFo();
1681 (void) MALLOC_POSTACTION;
1682 return m;
1683 } else {
1684 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1685 return nm;
1686 }
1687}
1688
1689int public_mALLOPt(int p, int v) {
1690 int result;
1691 if (MALLOC_PREACTION == 0) {
1692 result = mALLOPt(p, v);
1693 (void) MALLOC_POSTACTION;
1694 return result;
1695 }
1696 return 0;
1697}
1698
1699#else
1700
1701int dnmalloc_pthread_init(void) { return 0; }
1702#define DL_STATIC
1703
1704#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1705
1706
1707
1708/* ------------- Optional versions of memcopy ---------------- */
1709
1710
1711#if USE_MEMCPY
1712
1713/*
1714 Note: memcpy is ONLY invoked with non-overlapping regions,
1715 so the (usually slower) memmove is not needed.
1716*/
1717
1718#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1719#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1720
1721#else /* !USE_MEMCPY */
1722
1723/* Use Duff's device for good zeroing/copying performance. */
1724
1725#define MALLOC_ZERO(charp, nbytes) \
1726do { \
1727 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1728 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1729 long mcn; \
1730 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1731 switch (mctmp) { \
1732 case 0: for(;;) { *mzp++ = 0; \
1733 case 7: *mzp++ = 0; \
1734 case 6: *mzp++ = 0; \
1735 case 5: *mzp++ = 0; \
1736 case 4: *mzp++ = 0; \
1737 case 3: *mzp++ = 0; \
1738 case 2: *mzp++ = 0; \
1739 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1740 } \
1741} while(0)
1742
1743#define MALLOC_COPY(dest,src,nbytes) \
1744do { \
1745 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1746 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1747 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1748 long mcn; \
1749 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1750 switch (mctmp) { \
1751 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1752 case 7: *mcdst++ = *mcsrc++; \
1753 case 6: *mcdst++ = *mcsrc++; \
1754 case 5: *mcdst++ = *mcsrc++; \
1755 case 4: *mcdst++ = *mcsrc++; \
1756 case 3: *mcdst++ = *mcsrc++; \
1757 case 2: *mcdst++ = *mcsrc++; \
1758 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1759 } \
1760} while(0)
1761
1762#endif
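
/* Illustrative note (not part of the original source; `mem' is a
 * hypothetical buffer): with the Duff's-device variant above, a call such as
 *
 *     MALLOC_ZERO(mem, 64);
 *
 * on a system with an 8-byte INTERNAL_SIZE_T gives mctmp == 8, hence
 * mcn == 0 and mctmp %= 8 == 0, so the unrolled switch writes all eight
 * words in a single pass. The exact counts depend on sizeof(INTERNAL_SIZE_T).
 */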
1763
1764/* ------------------ MMAP support ------------------ */
1765
1766
1767#if defined(HAVE_FCNTL_H)
1768#include <fcntl.h>
1769#endif
1770
1771#if defined(HAVE_SYS_MMAN_H)
1772#include <sys/mman.h>
1773#endif
1774
1775#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1776#define MAP_ANONYMOUS MAP_ANON
1777#endif
1778
1779/*
1780 Nearly all versions of mmap support MAP_ANONYMOUS,
1781 so the following is unlikely to be needed, but is
1782 supplied just in case.
1783*/
1784
1785#ifndef MAP_ANONYMOUS
1786
1787/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1788 */
1789void * anon_mmap (void *addr, size_t length, int prot, int flags)
1790{
1791 void * retval = NULL;
1792 int dev_zero_fd = -1; /* File descriptor for /dev/zero. */
1793
1794 dev_zero_fd = open("/dev/zero", O_RDWR);
1795 if (dev_zero_fd >= 0)
1796 {
1797 retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
1798 /* closing the file descriptor does not unmap the region */
1799 close(dev_zero_fd);
1800 }
1801 return retval;
1802}
1803
1804#define MMAP(addr, size, prot, flags) \
1805 (anon_mmap((addr), (size), (prot), (flags)))
1806
1807
1808#else /* have MAP_ANONYMOUS */
1809
1810#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1811#define MAP_32BIT MAP_ADDR32
1812#endif
1813
1814#if defined(MAP_32BIT)
1815#define MMAP(addr, size, prot, flags) \
1816 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1817#elif defined(__sun)
1818/*
1819 * Hint an address within 32bit address space
1820 */
1821#define MMAP(addr, size, prot, flags) \
1822 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1823#else
1824/* *BSD */
1825#define MMAP(addr, size, prot, flags) \
1826 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1827#endif
1828
1829#endif /* have MAP_ANONYMOUS */
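
/* Illustrative usage (not part of the original source): anonymous mappings
 * in this file go through the MMAP() wrapper defined above, e.g.
 *
 *     void *p = MMAP(0, 16 * 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE);
 *     if (p == MAP_FAILED) { abort(); }
 *
 * Where MAP_ANONYMOUS is missing, the /dev/zero fallback is used; where it
 * is available, the wrapper also hints the mapping into the lower 32bit
 * address space (MAP_32BIT or a fixed hint address), which helps keep
 * hash(chunk) below AMOUNTHASH on 64bit systems.
 */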
1830
1831
1832/*
1833 ----------------------- Chunk representations -----------------------
1834*/
1835
1836typedef void * mchunkptr;
1837
1838struct chunkinfo {
1839 INTERNAL_SIZE_T prev_size; /* Size of previous chunk, in bytes */
1840 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1841 INTERNAL_SIZE_T req; /* Original request size, for guard. */
1842 struct chunkinfo* hash_next; /* pointer to the next chunkinfo in the
1843 linked list of chunks that hash to
1844 the same hashtable slot */
1845 struct chunkinfo* fd; /* double links -- used only if free. */
1846 struct chunkinfo* bk;
1847 mchunkptr chunk;
1848};
1849
1850typedef struct chunkinfo* chunkinfoptr;
1851
1852struct cireginfo {
1853 unsigned long position;
1854 unsigned long *freebitmap;
1855 struct cireginfo* next;
1856 struct chunkinfo *freelist;
1857 struct chunkinfo *begin;
1858 unsigned long freecounter;
1859};
1860
1861/*
1862 ---------- Size and alignment checks and conversions ----------
1863*/
1864
1865/* conversion from malloc headers to user pointers, and back */
1866#define chunk(p) (p->chunk)
1867
1868
1869#define chunk2mem(p) (chunk(p))
1870#define mem2chunk(mem) (hashtable_lookup(mem))
1871
1872/* The smallest possible chunk */
1873#define MIN_CHUNK_SIZE 16
1874
1875/* The smallest size we can malloc is an aligned minimal chunk */
1876
1877#define MINSIZE \
1878 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1879
1880/* Check if m has acceptable alignment */
1881
1882#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1883
1884#define GUARD_SIZE 4
1885
1886/*
1887 Check if a request is so large that it would wrap around zero when
1888 padded and aligned. To simplify some other code, the bound is made
1889 low enough so that adding MINSIZE will also not wrap around zero.
1890
1891 Make it 4*MINSIZE.
1892*/
1893
1894#define REQUEST_OUT_OF_RANGE(req) \
1895 ((CHUNK_SIZE_T)(req) >= \
1896 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))
1897
1898/* pad request bytes into a usable size -- internal version */
1899
1900#define request2size(req) \
1901 (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE) ? \
1902 ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1903 MINSIZE)
1904
1905/* Same, except also perform argument check */
1906
1907#define checked_request2size(req, sz) \
1908 if (!REQUEST_OUT_OF_RANGE(req)) { \
1909 (sz) = request2size(req); \
1910 assert((sz-req) >= GUARD_SIZE); \
1911 } else { \
1912 MALLOC_FAILURE_ACTION; \
1913 return 0; \
1914 }
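
/* Worked example (illustrative, assuming 8-byte alignment, i.e.
 * MALLOC_ALIGN_MASK == 7, and GUARD_SIZE == 4):
 *
 *     request2size(20) == (20 + 4 + 7) & ~7 == 24
 *     request2size(1)  == MINSIZE          (too small after padding)
 *
 * so at least GUARD_SIZE bytes always remain after the user's request,
 * which is what checked_request2size() asserts.
 */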
1915
1916#if PARANOIA > 2
1917static char * guard_set_p;
1918static char * guard_set_q;
1919
1920#define guard_set(guard, P, request, sz) \
1921 assert((sz-request) >= GUARD_SIZE); \
1922 guard_set_p = (char*)(chunk(P)); \
1923 guard_set_p += request; \
[172]1924 VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE); \
[171]1925 guard_set_q = (char*)(guard); \
1926 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1927 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1928 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1929 *guard_set_p = *guard_set_q; \
[172]1930 VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE); \
[171]1931 (P)->req = request
1932
1933#define guard_check(guard, P) \
[172]1934 VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1935 assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1936 VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
[171]1937
1938#else
1939#define guard_set(guard, P, request, sz) ((void)0)
1940#define guard_check(guard, P) ((void)0)
1941#endif /* PARANOIA > 2 */
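
/* Illustrative note (not part of the original source): with PARANOIA > 2
 * the 4 canary bytes (copied from av->guard_stored, which is filled from
 * dnmalloc_arc4random() at init) sit directly after the bytes the caller
 * asked for:
 *
 *     p = malloc(20);   chunk layout: [ 20 user bytes | 4 canary | padding ]
 *     free(p);          guard_check() compares the 4 bytes against
 *                       av->guard_stored and asserts on a mismatch,
 *                       catching small overflows past the end of p.
 */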
1942
1943/* dnmalloc forward declarations */
1944static char * dnmalloc_arc4random(void);
1945static void dnmalloc_init (void);
1946static void malloc_mmap_state(void);
1947static void cireg_extend (void);
1948static chunkinfoptr cireg_getfree (void);
1949static void hashtable_add (chunkinfoptr ci);
1950static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1951static void hashtable_remove (mchunkptr p);
1952static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1953static chunkinfoptr hashtable_lookup (mchunkptr p);
1954static chunkinfoptr next_chunkinfo (chunkinfoptr ci);
1955static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1956
1957
1958
1959/*
1960 --------------- Physical chunk operations ---------------
1961*/
1962
1963
1964/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1965#define PREV_INUSE 0x1
1966
1967/* extract inuse bit of previous chunk */
1968#define prev_inuse(p) ((p)->size & PREV_INUSE)
1969
1970/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1971#define IS_MMAPPED 0x2
1972
1973/* check for mmap()'ed chunk */
1974#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1975
1976
1977/* size field is or'ed when the chunk is in use */
1978#define INUSE 0x4
1979
1980/* extract inuse bit of chunk */
1981#define inuse(p) ((p)->size & INUSE)
1982
1983/*
1984 Bits to mask off when extracting size
1985
1986 Note: IS_MMAPPED is intentionally not masked off from size field in
1987 macros for which mmapped chunks should never be seen. This should
1988 cause helpful core dumps to occur if it is tried by accident by
1989 people extending or adapting this malloc.
1990*/
1991#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
1992
1993/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
1994#define SIZE_NOMMAP (PREV_INUSE|INUSE)
1995
1996/* Get size, ignoring use bits */
1997#define chunksize(p) ((p)->size & ~(SIZE_BITS))
1998
1999/* Ptr to chunkinfo of next physical malloc_chunk. */
2000#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
2001
2002/* Treat space at ptr + offset as a chunk */
2003#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2004
2005/* set/clear chunk as being inuse without otherwise disturbing */
2006#define set_inuse(p) ((p)->size |= INUSE)
2007
2008#define clear_inuse(p) ((p)->size &= ~(INUSE))
2009
2010#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2011
2012#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2013
2014static void set_previnuse_next (chunkinfoptr p)
2015{
2016 chunkinfoptr q;
2017 q = next_chunkinfo (p);
2018 if (q)
2019 set_previnuse (q);
2020}
2021
2022#define set_all_inuse(p) \
2023set_inuse(p); \
2024set_previnuse_next(p);
2025
2026
2027/* Set size at head, without disturbing its use bit */
2028#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2029
2030/* Set size/use field */
2031#define set_head(p, s) ((p)->size = (s))
2032
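
/* Worked example (illustrative values): the low three bits of `size' are
 * flag bits, so a chunkinfo with
 *
 *     p->size == 0x65  ==  0x60 | INUSE | PREV_INUSE
 *
 * has chunksize(p) == 0x60, inuse(p) and prev_inuse(p) both non-zero, and
 * chunk_is_mmapped(p) == 0. set_head_size(p, s) keeps the non-mmap flag
 * bits while replacing the size.
 */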
2033/*
2034 Bins
2035
2036 An array of bin headers for free chunks. Each bin is doubly
2037 linked. The bins are approximately proportionally (log) spaced.
2038 There are a lot of these bins (NBINS == 96 here). This may look excessive, but
2039 works very well in practice. Most bins hold sizes that are
2040 unusual as malloc request sizes, but are more usual for fragments
2041 and consolidated sets of chunks, which is what these bins hold, so
2042 they can be found quickly. All procedures maintain the invariant
2043 that no consolidated chunk physically borders another one, so each
2044 chunk in a list is known to be preceded and followed by either
2045 inuse chunks or the ends of memory.
2046
2047 Chunks in bins are kept in size order, with ties going to the
2048 approximately least recently used chunk. Ordering isn't needed
2049 for the small bins, which all contain the same-sized chunks, but
2050 facilitates best-fit allocation for larger chunks. These lists
2051 are just sequential. Keeping them in order almost never requires
2052 enough traversal to warrant using fancier ordered data
2053 structures.
2054
2055 Chunks of the same size are linked with the most
2056 recently freed at the front, and allocations are taken from the
2057 back. This results in LRU (FIFO) allocation order, which tends
2058 to give each chunk an equal opportunity to be consolidated with
2059 adjacent freed chunks, resulting in larger free chunks and less
2060 fragmentation.
2061
2062 To simplify use in double-linked lists, each bin header acts
2063 as a malloc_chunk. This avoids special-casing for headers.
2064 But to conserve space and improve locality, we allocate
2065 only the fd/bk pointers of bins, and then use repositioning tricks
2066 to treat these as the fields of a malloc_chunk*.
2067*/
2068
2069typedef struct chunkinfo* mbinptr;
2070
2071/* addressing -- note that bin_at(0) does not exist */
2072#define bin_at(m, i) (&(m)->bins[i])
2073
2074/* analog of ++bin */
2075#define next_bin(b) (b+1)
2076
2077/* Reminders about list directionality within bins */
2078#define first(b) ((b)->fd)
2079#define last(b) ((b)->bk)
2080
2081/* Take a chunk off a bin list */
2082#define unlink(P, BK, FD) { \
2083 FD = P->fd; \
2084 BK = P->bk; \
2085 FD->bk = BK; \
2086 BK->fd = FD; \
2087}
2088
2089/*
2090 Indexing
2091
2092 Bins for sizes < MIN_LARGE_SIZE contain chunks of all the same size, spaced
2093 8 bytes apart. Larger bins are approximately logarithmically spaced:
2094
2095 64 bins of size 8
2096 32 bins of size 64
2097 16 bins of size 512
2098 8 bins of size 4096
2099 4 bins of size 32768
2100 2 bins of size 262144
2101 1 bin of size what's left
2102
2103 The bins top out around 1MB because we expect to service large
2104 requests via mmap.
2105*/
2106
2107#define NBINS 96
2108#define NSMALLBINS 32
2109#define SMALLBIN_WIDTH 8
2110#define MIN_LARGE_SIZE 256
2111
2112#define in_smallbin_range(sz) \
2113 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2114
2115#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2116
2117/*
2118 Compute index for size. We expect this to be inlined when
2119 compiled with optimization, else not, which works out well.
2120*/
2121static int largebin_index(size_t sz) {
2122
2123 unsigned long xx = sz >> SMALLBIN_WIDTH;
2124
2125 if (xx < 0x10000)
2126 {
2127 unsigned int m; /* bit position of the highest set bit */
2128
2129 /* On intel, use BSRL instruction to find highest bit */
2130#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)
2131
2132 unsigned int x = (unsigned int) xx;
2133
2134 __asm__("bsrl %1,%0\n\t"
2135 : "=r" (m)
2136 : "rm" (x));
2137
2138#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)
2139
2140 __asm__("bsrq %1,%0\n\t"
2141 : "=r" (m)
2142 : "rm" (xx));
2143
2144#else
2145
2146 /* Taken from Bit Twiddling Hacks
2147 * http://graphics.stanford.edu/~seander/bithacks.html
2148 * public domain
2149 */
2150 unsigned int v = (unsigned int) xx;
2151 register unsigned int shift;
2152
2153 m = (v > 0xFFFF) << 4; v >>= m;
2154 shift = (v > 0xFF ) << 3; v >>= shift; m |= shift;
2155 shift = (v > 0xF ) << 2; v >>= shift; m |= shift;
2156 shift = (v > 0x3 ) << 1; v >>= shift; m |= shift;
2157 m |= (v >> 1);
2158
2159#endif
2160
2161 /* Use next 2 bits to create finer-granularity bins */
2162 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2163 }
2164 else
2165 {
2166 return NBINS-1;
2167 }
2168}
2169
2170#define bin_index(sz) \
2171 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
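
/* Worked examples (illustrative, by inspection of the macros above):
 *
 *     bin_index(64)   == smallbin_index(64)   == 64 >> 3              == 8
 *     bin_index(4096) == largebin_index(4096)
 *                     == 32 + (4 << 2) + ((4096 >> 10) & 3)           == 48
 *
 * (for 4096, xx = 4096 >> 8 = 16, whose highest set bit is bit 4, so
 *  m == 4). Sizes of 2^24 bytes and up all fall into the last bin, NBINS-1.
 */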
2172
2173/*
2174 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2175 first bin that is maintained in sorted order. This must
2176 be the smallest size corresponding to a given bin.
2177
2178 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2179 best fit guarantees to sometimes speed up malloc by increasing value.
2180 Doing this means that malloc may choose a chunk that is
2181 non-best-fitting by up to the width of the bin.
2182
2183 Some useful cutoff values:
2184 512 - all bins sorted
2185 2560 - leaves bins <= 64 bytes wide unsorted
2186 12288 - leaves bins <= 512 bytes wide unsorted
2187 65536 - leaves bins <= 4096 bytes wide unsorted
2188 262144 - leaves bins <= 32768 bytes wide unsorted
2189 -1 - no bins sorted (not recommended!)
2190*/
2191
2192/* #define FIRST_SORTED_BIN_SIZE 65536 */
2193
2194#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2195
[172]2196
[171]2197/*
2198 Unsorted chunks
2199
2200 All remainders from chunk splits, as well as all returned chunks,
2201 are first placed in the "unsorted" bin. They are then placed
2202 in regular bins after malloc gives them ONE chance to be used before
2203 binning. So, basically, the unsorted_chunks list acts as a queue,
2204 with chunks being placed on it in free (and malloc_consolidate),
2205 and taken off (to be either used or placed in bins) in malloc.
2206*/
2207
2208/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2209#define unsorted_chunks(M) (bin_at(M, 1))
2210
2211/*
2212 Top
2213
2214 The top-most available chunk (i.e., the one bordering the end of
2215 available memory) is treated specially. It is never included in
2216 any bin, is used only if no other chunk is available, and is
2217 released back to the system if it is very large (see
2218 M_TRIM_THRESHOLD). Because top initially
2219 points to its own bin with initial zero size, thus forcing
2220 extension on the first malloc request, we avoid having any special
2221 code in malloc to check whether it even exists yet. But we still
2222 need to do so when getting memory from system, so we make
2223 initial_top treat the bin as a legal but unusable chunk during the
2224 interval between initialization and the first call to
2225 sYSMALLOc. (This is somewhat delicate, since it relies on
2226 the 2 preceding words to be zero during this interval as well.)
2227*/
2228
2229/* Conveniently, the unsorted bin can be used as dummy top on first call */
2230#define initial_top(M) (unsorted_chunks(M))
2231
2232/*
2233 Binmap
2234
2235 To help compensate for the large number of bins, a one-level index
2236 structure is used for bin-by-bin searching. `binmap' is a
2237 bitvector recording whether bins are definitely empty so they can
2238 be skipped over during traversals. The bits are NOT always
2239 cleared as soon as bins are empty, but instead only
2240 when they are noticed to be empty during traversal in malloc.
2241*/
2242
2243/* Conservatively use 32 bits per map word, even if on 64bit system */
2244#define BINMAPSHIFT 5
2245#define BITSPERMAP (1U << BINMAPSHIFT)
2246#define BINMAPSIZE (NBINS / BITSPERMAP)
2247
2248#define idx2block(i) ((i) >> BINMAPSHIFT)
2249#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2250
2251#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2252#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2253#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
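
/* Worked example (illustrative): with 32 bits per map word, bin 40 lives in
 * map word idx2block(40) == 40 >> 5 == 1 at bit idx2bit(40) ==
 * 1 << (40 & 31) == 1 << 8, so mark_bin(m, 40) sets binmap[1] |= 0x100.
 */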
2254
2255/*
2256 Fastbins
2257
2258 An array of lists holding recently freed small chunks. Fastbins
2259 are not doubly linked. It is faster to single-link them, and
2260 since chunks are never removed from the middles of these lists,
2261 double linking is not necessary. Also, unlike regular bins, they
2262 are not even processed in FIFO order (they use faster LIFO) since
2263 ordering doesn't much matter in the transient contexts in which
2264 fastbins are normally used.
2265
2266 Chunks in fastbins keep their inuse bit set, so they cannot
2267 be consolidated with other free chunks. malloc_consolidate
2268 releases all chunks in fastbins and consolidates them with
2269 other free chunks.
2270*/
2271
2272typedef struct chunkinfo* mfastbinptr;
2273
2274/* offset 2 to use otherwise unindexable first 2 bins */
2275#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2276
2277/* The maximum fastbin request size we support */
2278#define MAX_FAST_SIZE 80
2279
2280#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
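
/* Worked example (illustrative, assuming 8-byte alignment and GUARD_SIZE 4):
 * request2size(MAX_FAST_SIZE) == request2size(80) == 88, so
 * fastbin_index(88) == (88 >> 3) - 2 == 9 and NFASTBINS == 10.
 * A freed chunk of size 32 would go to fastbin_index(32) == 2.
 */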
2281
2282/*
2283 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2284 that triggers automatic consolidation of possibly-surrounding
2285 fastbin chunks. This is a heuristic, so the exact value should not
2286 matter too much. It is defined at half the default trim threshold as a
2287 compromise heuristic to only attempt consolidation if it is likely
2288 to lead to trimming. However, it is not dynamically tunable, since
2289 consolidation reduces fragmentation surrounding large chunks even
2290 if trimming is not used.
2291*/
2292
2293#define FASTBIN_CONSOLIDATION_THRESHOLD \
2294 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2295
2296/*
2297 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2298 they are used as flags.
2299*/
2300
2301/*
2302 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2303 freed chunks at all. It is set true when entering a chunk into any
2304 bin.
2305*/
2306
2307#define ANYCHUNKS_BIT (1U)
2308
2309#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2310#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2311#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2312
2313/*
2314 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2315 some fastbin chunks. It is set true on entering a chunk into any
2316 fastbin, and cleared only in malloc_consolidate.
2317*/
2318
2319#define FASTCHUNKS_BIT (2U)
2320
2321#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2322#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2323#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2324
2325/*
2326 Set value of max_fast.
2327 Use impossibly small value if 0.
2328*/
2329
2330#define set_max_fast(M, s) \
2331 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2332 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2333
2334#define get_max_fast(M) \
2335 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
2336
2337
2338/*
2339 morecore_properties is a status word holding dynamically discovered
2340 or controlled properties of the morecore function
2341*/
2342
2343#define MORECORE_CONTIGUOUS_BIT (1U)
2344
2345#define contiguous(M) \
2346 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2347#define noncontiguous(M) \
2348 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2349#define set_contiguous(M) \
2350 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2351#define set_noncontiguous(M) \
2352 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2353
2354#define MORECORE_32BIT_BIT (2U)
2355
2356#define morecore32bit(M) \
2357 (((M)->morecore_properties & MORECORE_32BIT_BIT))
2358#define nonmorecore32bit(M) \
2359 (((M)->morecore_properties & MORECORE_32BIT_BIT) == 0)
2360#define set_morecore32bit(M) \
2361 ((M)->morecore_properties |= MORECORE_32BIT_BIT)
2362#define set_nonmorecore32bit(M) \
2363 ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2364
2365
2366
2367/* ----------------- dnmalloc -------------------- */
2368
2369/* size of pages */
2370#define PGSIZE malloc_getpagesize
2371/* pointer size */
2372#define PTRSIZE sizeof(long)
2373
2374
2375
2376/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2377 address space: the hashtable has granularity of 16*8, set it to something
2378 closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2379 page)
2380*/
2381
2382/* The maximum heap size that dnmalloc can operate with
2383 * represented in hex to avoid annoying gcc warning
2384 *
2385 * Avoid integer overflow, cover complete 32bit address
2386 * space for portability. With deferred allocation, the
2387 * hashtable size is a non-issue.
2388 */
2389#define HEAPMAXSIZE_HALF 0x80000000UL
2390
2391/* How many elements are stored in the linked list */
2392#define LINKEDLSTELS 8
2393
2394/* Minimum size of a chunk */
2395
[172]2396#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2397# define MINCHUNKSIZE 32
2398#else
2399# define MINCHUNKSIZE 16
2400#endif
2401
2402
2403 /* The number of hashtable entries for each page:
2404 Page size divided by the number of elements in the linked lists
2405divided by the minimum chunk size
2406*/
2407#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2408
2409 /* The number of hashtable entries needed to manage the memory:
2410 Maximum heap size divided by page size, multiplied by the number
2411 of chunkinfos per page
2412*/
2413#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2414
2415/* Initial size of the map for the hashtable
2416 Amount of entries multiplied by pointer size
2417*/
2418#define HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
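
/* Worked example (illustrative, assuming PGSIZE == 4096, MINCHUNKSIZE == 16
 * and 4-byte pointers): CHUNKINFOPAGE == 4096 / 8 / 16 == 32, AMOUNTHASH ==
 * (0x80000000 / 4096) * 32 * 2 == 33554432 entries, and HASHTABLESIZE ==
 * 33554432 * 4 bytes == 128 MiB of address space (the 64bit constants work
 * out to the same 128 MiB). Only pages that are actually touched get backed
 * by real memory (MAP_NORESERVE / deferred allocation), so this is an
 * address-space cost rather than a RAM cost.
 */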
2419
2420/* Amount of free chunks that the system should allocate at the start */
2421#define NUMBER_FREE_CHUNKS 32768
2422
2423/* Initial size of the chunk info region,
2424also used when growing the region */
2425#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
2426
2427/* Start address of the heap */
2428char *startheap;
2429
2430/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2431chunkinfoptr *hashtable;
2432
2433/* Current chunkinfo region */
2434struct cireginfo *currciinfo = 0;
2435struct cireginfo *firstciinfo = 0;
2436
2437unsigned long totalcictr = 0;
2438
2439
2440/* Initialize the area for chunkinfos and the hashtable and protect
2441 * it with non-writable pages
2442 */
2443static void
2444dnmalloc_init ()
2445{
2446 void *hashtb;
2447 int mprot;
2448 int flags = MAP_PRIVATE;
2449
2450 /* Allocate the malloc_state struct */
2451 malloc_mmap_state();
2452
2453 /* Use MAP_NORESERVE if available (Solaris, HP-UX); most other
2454 * systems use deferred allocation anyway.
2455 */
2456#ifdef MAP_NORESERVE
2457 flags |= MAP_NORESERVE;
2458#endif
2459
2460 /* Always start at 0, hashtable covers whole 32bit address space
2461 */
2462#define STARTHEAP_IS_ZERO
2463 startheap = 0;
2464
2465 /* Map space for the hashtable */
2466#if PARANOIA > 1
2467 hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
2468#else
2469 hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
2470#endif
2471
2472#ifdef NDEBUG
2473 if (hashtb == MAP_FAILED) {
2474 fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
2475 abort ();
2476 }
2477#else
2478 assert(hashtb != MAP_FAILED);
2479#endif
2480
2481 /* Protect the hashtable with a leading inaccessible guard page */
2482 mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
2483#ifdef NDEBUG
2484 if (mprot == -1) {
2485 fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
2486 strerror (errno));
2487 abort ();
2488 }
2489#else
2490 assert(mprot != -1);
2491#endif
2492
2493 /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
2494 hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);
2495
2496 /* Protect the hashtable with a trailing inaccessible guard page */
2497#if PARANOIA > 1
2498 mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
2499#ifdef NDEBUG
2500 if (mprot == -1) {
2501 fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
2502 strerror (errno));
2503 abort ();
2504 }
2505#else
2506 assert(mprot != -1);
2507#endif
2508#endif
2509}
2510
2511
2512
2513/* Extend the region for chunk infos by mapping more memory before the region */
2514static void
2515cireg_extend ()
2516{
2517 void *newcireg;
2518 int mprot;
2519 struct cireginfo *tempciinfo = 0;
2520
2521#if PARANOIA > 1
2522 newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
2523#else
2524 newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
2525#endif
2526
2527#ifdef NDEBUG
2528 if (newcireg == MAP_FAILED)
2529 {
2530 fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
2531 strerror (errno));
2532 abort ();
2533 }
2534#else
2535 assert(newcireg != MAP_FAILED);
2536#endif
2537 mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
2538#ifdef NDEBUG
2539 if (mprot == -1) {
2540 fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
2541 strerror (errno));
2542 abort ();
2543 }
2544#else
2545 assert(mprot != -1);
2546#endif
2547 newcireg = ((char*)newcireg)+PGSIZE;
2548
2549#if PARANOIA > 1
2550 mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
2551#ifdef NDEBUG
2552 if (mprot == -1) {
2553 fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
2554 strerror (errno));
2555 abort ();
2556 }
2557#else
2558 assert(mprot != -1);
2559#endif
2560#endif
2561
2562 tempciinfo = currciinfo;
2563 currciinfo = (struct cireginfo *) newcireg;
2564 if (tempciinfo)
2565 tempciinfo->next = currciinfo;
2566 currciinfo->position = 1;
2567 currciinfo->freecounter = NUMBER_FREE_CHUNKS;
2568 if (!firstciinfo)
2569 firstciinfo = currciinfo;
2570 totalcictr++;
[172]2571 VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
[171]2572}
2573
2574
2575/* Get a free chunkinfo */
2576static chunkinfoptr
2577cireg_getfree ()
2578{
2579 chunkinfoptr freeci;
2580 chunkinfoptr freelst = 0;
2581 struct cireginfo *newciinfo = firstciinfo;
2582
2583 if (newciinfo) {
2584 freelst = newciinfo->freelist;
2585
2586 if (!freelst && newciinfo->next) {
2587 do {
2588 newciinfo = newciinfo->next;
2589 freelst = newciinfo->freelist;
2590 } while (!freelst && newciinfo->next);
2591 }
2592 }
2593
2594 /* Check if there are any free chunkinfos on the list of free chunkinfos */
2595 if (freelst)
2596 {
[172]2597 freeci = freelst;
2598 newciinfo->freecounter--;
2599 newciinfo->freelist = freelst->fd;
2600
2601 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2602 sizeof(struct chunkinfo));
2603
2604 freeci->prev_size = 0;
2605 freeci->size = 0;
2606 freeci->req = 0;
2607 freeci->hash_next = NULL;
2608 freeci->fd = NULL;
2609 freeci->bk = NULL;
2610 freeci->chunk = NULL;
2611 return (freeci);
[171]2612 }
2613 else
2614 {
2615 /* No free chunkinfos; check if the chunkinfo region still has room
2616 * for a chunkinfo. If not, extend the region.
2617 */
2618 if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2619 cireg_extend ();
2620 /* Get a chunkinfo from the chunkinfo region */
2621 freeci = (chunkinfoptr) currciinfo + currciinfo->position;
2622 currciinfo->freecounter--;
2623 currciinfo->position++;
[172]2624
2625 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2626 sizeof(struct chunkinfo));
2627
[171]2628 return (freeci);
2629 }
2630}
2631
2632static void freeciregion(struct cireginfo *freeme) {
2633 /* free the chunkinfo region */
2634 struct cireginfo *newciinfo = firstciinfo;
2635 struct cireginfo *prevciinfo = firstciinfo;
2636 void *unmapme;
[172]2637
[171]2638 while (newciinfo && newciinfo != freeme) {
2639 prevciinfo = newciinfo;
2640 newciinfo = newciinfo->next;
2641 }
2642 assert(freeme == newciinfo); /* rw */
2643 assert(newciinfo != NULL); /* rw */
2644 if (newciinfo)
2645 prevciinfo->next = newciinfo->next;
2646 unmapme = (void *) ((char*)freeme - PGSIZE);
[172]2647 VALGRIND_DESTROY_MEMPOOL((char*)freeme);
[171]2648#if PARANOIA > 1
2649 munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2650#else
2651 munmap(unmapme, CIREGSIZE+PGSIZE);
2652#endif
2653}
2654
2655
2656static void freecilst_add(chunkinfoptr p) {
2657
2658 struct cireginfo *newciinfo;
[172]2659
[171]2660 newciinfo = currciinfo;
2661 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2662 p->fd = newciinfo->freelist;
2663 newciinfo->freelist = p;
2664 newciinfo->freecounter++;
[172]2665 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2666 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2667 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2668 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2669 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2670 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2671 } else {
2672 newciinfo = firstciinfo;
2673 if (newciinfo) {
2674 do {
2675 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2676 p->fd = newciinfo->freelist;
2677 newciinfo->freelist = p;
2678 newciinfo->freecounter++;
[172]2679 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2680 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2681 VALGRIND_MAKE_MEM_NOACCESS(p->size, sizeof(INTERNAL_SIZE_T));
2682 VALGRIND_MAKE_MEM_NOACCESS(p->req, sizeof(INTERNAL_SIZE_T));
2683 VALGRIND_MAKE_MEM_NOACCESS(p->bk, sizeof(struct chunkinfo*));
2684 VALGRIND_MAKE_MEM_NOACCESS(p->chunk, sizeof(mchunkptr));
[171]2685 if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2686 freeciregion(newciinfo);
2687 break;
2688 }
2689 newciinfo = newciinfo->next;
2690 } while (newciinfo);
2691 }
2692 }
2693}
2694
2695/* Calculate the hash table entry for a chunk */
2696#ifdef STARTHEAP_IS_ZERO
2697#define hash(p) (((unsigned long) p) >> 7)
2698#else
2699#define hash(p) (((unsigned long) p - (unsigned long) startheap) >> 7)
2700#endif
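
/* Worked example (illustrative): hash(p) drops the low 7 bits of the chunk
 * address, i.e. one hashtable slot covers 128 bytes of heap
 * (LINKEDLSTELS * MINCHUNKSIZE on 32bit). Two chunks at 0x08050000 and
 * 0x08050040 share slot 0x08050000 >> 7 == 0x100A00 and are kept, sorted
 * by address, on that slot's hash_next list.
 */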
2701
2702static void
2703hashtable_add (chunkinfoptr ci)
2704{
2705 chunkinfoptr temp, next;
2706 unsigned long hashval;
2707 mchunkptr cic = chunk (ci);
2708
2709 hashval = hash (cic);
2710
2711 if (hashval < AMOUNTHASH) {
2712
2713 temp = hashtable[hashval];
2714
2715#ifdef DNMALLOC_DEBUG
2716 fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
2717#endif
2718
2719 /* If no pointer to a chunk info list is stored at this location
2720 * in the hashtable or if the chunk's address is smaller than the
2721 * one present, add the chunk to the front of the linked list
2722 */
2723 if (temp == 0 || chunk (temp) > cic)
2724 {
2725 ci->hash_next = temp;
2726 hashtable[hashval] = ci;
2727 if (!temp) /* more likely case */
2728 goto out;
2729 temp->prev_size = chunksize(ci);
2730 return;
2731 }
2732 else
2733 {
2734 /* We must place the chunk in the linked list for this hashentry
2735 * Loop to end of list or to a position where temp's chunk's address
2736 * is larger than the new chunkinfo's chunk's address
2737 */
2738 if (!temp->hash_next || (chunk (temp->hash_next) > cic))
2739 {
2740 ci->hash_next = temp->hash_next;
2741 temp->hash_next = ci;
2742 }
2743 else
2744 {
2745 while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
2746 {
2747 temp = temp->hash_next;
2748 }
2749 /* Place in linked list if not already there */
2750 if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
2751 {
2752 ci->hash_next = temp->hash_next;
2753 temp->hash_next = ci;
2754 }
2755 }
2756 }
2757 }
2758 else {
2759#ifdef DNMALLOC_CHECKS
2760 if (hashval >= AMOUNTHASH) {
2761 fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
2762 abort();
2763 }
2764#else
2765 assert(hashval < AMOUNTHASH);
2766#endif
2767 }
2768
2769 out:
2770 next = next_chunkinfo(ci);
2771 if (!next)
2772 return;
2773 next->prev_size = chunksize(ci);
2774}
2775
2776static void
2777hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2778{
2779 chunkinfoptr next;
2780
2781#ifdef DNMALLOC_DEBUG
2782 fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert),
2783 (unsigned long)hash(chunk(ci_insert)));
2784#endif
2785
2786 if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2787 hashtable_add(ci_insert);
2788 }
2789 else {
[172]2790
[171]2791 ci_insert->hash_next = ci_orig->hash_next;
2792 ci_orig->hash_next = ci_insert;
2793
2794 /* added for prevsize */
2795 if (!(ci_insert->hash_next))
2796 next = next_chunkinfo(ci_insert);
2797 else
2798 next = ci_insert->hash_next;
2799
2800 if (!next)
2801 {
2802 ci_insert->prev_size = chunksize(ci_orig);
2803 }
2804 else
2805 {
2806 next->prev_size = chunksize(ci_insert);
2807 ci_insert->prev_size = chunksize(ci_orig);
2808 }
2809 }
2810}
2811
2812static void
2813hashtable_remove (mchunkptr p)
2814{
2815 chunkinfoptr prevtemp, temp;
2816 unsigned long hashval;
2817
2818 hashval = hash (p);
2819#ifdef DNMALLOC_DEBUG
2820 fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
2821#endif
2822 assert(hashval < AMOUNTHASH); /* rw */
2823 prevtemp = temp = hashtable[hashval];
2824 if (chunk (temp) == p) {
2825 hashtable[hashval] = temp->hash_next;
2826 }
2827 else
2828 {
2829 if (temp && chunk (temp) != p) {
2830 do
2831 {
2832 prevtemp = temp;
2833 temp = temp->hash_next;
2834 } while (temp && chunk (temp) != p);
2835 }
2836#ifdef DNMALLOC_CHECKS
2837 if (!temp) {
2838 fprintf (stderr,
2839 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2840 p, hashval);
2841 abort();
2842 }
2843#else
2844 assert(temp != NULL);
2845#endif
2846 prevtemp->hash_next = temp->hash_next;
2847 }
2848}
2849
[172]2850/* mmapped chunks are multiples of pagesize, no hash_nexts,
2851 * just remove from the hashtable
2852 */
[171]2853#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2854
2855static void
2856hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
2857{
2858 unsigned long hashval;
2859 chunkinfoptr next;
2860
2861#ifdef DNMALLOC_DEBUG
[172]2862 fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
[171]2863#endif
2864
2865 if (ci_orig->hash_next != ci_todelete) {
2866 hashval = hash(chunk(ci_todelete));
2867 assert(hashval < AMOUNTHASH); /* rw */
2868#ifdef DNMALLOC_CHECKS
2869 if (hashtable[hashval] != ci_todelete ) {
2870 fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
2871 }
2872#else
2873 assert(hashtable[hashval] == ci_todelete);
2874#endif
2875 hashtable[hashval] = ci_todelete->hash_next;
2876 }
2877
2878 else {
2879 ci_orig->hash_next = ci_todelete->hash_next;
2880 if (!ci_orig->hash_next) {
2881 next = next_chunkinfo(ci_orig);
2882 } else {
2883 next = ci_orig->hash_next;
2884 }
2885 if (next)
2886 next->prev_size = chunksize(ci_orig);
2887
2888 }
2889}
2890
2891
2892static chunkinfoptr
2893hashtable_lookup (mchunkptr p)
2894{
2895 chunkinfoptr ci;
2896 unsigned long hashval;
2897
2898 /* if we were called wrongly
2899 * if ((char *) p < startheap) return 0;
2900 */
2901 if ((char *) p >= startheap)
2902 {
2903 hashval = hash (p);
2904 assert(hashval < AMOUNTHASH); /* rw */
2905 ci = hashtable[hashval];
2906 if (ci && chunk (ci) == p)
2907 return ci;
2908
2909 if (ci) {
2910 do {
2911 ci = ci->hash_next;
2912 } while (ci && chunk (ci) != p);
2913 }
2914#ifdef DNMALLOC_CHECKS
2915 // This should never occur but if it does, we'd like to know
2916 if (!ci) {
2917 fprintf (stderr,
2918 "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2919 p, hashval);
2920 abort();
2921 }
2922#else
2923 assert(ci != NULL);
2924#endif
2925 return ci;
2926 }
2927 return 0;
2928}
2929
2930
2931
2932/*
2933 ----------- Internal state representation and initialization -----------
2934*/
2935
2936struct malloc_state {
2937
2938 /* The maximum chunk size to be eligible for fastbin */
2939 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2940
2941 /* Fastbins */
2942 mfastbinptr fastbins[NFASTBINS];
2943
2944 /* Base of the topmost chunk -- not otherwise kept in a bin */
2945 chunkinfoptr top;
2946
2947 /* The remainder from the most recent split of a small request */
2948 chunkinfoptr last_remainder;
2949
2950 /* Normal bins */
2951 struct chunkinfo bins[NBINS];
2952
2953 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2954 unsigned int binmap[BINMAPSIZE+1];
2955
2956 /* Tunable parameters */
2957 CHUNK_SIZE_T trim_threshold;
2958 INTERNAL_SIZE_T top_pad;
2959 INTERNAL_SIZE_T mmap_threshold;
2960
2961 /* Memory map support */
2962 int n_mmaps;
2963 int n_mmaps_max;
2964 int max_n_mmaps;
2965
2966 /* Cache malloc_getpagesize */
2967 unsigned int pagesize;
2968
2969 /* Canary */
2970 char guard_stored[GUARD_SIZE];
2971
2972 /* Track properties of MORECORE */
2973 unsigned int morecore_properties;
2974
2975 /* Statistics */
2976 INTERNAL_SIZE_T mmapped_mem;
2977 INTERNAL_SIZE_T sbrked_mem;
2978 INTERNAL_SIZE_T max_sbrked_mem;
2979 INTERNAL_SIZE_T max_mmapped_mem;
2980 INTERNAL_SIZE_T max_total_mem;
2981};
2982
2983typedef struct malloc_state *mstate;
2984
2985/*
2986 There is exactly one instance of this struct in this malloc.
2987 If you are adapting this malloc in a way that does NOT use a static
2988 malloc_state, you MUST explicitly zero-fill it before using. This
2989 malloc relies on the property that malloc_state is initialized to
2990 all zeroes (as is true of C statics).
2991*/
2992
2993static struct malloc_state * av_ = NULL; /* never directly referenced */
2994
2995/*
2996 All uses of av_ are via get_malloc_state().
2997 At most one "call" to get_malloc_state is made per invocation of
2998 the public versions of malloc and free, but other routines
2999 that in turn invoke malloc and/or free may call it more than once.
3000 Also, it is called in check* routines if DEBUG is set.
3001*/
3002
3003#define get_malloc_state() (av_)
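
/* Illustrative note (not part of the original source): internal routines
 * obtain the single, mmap-protected state with
 *
 *     mstate av = get_malloc_state();
 *
 * av_ itself is set up once, in malloc_mmap_state(), which places the
 * struct between two PROT_NONE guard pages.
 */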
3004
3005/*
3006 Initialize a malloc_state struct.
3007
3008 This is called only from within malloc_consolidate, which needs
3009 be called in the same contexts anyway. It is never called directly
3010 outside of malloc_consolidate because some optimizing compilers try
3011 to inline it at all call points, which turns out not to be an
3012 optimization at all. (Inlining it in malloc_consolidate is fine though.)
3013*/
3014
3015#if __STD_C
3016static void malloc_mmap_state(void)
3017#else
3018static void malloc_mmap_state()
3019#endif
3020{
3021 int mprot;
3022 unsigned long pagesize = malloc_getpagesize;
3023 size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3024
3025 void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3026
3027
3028#ifdef NDEBUG
3029 if (foo == MAP_FAILED) {
3030 fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3031 abort ();
3032 }
3033#else
3034 assert(foo != MAP_FAILED);
3035#endif
3036
3037 mprot = mprotect(foo, pagesize, PROT_NONE);
3038#ifdef NDEBUG
3039 if (mprot == -1) {
3040 fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3041 strerror (errno));
3042 abort ();
3043 }
3044#else
3045 assert(mprot != -1);
3046#endif
3047
3048 av_ = (struct malloc_state *) ((char*)foo + pagesize);
3049
3050 MALLOC_ZERO(av_, sizeof(struct malloc_state));
3051
3052 mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3053#ifdef NDEBUG
3054 if (mprot == -1) {
3055 fprintf (stderr,
3056 "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3057 strerror (errno));
3058 abort ();
3059 }
3060#else
3061 assert(mprot != -1);
3062#endif
3063}
3064
3065#if __STD_C
3066static void malloc_init_state(mstate av)
3067#else
3068static void malloc_init_state(av) mstate av;
3069#endif
3070{
3071 int i;
3072 mbinptr bin;
3073
3074 void * morecore_test = MORECORE(0);
3075 unsigned long hashval;
3076
3077 /* Test morecore function
3078 */
3079 set_morecore32bit(av);
3080
3081 if (morecore_test == MORECORE_FAILURE)
3082 {
3083 set_nonmorecore32bit(av);
3084 }
3085 else
3086 {
3087 /* On 64bit systems, the heap may be located above the
3088 * 32bit address space. Since mmap() probably still can be
3089 * convinced to map within 32bit, we don't use sbrk().
3090 */
3091 hashval = hash (morecore_test);
3092 if (hashval >= AMOUNTHASH)
3093 {
3094 set_nonmorecore32bit(av);
3095 }
3096 }
3097
3098
3099 /* Establish circular links for normal bins */
3100 for (i = 1; i < NBINS; ++i) {
3101 bin = bin_at(av,i);
3102 bin->fd = bin->bk = bin;
3103 }
3104
3105 av->top_pad = DEFAULT_TOP_PAD;
3106 av->n_mmaps_max = DEFAULT_MMAP_MAX;
3107 av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3108 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
3109
3110#if MORECORE_CONTIGUOUS
3111 set_contiguous(av);
3112#else
3113 set_noncontiguous(av);
3114#endif
3115
3116 set_max_fast(av, DEFAULT_MXFAST);
3117
3118 av->top = cireg_getfree ();
3119 av->top->chunk = (mchunkptr) startheap;
3120 av->top->size = 0;
3121 set_previnuse(av->top);
3122 clear_inuse(av->top);
3123 hashtable[0] = av->top;
3124 av->pagesize = malloc_getpagesize;
3125
3126 memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);
3127
3128}
3129
3130/*
3131 Other internal utilities operating on mstates
3132*/
3133
3134#if __STD_C
3135static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
3136static int sYSTRIm(size_t, mstate);
3137static void malloc_consolidate(mstate);
3138#else
3139static Void_t* sYSMALLOc();
3140static int sYSTRIm();
3141static void malloc_consolidate();
3142#endif
3143
3144/* dnmalloc functions */
3145/* needs mstate so moved here */
3146
3147static chunkinfoptr
3148next_chunkinfo (chunkinfoptr ci)
3149{
3150 mchunkptr nextp;
3151 unsigned long hashval;
3152 chunkinfoptr cinfonextp;
3153 mstate av = get_malloc_state();
3154
3155 /* If ci is the last element of its linked list (no hash_next), the
3156 next chunkinfo has to be looked up via the hashtable; otherwise we
3157 can simply return ci->hash_next (see the else branch below). */
3158 if (!ci->hash_next)
3159 {
3160 /* ci is the last element, find the next chunkinfo by
3161 * looking up the chunkinfo for the chunk that is after p's chunk
3162 */
3163 nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));
3164
3165 if (!(nextp == av->top->chunk))
3166 {
3167 hashval = hash (nextp);
3168 /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
3169 cinfonextp = hashtable[hashval];
3170 if (cinfonextp && chunk (cinfonextp) == nextp)
3171 return cinfonextp;
3172
3173#ifdef DNMALLOC_CHECKS_EXTRA
3174 /* This seems bogus; a chunkinfo may legally have no nextp if
3175 * it's the last one allocated (?)
3176 */
3177 else {
3178 if (cinfonextp)
3179 fprintf (stderr,
3180 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3181 chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
3182 else
3183 fprintf (stderr,
3184 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3185 chunk(ci), hashval, "null", "null", nextp);
3186 }
3187#endif
3188
3189 return NULL;
3190 }
3191 else
3192 {
3193 return av->top;
3194 }
3195
3196 }
3197 else
3198 {
3199 return (ci->hash_next);
3200 }
3201}
3202
3203static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3204 mchunkptr nextp;
3205 if (oldp->hash_next == newp)
3206 return 1;
3207 nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3208 if (nextp == chunk(newp))
3209 return 1;
3210 return 0;
3211}
3212
3213
3214
3215/* Get the chunkinfo of the physically previous chunk */
3216/* Since we disposed of prev_size, we need this function to find the previous */
3217
3218static chunkinfoptr
3219prev_chunkinfo (chunkinfoptr ci)
3220{
3221 unsigned int i;
3222 chunkinfoptr prev;
3223 mchunkptr prevchunk = 0;
3224 /* chunkinfoptr temp; */
3225
3226 /* Get the hashtable location of the chunkinfo */
3227 i = hash (chunk (ci));
3228 assert(i < AMOUNTHASH); /* rw */
3229
3230 /* Get the first element of the linked list of chunkinfo's that contains p */
3231 prev = hashtable[i];
3232
3233 if (ci == prev) {
3234 prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3235 i = hash(prevchunk);
3236 assert(i < AMOUNTHASH); /* rw */
3237 /* Loop over the linked list until we reach the last element */
3238 for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3239 } else {
3240 /* p is not the first element in the linked list, we can just
3241 loop over the list and return the previous
3242 */
3243 for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3244 }
3245
3246 return prev;
3247}
3248
3249
3250/*
3251 Debugging support
3252 Dnmalloc broke dlmalloc's debugging functions; they should be fixed
3253 some time in the future. For now they are left as empty macros.
3254*/
3255
3256#define check_chunk(P)
3257#define check_free_chunk(P)
3258#define check_inuse_chunk(P)
3259#define check_remalloced_chunk(P,N)
3260#define check_malloced_chunk(P,N)
3261#define check_malloc_state()
3262
3263
3264/* ----------- Routines dealing with system allocation -------------- */
3265
3266/*
3267 sysmalloc handles malloc cases requiring more memory from the system.
3268 On entry, it is assumed that av->top does not have enough
3269 space to service request for nb bytes, thus requiring that av->top
3270 be extended or replaced.
3271*/
3272
3273#if __STD_C
3274static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3275#else
3276static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3277#endif
3278{
3279 chunkinfoptr old_top; /* incoming value of av->top */
3280 INTERNAL_SIZE_T old_size; /* its size */
3281 char* old_end; /* its end address */
3282
3283 long size; /* arg to first MORECORE or mmap call */
3284 char* brk; /* return value from MORECORE */
3285
3286 long correction; /* arg to 2nd MORECORE call */
3287 char* snd_brk; /* 2nd return val */
3288
3289 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3290 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3291 char* aligned_brk; /* aligned offset into brk */
3292
3293 chunkinfoptr p; /* the allocated/returned chunk */
3294 chunkinfoptr remainder; /* remainder from allocation */
3295 chunkinfoptr fencepost; /* fencepost */
3296 CHUNK_SIZE_T remainder_size; /* its size */
3297
3298 CHUNK_SIZE_T sum; /* for updating stats */
3299
3300 size_t pagemask = av->pagesize - 1;
3301
3302#ifdef DNMALLOC_DEBUG
3303 fprintf(stderr, "Enter sysmalloc\n");
3304#endif
3305 /*
3306 If there is space available in fastbins, consolidate and retry
3307 malloc from scratch rather than getting memory from system. This
3308 can occur only if nb is in smallbin range so we didn't consolidate
3309 upon entry to malloc. It is much easier to handle this case here
3310 than in malloc proper.
3311 */
3312
3313
3314 if (have_fastchunks(av)) {
[172]3315 Void_t * retval;
[171]3316 assert(in_smallbin_range(nb));
3317 malloc_consolidate(av);
3318#ifdef DNMALLOC_DEBUG
3319 fprintf(stderr, "Return sysmalloc have_fastchunks\n");
3320#endif
[172]3321 retval = mALLOc(nb - MALLOC_ALIGN_MASK);
3322 VALGRIND_FREELIKE_BLOCK(retval, 0);
3323 return retval;
[171]3324 }
3325
3326
3327 /*
3328 If have mmap, and the request size meets the mmap threshold, and
3329 the system supports mmap, and there are few enough currently
3330 allocated mmapped regions, try to directly map this request
3331 rather than expanding top.
3332 */
3333
3334 if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3335 (av->n_mmaps < av->n_mmaps_max))) {
3336
3337 char* mm; /* return value from mmap call*/
3338
3339 /*
3340 Round up size to nearest page. For mmapped chunks, the overhead
3341 is one SIZE_SZ unit larger than for normal chunks, because there
3342 is no following chunk whose prev_size field could be used.
3343 */
3344 size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3345
3346 /* Don't try if size wraps around 0 */
3347 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3348
3349
3350 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3351
3352 if (mm != (char*)(MORECORE_FAILURE)) {
3353
[172]3354 VALGRIND_MAKE_MEM_NOACCESS(mm,size);
3355
[171]3356 /*
3357 The offset to the start of the mmapped region is stored
3358 in the prev_size field of the chunk. This allows us to adjust
3359 returned start address to meet alignment requirements here
3360 and in memalign(), and still be able to compute proper
3361 address argument for later munmap in free() and realloc().
3362 */
3363
3364 front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
3365 p = cireg_getfree();
3366
3367 if (front_misalign > 0) {
3368 correction = MALLOC_ALIGNMENT - front_misalign;
3369 p->chunk = (mchunkptr)(mm + correction);
3370 p->hash_next = (chunkinfoptr) correction;
3371 set_head(p, (size - correction) |INUSE|IS_MMAPPED);
3372 }
3373 else {
3374 p->chunk = (mchunkptr)mm;
3375 p->hash_next = 0;
3376 set_head(p, size|INUSE|IS_MMAPPED);
3377 }
3378 hashtable_add(p);
3379 /* update statistics */
3380
3381 if (++av->n_mmaps > av->max_n_mmaps)
3382 av->max_n_mmaps = av->n_mmaps;
3383
3384 sum = av->mmapped_mem += size;
3385 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3386 av->max_mmapped_mem = sum;
3387 sum += av->sbrked_mem;
3388 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3389 av->max_total_mem = sum;
3390
3391 check_chunk(p);
3392
3393#ifdef DNMALLOC_DEBUG
3394 fprintf(stderr, "Return mmapped (%lu, total %lu)\n",
3395 size, (unsigned long)/* size_t */av->max_total_mem );
3396#endif
3397 return chunk(p);
3398 }
3399 }
3400 }
3401
3402 /* Record incoming configuration of top */
3403
3404 old_top = av->top;
3405 old_size = chunksize(old_top);
3406 old_end = (char*)(chunk_at_offset(chunk(old_top), old_size));
3407
3408 brk = snd_brk = (char*)(MORECORE_FAILURE);
3409
3410 /*
3411 If not the first time through, we require old_size to be
3412 at least MINSIZE and to have prev_inuse set.
3413 */
3414
3415 /* assert((old_top == initial_top(av) && old_size == 0) ||
3416 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3417 prev_inuse(old_top))); */
3418
3419 /* Precondition: not enough current space to satisfy nb request */
3420 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3421
3422 /* Precondition: all fastbins are consolidated */
3423 assert(!have_fastchunks(av));
3424
3425 /* Request enough space for nb + pad + overhead */
3426 size = nb + av->top_pad + MINSIZE;
3427
3428 /*
3429 If contiguous, we can subtract out existing space that we hope to
3430 combine with new space. We add it back later only if
3431 we don't actually get contiguous space.
3432 */
3433 if (contiguous(av))
3434 size -= old_size;
3435
3436 /*
3437 Round to a multiple of page size.
3438 If MORECORE is not contiguous, this ensures that we only call it
3439 with whole-page arguments. And if MORECORE is contiguous and
3440 this is not first time through, this preserves page-alignment of
3441 previous calls. Otherwise, we correct to page-align below.
3442 */
3443
3444 size = (size + pagemask) & ~pagemask;
3445
3446 /*
3447 Don't try to call MORECORE if argument is so big as to appear
3448 negative. Note that since mmap takes size_t arg, it may succeed
3449 below even if we cannot call MORECORE.
3450 */
3451 if (size > 0 && morecore32bit(av))
3452 brk = (char*)(MORECORE(size));
3453
3454 /*
3455 If have mmap, try using it as a backup when MORECORE fails or
3456 cannot be used. This is worth doing on systems that have "holes" in
3457 address space, so sbrk cannot extend to give contiguous space, but
3458 space is available elsewhere. Note that we ignore mmap max count
3459 and threshold limits, since the space will not be used as a
3460 segregated mmap region.
3461 */
3462 if (brk != (char*)(MORECORE_FAILURE)) {
3463 av->sbrked_mem += size;
[172]3464 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
[171]3465 }
3466
3467 else {
3468
3469#ifdef DNMALLOC_DEBUG
3470 fprintf(stderr, "Morecore failure in sysmalloc\n");
3471#endif
3472
3473 /* Cannot merge with old top, so add its size back in */
3474 if (contiguous(av))
3475 size = (size + old_size + pagemask) & ~pagemask;
3476
3477 /* If we are relying on mmap as backup, then use larger units */
3478 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3479 size = MMAP_AS_MORECORE_SIZE;
3480
3481 /* Don't try if size wraps around 0 */
3482 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3483
3484#ifdef DNMALLOC_DEBUG
3485 fprintf(stderr, "Try mmap in sysmalloc\n");
3486#endif
3487 brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3488
3489 if (brk != (char*)(MORECORE_FAILURE)) {
3490
[172]3491 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3492
[171]3493 av->mmapped_mem += size;
3494#ifdef DNMALLOC_DEBUG
3495 fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
3496#endif
3497
3498 /* We do not need, and cannot use, another sbrk call to find end */
3499 snd_brk = brk + size;
3500
3501 /*
3502 Record that we no longer have a contiguous sbrk region.
3503 After the first time mmap is used as backup, we do not
3504 ever rely on contiguous space since this could incorrectly
3505 bridge regions.
3506 */
3507 set_noncontiguous(av);
3508 }
3509 }
3510 }
3511
3512 if (brk != (char*)(MORECORE_FAILURE)) {
3513#ifdef DNMALLOC_DEBUG
3514 fprintf(stderr, "Success path %lu allocated, sbrked %lu\n",
3515 size, (unsigned long)av->sbrked_mem);
3516#endif
3517 /* av->sbrked_mem += size; moved up */
3518
3519 /*
3520 If MORECORE extends previous space, we can likewise extend top size.
3521 */
3522
3523 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3524 set_head(old_top, (size + old_size) | PREV_INUSE);
3525#ifdef DNMALLOC_DEBUG
3526 fprintf(stderr, "Previous space extended\n");
3527#endif
3528 }
3529
3530 /*
3531 Otherwise, make adjustments:
3532
3533 * If the first time through or noncontiguous, we need to call sbrk
3534 just to find out where the end of memory lies.
3535
3536 * We need to ensure that all returned chunks from malloc will meet
3537 MALLOC_ALIGNMENT
3538
3539 * If there was an intervening foreign sbrk, we need to adjust sbrk
3540 request size to account for the fact that we will not be able to
3541 combine new space with existing space in old_top.
3542
3543 * Almost all systems internally allocate whole pages at a time, in
3544 which case we might as well use the whole last page of request.
3545 So we allocate enough more memory to hit a page boundary now,
3546 which in turn causes future contiguous calls to page-align.
3547 */
3548
3549 else {
3550 front_misalign = 0;
3551 end_misalign = 0;
3552 correction = 0;
3553 aligned_brk = brk;
3554
3555 /*
3556 If MORECORE returns an address lower than we have seen before,
3557 we know it isn't really contiguous. This and some subsequent
3558 checks help cope with non-conforming MORECORE functions and
3559 the presence of "foreign" calls to MORECORE from outside of
3560 malloc or by other threads. We cannot guarantee to detect
3561 these in all cases, but cope with the ones we do detect.
3562 */
3563 if (contiguous(av) && old_size != 0 && brk < old_end) {
3564 set_noncontiguous(av);
3565 }
3566
3567 /* handle contiguous cases */
3568 if (contiguous(av)) {
3569
3570#ifdef DNMALLOC_DEBUG
3571 fprintf(stderr, "Handle contiguous cases\n");
3572#endif
3573 /*
3574 We can tolerate forward non-contiguities here (usually due
3575 to foreign calls) but treat them as part of our space for
3576 stats reporting.
3577 */
3578 if (old_size != 0)
3579 av->sbrked_mem += brk - old_end;
3580
3581 /* Guarantee alignment of first new chunk made from this space */
3582
3583 front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
3584 if (front_misalign > 0) {
3585
3586 /*
3587 Skip over some bytes to arrive at an aligned position.
3588 We don't need to specially mark these wasted front bytes.
3589 They will never be accessed anyway because
3590 prev_inuse of av->top (and any chunk created from its start)
3591 is always true after initialization.
3592 */
3593
3594 correction = MALLOC_ALIGNMENT - front_misalign;
3595 aligned_brk += correction;
3596 }
3597
3598 /*
3599 If this isn't adjacent to existing space, then we will not
3600 be able to merge with old_top space, so must add to 2nd request.
3601 */
3602
3603 correction += old_size;
3604
3605 /* Extend the end address to hit a page boundary */
3606 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3607 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3608
3609 assert(correction >= 0);
3610 snd_brk = (char*)(MORECORE(correction));
3611
3612 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3613 /*
3614 If can't allocate correction, try to at least find out current
3615 brk. It might be enough to proceed without failing.
3616 */
3617 correction = 0;
3618 snd_brk = (char*)(MORECORE(0));
3619 }
3620 else if (snd_brk < brk) {
3621 /*
3622 If the second call gives noncontiguous space even though
3623 it says it won't, the only course of action is to ignore
3624 results of second call, and conservatively estimate where
3625 the first call left us. Also set noncontiguous, so this
3626 won't happen again, leaving at most one hole.
3627
3628 Note that this check is intrinsically incomplete. Because
3629 MORECORE is allowed to give more space than we ask for,
3630 there is no reliable way to detect a noncontiguity
3631 producing a forward gap for the second call.
3632 */
3633 snd_brk = brk + size;
3634 correction = 0;
3635 set_noncontiguous(av);
3636 }
[172]3637 else {
3638 VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
3639 }
[171]3640
3641 }
3642
3643 /* handle non-contiguous cases */
3644 else {
3645
3646#ifdef DNMALLOC_DEBUG
3647 fprintf(stderr, "Handle non-contiguous cases\n");
3648#endif
3649
3650 /* MORECORE/mmap must correctly align */
3651 assert(aligned_OK(brk));
3652
3653 /* Find out current end of memory */
3654 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3655 snd_brk = (char*)(MORECORE(0));
3656 av->sbrked_mem += snd_brk - brk - size;
3657 }
3658#ifdef DNMALLOC_DEBUG
3659 fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
3660#endif
3661 }
3662
3663 /* Adjust top based on results of second sbrk.
3664 *
3665 * If mmap() has been used as backup for failed morecore(),
3666 * we end up in this branch as well.
3667 */
3668 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3669#ifdef DNMALLOC_DEBUG
3670 fprintf(stderr, "Adjust top, correction %lu\n", correction);
3671#endif
3672 /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
3673 av->top = cireg_getfree();
3674 av->top->chunk = (mchunkptr)aligned_brk;
3675 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3676#ifdef DNMALLOC_DEBUG
3677 fprintf(stderr, "Adjust top, top %p size %lu\n",
3678 av->top, (unsigned long)chunksize(av->top));
3679#endif
3680 hashtable_add(av->top);
3681 av->sbrked_mem += correction;
3682
3683 /*
3684 If not the first time through, we either have a
3685 gap due to foreign sbrk or a non-contiguous region. Insert a
3686 double fencepost at old_top to prevent consolidation with space
3687 we don't own. These fenceposts are artificial chunks that are
3688 marked as inuse. Original dlmalloc had two of these but too
3689 small to use. To ensure that the linked lists contain a maximum
3690 of 8 elements we only use 1. Inuse is determined by the
3691 current rather than the next chunk anyway.
3692 */
3693
3694 if (old_size != 0) {
3695#ifdef DNMALLOC_DEBUG
3696 fprintf(stderr, "Shrink old_top to insert fenceposts\n");
3697#endif
3698 /*
3699 Shrink old_top to insert fenceposts, keeping size a
3700 multiple of MALLOC_ALIGNMENT. We know there is at least
3701 enough space in old_top to do this.
3702 */
3703#ifdef DNMALLOC_DEBUG
3704 fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n",
3705 old_top, (unsigned long)old_size);
3706#endif
3707 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3708 set_head(old_top, old_size | PREV_INUSE);
3709#ifdef DNMALLOC_DEBUG
3710 fprintf(stderr, "Adjust top, old_size after %lu\n",
3711 (unsigned long)old_size);
3712#endif
3713
3714 /*
3715 Note that the following assignments completely overwrite
3716 old_top when old_size was previously MINSIZE. This is
3717 intentional. We need the fencepost, even if old_top otherwise gets
3718 lost.
3719 */
3720 /* In dnmalloc the fencepost needs to be 16 bytes; since it is
3721 marked inuse it will never be coalesced.
3722 */
3723 fencepost = cireg_getfree();
3724 fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top),
3725 old_size);
3726 fencepost->size = 16|INUSE|PREV_INUSE;
3727 hashtable_add(fencepost);
3728 /*
3729 If possible, release the rest, suppressing trimming.
3730 */
3731 if (old_size >= MINSIZE) {
3732 INTERNAL_SIZE_T tt = av->trim_threshold;
3733#ifdef DNMALLOC_DEBUG
3734 fprintf(stderr, "Release\n");
3735#endif
3736 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3737 set_head(old_top, old_size | PREV_INUSE | INUSE);
3738 guard_set(av->guard_stored, old_top, 0, old_size);
[172]3739 VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
[171]3740 fREe(chunk(old_top));
3741 av->trim_threshold = tt;
3742#ifdef DNMALLOC_DEBUG
3743 fprintf(stderr, "Release done\n");
3744#endif
3745 }
3746
3747#ifdef DNMALLOC_DEBUG
3748 fprintf(stderr, "Adjust top, size %lu\n",
3749 (unsigned long)chunksize(av->top));
3750#endif
3751
3752 } /* fenceposts */
3753 } /* adjust top */
3754 } /* not extended previous region */
3755
3756 /* Update statistics */ /* FIXME check this */
3757 sum = av->sbrked_mem;
3758 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3759 av->max_sbrked_mem = sum;
3760
3761 sum += av->mmapped_mem;
3762 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3763 av->max_total_mem = sum;
3764
3765 check_malloc_state();
3766
3767 /* finally, do the allocation */
3768
3769 p = av->top;
3770 size = chunksize(p);
3771
3772#ifdef DNMALLOC_DEBUG
3773 fprintf(stderr, "Size: %lu nb+MINSIZE: %lu\n",
3774 (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
3775#endif
3776
3777 /* check that one of the above allocation paths succeeded */
3778 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3779 remainder_size = size - nb;
3780 remainder = cireg_getfree();
3781 remainder->chunk = chunk_at_offset(chunk(p), nb);
3782 av->top = remainder;
3783 set_head(p, nb | PREV_INUSE | INUSE);
3784 set_head(remainder, remainder_size | PREV_INUSE);
3785 hashtable_insert (p, av->top);
3786 check_malloced_chunk(p, nb);
3787#ifdef DNMALLOC_DEBUG
3788 fprintf(stderr, "Return any (total %lu)\n",
3789 (unsigned long)/* size_t */av->max_total_mem );
3790#endif
3791 return chunk(p);
3792 }
3793
3794 }
3795
3796#ifdef DNMALLOC_DEBUG
3797 fprintf(stderr, "Return failed (total %lu)\n",
3798 (unsigned long)/* size_t */av->max_total_mem );
3799#endif
3800
3801 /* catch all failure paths */
3802 MALLOC_FAILURE_ACTION;
3803 return 0;
3804}
3805
3806
3807
3808
3809/*
3810 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3811 to the system (via negative arguments to sbrk) if there is unused
3812 memory at the `high' end of the malloc pool. It is called
3813 automatically by free() when top space exceeds the trim
3814 threshold. It is also called by the public malloc_trim routine. It
3815 returns 1 if it actually released any memory, else 0.
3816*/
3817
3818#if __STD_C
3819static int sYSTRIm(size_t pad, mstate av)
3820#else
3821static int sYSTRIm(pad, av) size_t pad; mstate av;
3822#endif
3823{
3824 long top_size; /* Amount of top-most memory */
3825 long extra; /* Amount to release */
3826 long released; /* Amount actually released */
3827 char* current_brk; /* address returned by pre-check sbrk call */
3828 char* new_brk; /* address returned by post-check sbrk call */
3829 size_t pagesz;
3830
3831 pagesz = av->pagesize;
3832 top_size = chunksize(av->top);
3833
3834 /* Release in pagesize units, keeping at least one page */
3835 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
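  /*
    Worked example, assuming pagesz == 4096, pad == 0 and MINSIZE == 16
    (the real values come from av->pagesize and the macros defined
    earlier): with top_size == 20480, i.e. five pages,

        extra = ((20480 - 0 - 16 + 4095) / 4096 - 1) * 4096
              = (5 - 1) * 4096
              = 16384

    so four pages are offered back to the system and one page stays in
    av->top.
  */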
3836
3837 if (extra > 0) {
3838
3839 /*
3840 Only proceed if end of memory is where we last set it.
3841 This avoids problems if there were foreign sbrk calls.
3842 */
3843 current_brk = (char*)(MORECORE(0));
3844 if (current_brk == (char*)(av->top) + top_size) {
3845
3846 /*
3847 Attempt to release memory. We ignore MORECORE return value,
3848 and instead call again to find out where new end of memory is.
3849 This avoids problems if first call releases less than we asked,
3850 or if failure somehow altered the brk value. (We could still
3851 encounter problems if it altered brk in some very bad way,
3852 but the only thing we can do is adjust anyway, which will cause
3853 some downstream failure.)
3854 */
3855
3856 MORECORE(-extra);
3857 new_brk = (char*)(MORECORE(0));
3858
3859 if (new_brk != (char*)MORECORE_FAILURE) {
3860 released = (long)(current_brk - new_brk);
3861
3862 if (released != 0) {
3863 /* Success. Adjust top. */
3864 av->sbrked_mem -= released;
3865 set_head(av->top, (top_size - released) | PREV_INUSE);
3866 check_malloc_state();
3867 return 1;
3868 }
3869 }
3870 }
3871 }
3872 return 0;
3873}
3874
3875/*
3876 ------------------------------ malloc ------------------------------
3877*/
3878
3879
3880#if __STD_C
3881DL_STATIC Void_t* mALLOc(size_t bytes)
3882#else
3883DL_STATIC Void_t* mALLOc(bytes) size_t bytes;
3884#endif
3885{
3886 mstate av = get_malloc_state();
3887
3888 INTERNAL_SIZE_T nb; /* normalized request size */
3889 unsigned int idx; /* associated bin index */
3890 mbinptr bin; /* associated bin */
3891 mfastbinptr* fb; /* associated fastbin */
3892
3893 chunkinfoptr victim; /* inspected/selected chunk */
3894 INTERNAL_SIZE_T size; /* its size */
3895 int victim_index; /* its bin index */
3896
3897 chunkinfoptr remainder; /* remainder from a split */
3898 CHUNK_SIZE_T remainder_size; /* its size */
3899
3900 unsigned int block; /* bit map traverser */
3901 unsigned int bit; /* bit map traverser */
3902 unsigned int map; /* current word of binmap */
3903
3904 chunkinfoptr fwd; /* misc temp for linking */
3905 chunkinfoptr bck; /* misc temp for linking */
3906
3907 Void_t* retval;
3908
3909 /* chunkinfoptr next; */
3910
3911
3912 /*
3913 Convert request size to internal form by adding SIZE_SZ bytes
3914 overhead plus possibly more to obtain necessary alignment and/or
3915 to obtain a size of at least MINSIZE, the smallest allocatable
3916 size. Also, checked_request2size traps (returning 0) request sizes
3917 that are so large that they wrap around zero when padded and
3918 aligned.
3919 */
3920#if defined(SH_CUTEST)
3921 extern int malloc_count;
3922 ++malloc_count;
3923#endif
3924
3925 checked_request2size(bytes, nb);
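  /*
    Worked example of the normalization described above, assuming
    SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16 (the actual
    values are platform dependent and defined earlier in this file):

        bytes == 20   ->   nb == (20 + 4 + 7) & ~7 == 24
        bytes == 1    ->   nb == 16   (padded up to MINSIZE)

    Requests so large that the padding would wrap around zero are
    trapped by checked_request2size(), and the request fails.
  */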
3926
3927 /*
3928 Bypass search if no frees yet
3929 */
3930 if (av && have_anychunks(av)) {
3931 goto av_initialized;
3932 }
3933 else {
3934 if (!av || av->max_fast == 0) { /* initialization check */
3935 malloc_consolidate(av);
3936 av = get_malloc_state();
3937 }
3938 goto use_top;
3939 }
3940
3941 av_initialized:
3942
3943 /*
3944 If the size qualifies as a fastbin, first check corresponding bin.
3945 */
3946 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3947 fb = &(av->fastbins[(fastbin_index(nb))]);
3948 if ( (victim = *fb) != 0) {
3949 *fb = victim->fd;
3950 check_remalloced_chunk(victim, nb);
3951 guard_set(av->guard_stored, victim, bytes, nb);
[172]3952 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3953 return chunk(victim);
3954 }
3955 }
3956
3957 /*
3958 If a small request, check regular bin. Since these "smallbins"
3959 hold one size each, no searching within bins is necessary.
3960 (For a large request, we need to wait until unsorted chunks are
3961 processed to find best fit. But for small ones, fits are exact
3962 anyway, so we can check now, which is faster.)
3963 */
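  /*
    Illustration, assuming smallbins are spaced 8 bytes apart (the real
    spacing follows from smallbin_index(), defined earlier in this
    file): all requests normalizing to nb == 24 share one bin, nb == 32
    the next, and so on, so any chunk found in the bin for nb fits
    exactly and can be handed out without searching.
  */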
3964
3965 if (in_smallbin_range(nb)) {
3966 idx = smallbin_index(nb);
3967 bin = bin_at(av,idx);
3968
3969 if ((victim = last(bin)) != bin) {
3970 bck = victim->bk;
3971 bin->bk = bck;
3972 bck->fd = bin;
3973
3974 set_all_inuse(victim);
3975
3976 check_malloced_chunk(victim, nb);
3977 guard_set(av->guard_stored, victim, bytes, nb);
[172]3978 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3979 return chunk(victim);
3980 }
3981 }
3982
3983 /*
3984 If this is a large request, consolidate fastbins before continuing.
3985 While it might look excessive to kill all fastbins before
3986 even seeing if there is space available, this avoids
3987 fragmentation problems normally associated with fastbins.
3988 Also, in practice, programs tend to have runs of either small or
3989 large requests, but less often mixtures, so consolidation is not
3990 invoked all that often in most programs. And the programs in which
3991 it is called frequently would otherwise tend to fragment.
3992 */
3993
3994 else {
3995 idx = largebin_index(nb);
3996 if (have_fastchunks(av))
3997 malloc_consolidate(av);
3998 }
3999
4000 /*
4001 Process recently freed or remaindered chunks, taking one only if
4002 it is an exact fit, or, if this is a small request, the chunk is the remainder from
4003 the most recent non-exact fit. Place other traversed chunks in
4004 bins. Note that this step is the only place in any routine where
4005 chunks are placed in bins.
4006 */
4007
4008 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4009 bck = victim->bk;
4010 size = chunksize(victim);
4011
4012 /*
4013 If a small request, try to use last remainder if it is the
4014 only chunk in unsorted bin. This helps promote locality for
4015 runs of consecutive small requests. This is the only
4016 exception to best-fit, and applies only when there is
4017 no exact fit for a small chunk.
4018 */
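  /*
    Walk-through with illustrative numbers: suppose the previous small
    request split a free chunk and left a 4000-byte remainder as the
    only entry in the unsorted bin. A following request with nb == 32
    is carved from its front, and the new 3968-byte tail becomes the
    last remainder again, so runs of small allocations end up adjacent
    in memory.
  */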
4019
4020 if (UNLIKELY(in_smallbin_range(nb) &&
4021 bck == unsorted_chunks(av) &&
4022 victim == av->last_remainder &&
4023 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4024
4025 /* split and reattach remainder */
4026 remainder_size = size - nb;
4027 remainder = cireg_getfree();
4028 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4029 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4030 av->last_remainder = remainder;
4031 remainder->bk = remainder->fd = unsorted_chunks(av);
4032
4033 set_head(victim, nb | PREV_INUSE|INUSE);
4034 set_head(remainder, remainder_size | PREV_INUSE);
4035 hashtable_insert(victim, remainder);
4036
4037 check_malloced_chunk(victim, nb);
4038 guard_set(av->guard_stored, victim, bytes, nb);
[172]4039 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4040 return chunk(victim);
4041 }
4042
4043 /* remove from unsorted list */
4044 unsorted_chunks(av)->bk = bck;
4045 bck->fd = unsorted_chunks(av);
4046
4047 /* Take now instead of binning if exact fit */
4048
4049 if (UNLIKELY(size == nb)) {
4050 set_all_inuse(victim);
4051 check_malloced_chunk(victim, nb);
4052 guard_set(av->guard_stored, victim, bytes, nb);
[172]4053 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4054 return chunk(victim);
4055 }
4056
4057 /* place chunk in bin */
4058
4059 if (in_smallbin_range(size)) {
4060
4061 victim_index = smallbin_index(size);
4062 bck = bin_at(av, victim_index);
4063 fwd = bck->fd;
4064 }
4065 else {
4066 victim_index = largebin_index(size);
4067 bck = bin_at(av, victim_index);
4068 fwd = bck->fd;
4069
4070 if (UNLIKELY(fwd != bck)) {
4071 /* if smaller than smallest, place first */
4072 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4073 fwd = bck;
4074 bck = bck->bk;
4075 }
4076 else if ((CHUNK_SIZE_T)(size) >=
4077 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4078
4079 /* maintain large bins in sorted order */
4080 size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4081 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
4082 fwd = fwd->fd;
4083 bck = fwd->bk;
4084 }
4085 }
4086 }
4087
4088 mark_bin(av, victim_index);
4089 victim->bk = bck;
4090 victim->fd = fwd;
4091 fwd->bk = victim;
4092 bck->fd = victim;
4093 }
4094
4095 /*
4096 If a large request, scan through the chunks of current bin to
4097 find one that fits. (This will be the smallest that fits unless
4098 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
4099 the only step where an unbounded number of chunks might be
4100 scanned without doing anything useful with them. However the
4101 lists tend to be short.
4102 */
4103
4104 if (!in_smallbin_range(nb)) {
4105 bin = bin_at(av, idx);
4106
4107 victim = last(bin);
4108
4109 if (UNLIKELY(victim != bin)) {
4110
4111 do {
4112 size = chunksize(victim);
4113
4114 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4115 remainder_size = size - nb;
4116 unlink(victim, bck, fwd);
4117
4118 /* Split */
4119 if (remainder_size >= MINSIZE) {
4120 remainder = cireg_getfree();
4121 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4122 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4123 remainder->bk = remainder->fd = unsorted_chunks(av);
4124 set_head(victim, nb | PREV_INUSE | INUSE);
4125 set_head(remainder, remainder_size | PREV_INUSE);
4126 hashtable_insert(victim, remainder);
4127 check_malloced_chunk(victim, nb);
4128 guard_set(av->guard_stored, victim, bytes, nb);
[172]4129 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4130 return chunk(victim);
4131 }
4132 /* Exhaust */
4133 else {
4134 set_all_inuse(victim);
4135 check_malloced_chunk(victim, nb);
4136 guard_set(av->guard_stored, victim, bytes, nb);
[172]4137 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4138 return chunk(victim);
4139 }
4140 }
4141 victim = victim->bk;
4142 } while(victim != bin);
4143 }
4144 }
4145
4146 /*
4147 Search for a chunk by scanning bins, starting with next largest
4148 bin. This search is strictly by best-fit; i.e., the smallest
4149 (with ties going to approximately the least recently used) chunk
4150 that fits is selected.
4151
4152 The bitmap avoids needing to check that most blocks are nonempty.
4153 */
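  /*
    Sketch of the binmap bookkeeping used below, with illustrative
    numbers (the exact shift and mask come from the idx2block/idx2bit
    macros defined earlier in this file): with 32-bit map words, bin
    index idx lives in word idx/32 at bit position idx%32, e.g.

        idx == 70   ->   block == 2, bit == (1 << 6)

    A set bit means "this bin may be non-empty"; stale bits are cleared
    lazily when an empty bin is visited (the "false alarm" case below).
  */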
4154
4155
4156 ++idx;
4157 bin = bin_at(av,idx);
4158 block = idx2block(idx);
4159 map = av->binmap[block];
4160 bit = idx2bit(idx);
4161
4162 for (;;) {
4163
4164 /* Skip rest of block if there are no more set bits in this block. */
4165 if (bit > map || bit == 0) {
4166 do {
4167 if (++block >= BINMAPSIZE) /* out of bins */
4168 goto use_top;
4169 } while ( (map = av->binmap[block]) == 0);
4170
4171 bin = bin_at(av, (block << BINMAPSHIFT));
4172 bit = 1;
4173 }
4174
4175 /* Advance to bin with set bit. There must be one. */
4176 while ((bit & map) == 0) {
4177 bin = next_bin(bin);
4178 bit <<= 1;
4179 assert(bit != 0);
4180 }
4181
4182 /* Inspect the bin. It is likely to be non-empty */
4183 victim = last(bin);
4184
4185 /* If a false alarm (empty bin), clear the bit. */
4186 if (victim == bin) {
4187 av->binmap[block] = map &= ~bit; /* Write through */
4188 bin = next_bin(bin);
4189 bit <<= 1;
4190 }
4191
4192 else {
4193 size = chunksize(victim);
4194
4195 /* We know the first chunk in this bin is big enough to use. */
4196 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4197
4198 remainder_size = size - nb;
4199
4200 /* unlink */
4201 bck = victim->bk;
4202 bin->bk = bck;
4203 bck->fd = bin;
4204
4205 /* Split */
4206 if (remainder_size >= MINSIZE) {
4207 remainder = cireg_getfree();
4208 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4209
4210 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4211 remainder->bk = remainder->fd = unsorted_chunks(av);
4212 /* advertise as last remainder */
[172]4213 if (in_smallbin_range(nb))
4214 av->last_remainder = remainder;
[171]4215
4216 set_head(victim, nb | PREV_INUSE | INUSE);
4217 set_head(remainder, remainder_size | PREV_INUSE);
4218 hashtable_insert(victim, remainder);
4219 check_malloced_chunk(victim, nb);
4220 guard_set(av->guard_stored, victim, bytes, nb);
[172]4221 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4222 return chunk(victim);
4223 }
4224 /* Exhaust */
4225 else {
4226 set_all_inuse(victim);
4227 check_malloced_chunk(victim, nb);
4228 guard_set(av->guard_stored, victim, bytes, nb);
[172]4229 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4230 return chunk(victim);
4231 }
4232
4233 }
4234 }
4235
4236 use_top:
4237
4238
4239 /*
4240 If large enough, split off the chunk bordering the end of memory
4241 (held in av->top). Note that this is in accord with the best-fit
4242 search rule. In effect, av->top is treated as larger (and thus
4243 less well fitting) than any other available chunk since it can
4244 be extended to be as large as necessary (up to system
4245 limitations).
4246
4247 We require that av->top always exists (i.e., has size >=
4248 MINSIZE) after initialization, so if it would otherwise be
4249 exhausted by the current request, it is replenished. (The main
4250 reason for ensuring it exists is that we may need MINSIZE space
4251 to put in fenceposts in sysmalloc.)
4252 */
4253
4254 victim = av->top;
4255 size = chunksize(victim);
4256
4257 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
[172]4258 remainder = cireg_getfree();
[171]4259 remainder_size = size - nb;
4260 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4261 av->top = remainder;
4262 set_head(victim, nb | PREV_INUSE | INUSE);
4263 set_head(remainder, remainder_size | PREV_INUSE);
4264 hashtable_insert(victim, remainder);
4265 check_malloced_chunk(victim, nb);
4266 guard_set(av->guard_stored, victim, bytes, nb);
[172]4267 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4268 return chunk(victim);
4269 }
4270
4271 /*
4272 If no space in top, relay to handle system-dependent cases
4273 */
4274 retval = sYSMALLOc(nb, av);
4275 if (retval) {
4276 victim = mem2chunk(retval);
4277 guard_set(av->guard_stored, victim, bytes, nb);
4278 }
[172]4279 VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
[171]4280 return retval;
4281}
4282
4283/*
4284 ------------------------------ free ------------------------------
4285*/
4286
4287#if __STD_C
4288DL_STATIC void fREe(Void_t* mem)
4289#else
4290DL_STATIC void fREe(mem) Void_t* mem;
4291#endif
4292{
4293 mstate av = get_malloc_state();
4294
4295 chunkinfoptr p; /* chunk corresponding to mem */
4296 INTERNAL_SIZE_T size; /* its size */
4297 mfastbinptr* fb; /* associated fastbin */
4298 chunkinfoptr prevchunk; /* previous physical chunk */
4299 chunkinfoptr nextchunk; /* next contiguous chunk */
4300 INTERNAL_SIZE_T nextsize; /* its size */
4301 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4302 chunkinfoptr bck; /* misc temp for linking */
4303 chunkinfoptr fwd; /* misc temp for linking */
4304 chunkinfoptr next;
4305#if defined(SH_CUTEST)
4306 extern int malloc_count;
4307 --malloc_count;
4308#endif
4309
4310 /* free(0) has no effect */
4311 if (mem != 0) {
4312 p = hashtable_lookup(mem);
4313 /* check that memory is managed by us
4314 * and is inuse
4315 */
4316 if (UNLIKELY(!p || !inuse(p)))
4317 {
4318#ifdef DNMALLOC_CHECKS
4319 if (p) {
4320 fprintf(stderr, "Attempt to free memory not in use\n");
4321 abort();
4322 } else {
4323 fprintf(stderr, "Attempt to free memory not allocated\n");
4324 abort();
4325 }
4326#endif
4327 assert(p && inuse(p));
4328 return;
4329 }
4330
[172]4331 VALGRIND_FREELIKE_BLOCK(mem, 0);
4332
[171]4333 guard_check(av->guard_stored, p);
[172]4334
[171]4335 size = chunksize(p);
4336
4337 check_inuse_chunk(p);
4338
4339 /*
4340 If eligible, place chunk on a fastbin so it can be found
4341 and used quickly in malloc.
4342 */
4343
4344 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
4345
4346#if TRIM_FASTBINS
4347 /*
4348 If TRIM_FASTBINS set, don't place chunks
4349 bordering top into fastbins
4350 */
4351 && (chunk_at_offset(chunk(p), size) != av->top)
4352#endif
4353 ) {
4354
4355 set_fastchunks(av);
4356 fb = &(av->fastbins[fastbin_index(size)]);
4357 p->fd = *fb;
4358 *fb = p;
4359 }
4360
4361 /*
4362 Consolidate other non-mmapped chunks as they arrive.
4363 */
4364
4365 else if (!chunk_is_mmapped(p)) {
4366 set_anychunks(av);
4367
4368 nextchunk = next_chunkinfo(p);
4369 if (nextchunk)
4370 nextsize = chunksize(nextchunk);
4371 else
4372 nextsize = 0;/* gcc doesn't notice that it's only used if (nextchunk)*/
4373
4374 /* consolidate backward */
4375 if (UNLIKELY(!prev_inuse(p))) {
4376 prevchunk = prev_chunkinfo(p);
4377 prevsize = chunksize(prevchunk);
4378#ifdef DNMALLOC_CHECKS
4379 if (inuse(prevchunk)) {
4380 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
4381 abort();
4382 }
4383#else
4384 assert(!inuse(prevchunk));
4385#endif
4386 size += prevsize;
4387 unlink(prevchunk, bck, fwd);
4388 set_head(p, size | PREV_INUSE);
4389 hashtable_skiprm(prevchunk,p);
4390 /* This chunk no longer exists in any form: release the chunkinfoptr
4391 */
4392 freecilst_add(p);
4393 p = prevchunk;
4394 }
4395
4396 if (nextchunk) {
4397 if (nextchunk != av->top) {
4398 /* get and clear inuse bit */
4399 clear_previnuse(nextchunk);
4400
4401 /* consolidate forward */
4402 if (!inuse(nextchunk)) {
4403 unlink(nextchunk, bck, fwd);
4404 size += nextsize;
4405 set_head(p, size | PREV_INUSE);
4406 hashtable_skiprm(p, nextchunk);
4407 freecilst_add (nextchunk);
4408 }
4409
4410 set_head(p, size | PREV_INUSE);
4411 next = next_chunkinfo(p);
4412 if (next)
4413 next->prev_size = size;
4414
4415 /*
4416 Place the chunk in unsorted chunk list. Chunks are
4417 not placed into regular bins until after they have
4418 been given one chance to be used in malloc.
4419 */
4420
4421 bck = unsorted_chunks(av);
4422 fwd = bck->fd;
4423 p->bk = bck;
4424 p->fd = fwd;
4425 bck->fd = p;
4426 fwd->bk = p;
4427
4428 nextchunk = next_chunkinfo(p);
4429 if (nextchunk)
4430 nextchunk->prev_size = chunksize(p);
4431
4432 check_free_chunk(p);
4433 }
4434
4435 /*
4436 If the chunk borders the current high end of memory,
4437 consolidate into top
4438 */
4439
4440 else {
4441 size += nextsize;
4442 set_head(p, size | PREV_INUSE);
4443 hashtable_remove(chunk(av->top));
4444 freecilst_add(av->top);
4445 av->top = p;
4446 check_chunk(p);
4447 }
4448 } /* if (nextchunk) */
4449
4450 /*
4451 If freeing a large space, consolidate possibly-surrounding
4452 chunks. Then, if the total unused topmost memory exceeds trim
4453 threshold, ask malloc_trim to reduce top.
4454
4455 Unless max_fast is 0, we don't know if there are fastbins
4456 bordering top, so we cannot tell for sure whether threshold
4457 has been reached unless fastbins are consolidated. But we
4458 don't want to consolidate on each free. As a compromise,
4459 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4460 is reached.
4461 */
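  /*
    Numeric illustration, assuming FASTBIN_CONSOLIDATION_THRESHOLD is
    64 KB (the usual dlmalloc default; the actual value is defined
    earlier in this file): only when a free leaves a consolidated chunk
    of 64 KB or more are the fastbins flushed via malloc_consolidate(),
    and only then, if av->top has grown past av->trim_threshold, is
    sYSTRIm() asked to hand memory back to the system.
  */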
4462
4463 if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) {
4464 if (have_fastchunks(av))
4465 malloc_consolidate(av);
4466
4467#ifndef MORECORE_CANNOT_TRIM
4468 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
4469 (CHUNK_SIZE_T)(av->trim_threshold))
4470 {
4471 if (morecore32bit(av))
4472 {
4473#ifdef DNMALLOC_DEBUG
4474 fprintf(stderr, "Calling systrim from free()\n");
4475#endif
4476 sYSTRIm(av->top_pad, av);
4477#ifdef DNMALLOC_DEBUG
4478 fprintf(stderr, "Systrim done\n");
4479#endif
4480 }
4481 }
4482#endif
4483 }
4484
4485 }
4486 /*
4487 If the chunk was allocated via mmap, release via munmap()
4488 Note that if HAVE_MMAP is false but chunk_is_mmapped is
4489 true, then the user must have overwritten memory. There's nothing
4490 we can do to catch this error unless DEBUG is set, in which case
4491 check_inuse_chunk (above) will have triggered an error.
4492 */
4493
4494 else {
4495 int ret;
4496 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
4497 av->n_mmaps--;
4498 av->mmapped_mem -= (size + offset);
4499 ret = munmap((char*) chunk(p) - offset, size + offset);
4500 hashtable_remove_mmapped(chunk(p));
4501 freecilst_add(p);
4502 /* munmap returns non-zero on failure */
4503 assert(ret == 0);
4504 }
4505 }
4506}
4507
4508/*
4509 ------------------------- malloc_consolidate -------------------------
4510
4511 malloc_consolidate is a specialized version of free() that tears
4512 down chunks held in fastbins. Free itself cannot be used for this
4513 purpose since, among other things, it might place chunks back onto
4514 fastbins. So, instead, we need to use a minor variant of the same
4515 code.
4516
4517 Also, because this routine needs to be called the first time through
4518 malloc anyway, it turns out to be the perfect place to trigger
4519 initialization code.
4520*/
4521
4522#if __STD_C
4523static void malloc_consolidate(mstate av)
4524#else
4525static void malloc_consolidate(av) mstate av;
4526#endif
4527{
4528 mfastbinptr* fb; /* current fastbin being consolidated */
4529 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4530 chunkinfoptr p; /* current chunk being consolidated */
4531 chunkinfoptr nextp; /* next chunk to consolidate */
4532 chunkinfoptr prevp;
4533 chunkinfoptr unsorted_bin; /* bin header */
4534 chunkinfoptr first_unsorted; /* chunk to link to */
4535
4536 /* These have same use as in free() */
4537 chunkinfoptr nextchunk;
4538 INTERNAL_SIZE_T size;
4539 INTERNAL_SIZE_T nextsize;
4540 INTERNAL_SIZE_T prevsize;
4541 chunkinfoptr bck;
4542 chunkinfoptr fwd;
4543 chunkinfoptr next;
4544
4545 /*
4546 If max_fast is 0, we know that av hasn't
4547 yet been initialized, in which case do so below
4548 */
4549 if (av && av->max_fast != 0) {
[172]4550
[171]4551 clear_fastchunks(av);
4552
4553 unsorted_bin = unsorted_chunks(av);
4554
4555 /*
4556 Remove each chunk from fast bin and consolidate it, placing it
4557 then in unsorted bin. Among other reasons for doing this,
4558 placing in unsorted bin avoids needing to calculate actual bins
4559 until malloc is sure that chunks aren't immediately going to be
4560 reused anyway.
4561 */
4562
4563 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4564 fb = &(av->fastbins[0]);
4565 do {
4566 if ( UNLIKELY((p = *fb) != 0)) {
4567 *fb = 0;
4568 do {
4569 check_inuse_chunk(p);
4570 nextp = p->fd;
4571
4572 /*
4573 * Slightly streamlined version of consolidation code in free()
4574 */
4575
4576 size = chunksize(p);
4577 nextchunk = next_chunkinfo(p);
4578
4579 /* gcc doesn't notice that it's only used if (nextchunk) */
4580 if (nextchunk)
4581 nextsize = chunksize(nextchunk);
4582 else
4583 nextsize = 0;
4584
4585 if (!prev_inuse(p)) {
4586 prevp = prev_chunkinfo(p);
4587 prevsize = chunksize(prevp);
4588 size += prevsize;
4589#ifdef DNMALLOC_CHECKS
4590 if (inuse(prevp)) {
4591 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
4592 abort();
4593 }
4594#else
4595 assert(!inuse(prevp));
4596#endif
4597 unlink(prevp, bck, fwd);
4598 set_head(p, size | PREV_INUSE);
4599 hashtable_skiprm(prevp,p);
4600 freecilst_add(p);
4601 p=prevp;
4602 }
4603
4604 if (nextchunk) {
4605 if (nextchunk != av->top) {
4606
4607 clear_previnuse(nextchunk);
4608
[172]4609 /* if mmap is used instead of sbrk, we may have a
4610 * chunk with !nextchunk->fd && !nextchunk->bk
4611 */
[171]4612 if (!inuse(nextchunk)) {
[172]4613 if( nextchunk->fd && nextchunk->bk) {
4614 size += nextsize;
4615 unlink(nextchunk, bck, fwd);
4616 set_head(p, size | PREV_INUSE);
4617 hashtable_skiprm(p,nextchunk);
4618 freecilst_add(nextchunk);
4619 }
[171]4620 }
4621
4622 first_unsorted = unsorted_bin->fd;
4623 unsorted_bin->fd = p;
4624 first_unsorted->bk = p;
4625
4626 set_head(p, size | PREV_INUSE);
4627 p->bk = unsorted_bin;
4628 p->fd = first_unsorted;
4629 next = next_chunkinfo(p);
4630 if (next)
4631 next->prev_size = size;
4632
4633
4634 }
4635
[172]4636 else if (nextchunk == av->top) {
[171]4637 size += nextsize;
4638 set_head(p, size | PREV_INUSE);
4639 hashtable_remove(chunk(av->top));
4640 freecilst_add(av->top);
4641 av->top = p;
4642 }
[172]4643 } /* if (nextchunk) */
[171]4644
4645 } while ( (p = nextp) != 0);
4646
4647 }
4648 } while (fb++ != maxfb);
4649 }
4650 else {
4651 // Initialize dnmalloc
4652 dnmalloc_init();
4653 malloc_init_state(get_malloc_state());
4654 check_malloc_state();
4655 }
4656}
4657
4658/*
4659 ------------------------------ realloc ------------------------------
4660*/
4661
4662
4663#if __STD_C
4664DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4665#else
4666DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4667#endif
4668{
4669 mstate av = get_malloc_state();
4670
4671 INTERNAL_SIZE_T nb; /* padded request size */
4672
4673 chunkinfoptr oldp; /* chunk corresponding to oldmem */
4674 INTERNAL_SIZE_T oldsize; /* its size */
4675
4676 chunkinfoptr newp; /* chunk to return */
4677 INTERNAL_SIZE_T newsize; /* its size */
4678 Void_t* newmem; /* corresponding user mem */
4679
4680 chunkinfoptr next; /* next contiguous chunk after oldp */
4681
4682 chunkinfoptr remainder; /* extra space at end of newp */
4683 CHUNK_SIZE_T remainder_size; /* its size */
4684
4685 chunkinfoptr bck; /* misc temp for linking */
4686 chunkinfoptr fwd; /* misc temp for linking */
4687
4688 CHUNK_SIZE_T copysize; /* bytes to copy */
4689 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4690 INTERNAL_SIZE_T* s; /* copy source */
4691 INTERNAL_SIZE_T* d; /* copy destination */
4692
4693
4694#ifdef REALLOC_ZERO_BYTES_FREES
4695 if (UNLIKELY(bytes == 0)) {
4696 fREe(oldmem);
4697 return 0;
4698 }
4699#endif
4700
4701 if (UNLIKELY(!av || av->max_fast == 0)) {
4702 malloc_consolidate(av);
4703 av = get_malloc_state();
4704 }
4705
4706 /* realloc of null is supposed to be same as malloc */
4707 if (UNLIKELY(oldmem == 0))
4708 return mALLOc(bytes);
4709
4710 checked_request2size(bytes, nb);
4711
4712 oldp = hashtable_lookup(oldmem);
4713
4714 if (UNLIKELY(!oldp || !inuse(oldp))){
4715 /* attempt to either realloc memory not managed by us
4716 * or memory that is not in use
4717 */
4718#ifdef DNMALLOC_CHECKS
4719 if (oldp) {
4720 fprintf(stderr, "Attempt to realloc memory not in use\n");
4721 abort();
4722 } else {
4723 fprintf(stderr, "Attempt to realloc memory not allocated\n");
4724 abort();
4725 }
4726#endif
4727 assert(oldp && inuse(oldp));
4728 return 0;
4729 }
4730
[172]4731 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
[171]4732 guard_check(av->guard_stored, oldp);
4733
4734 oldsize = chunksize(oldp);
4735
4736 check_inuse_chunk(oldp);
4737
4738 if (!chunk_is_mmapped(oldp)) {
4739
4740 if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
4741 /* already big enough; split below */
4742 newp = oldp;
4743 newsize = oldsize;
4744 }
4745
4746 else {
4747 next = next_chunkinfo(oldp);
4748 if (next)
4749 next->prev_size = oldsize;
4750 /* Try to expand forward into top */
4751 if (next && next == av->top &&
4752 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4753 (CHUNK_SIZE_T)(nb + MINSIZE)) {
4754 set_head_size(oldp, nb);
4755 hashtable_remove(chunk(av->top));
4756 av->top->chunk = chunk_at_offset(chunk(oldp), nb);
4757 set_head(av->top, (newsize - nb) | PREV_INUSE);
4758 /* av->top->chunk has moved; update its position in the hashtable */
4759 hashtable_insert(oldp, av->top);
4760 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4761 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4762 return chunk(oldp);
4763 }
4764
4765 /* Try to expand forward into next chunk; split off remainder below */
4766 else if (next && next != av->top &&
4767 !inuse(next) &&
4768 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4769 (CHUNK_SIZE_T)(nb)) {
4770 newp = oldp;
4771 unlink(next, bck, fwd);
4772 hashtable_remove(chunk(next));
4773 freecilst_add(next);
4774 next = next_chunkinfo(oldp);
4775 if (next)
4776 next->prev_size = newsize;
4777 }
4778
4779 /* allocate, copy, free */
4780 else {
4781
4782 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
[172]4783 if (newmem == 0)
[171]4784 return 0; /* propagate failure */
4785
4786 newp = hashtable_lookup(newmem);
4787 newsize = chunksize(newp);
4788
[172]4789
[171]4790 /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
4791 /*
4792 Avoid copy if newp is next chunk after oldp.
4793 */
4794 if (UNLIKELY(is_next_chunk(oldp, newp))) {
4795 newsize += oldsize;
4796 set_head_size(oldp, newsize);
4797 hashtable_skiprm(oldp, newp);
4798 freecilst_add(newp);
4799 newp = oldp;
4800 }
4801 else {
4802 /*
4803 Unroll copy of <= 40 bytes (80 if 8byte sizes)
4804 We know that contents have an even number of
4805 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
4806 */
4807
[172]4808 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);
4809
[171]4810 copysize = oldsize;
4811 s = (INTERNAL_SIZE_T*)(oldmem);
4812 d = (INTERNAL_SIZE_T*)(newmem);
4813 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4814 assert(ncopies >= 2);
4815
4816 if (ncopies > 10)
4817 MALLOC_COPY(d, s, copysize);
4818
4819 else {
4820 *(d+0) = *(s+0);
4821 *(d+1) = *(s+1);
4822 if (ncopies > 2) {
4823 *(d+2) = *(s+2);
4824 *(d+3) = *(s+3);
4825 if (ncopies > 4) {
4826 *(d+4) = *(s+4);
4827 *(d+5) = *(s+5);
4828 if (ncopies > 6) {
4829 *(d+6) = *(s+6);
4830 *(d+7) = *(s+7);
4831 if (ncopies > 8) {
4832 *(d+8) = *(s+8);
4833 *(d+9) = *(s+9);
4834 }
4835 }
4836 }
4837 }
4838 }
4839
4840 fREe(oldmem);
4841 check_inuse_chunk(newp);
4842 guard_set(av->guard_stored, newp, bytes, nb);
4843 return chunk(newp);
4844 }
4845 }
4846 }
4847
4848 /* If possible, free extra space in old or extended chunk */
4849
4850 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4851
4852 remainder_size = newsize - nb;
4853
4854 if (remainder_size >= MINSIZE) { /* split remainder */
4855 remainder = cireg_getfree();
4856 remainder->chunk = chunk_at_offset(chunk(newp), nb);
4857 set_head_size(newp, nb);
4858 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
4859 remainder->prev_size = nb;
4860 hashtable_insert(newp, remainder);
4861 /* Mark remainder as inuse so free() won't complain */
4862 set_all_inuse(remainder);
4863 guard_set(av->guard_stored, remainder, 0, remainder_size);
[172]4864 VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
[171]4865 fREe(chunk(remainder));
4866 }
4867 else { /* not enough extra to split off */
4868 set_head_size(newp, newsize);
4869 set_all_inuse(newp);
4870 }
4871
4872 check_inuse_chunk(newp);
4873 guard_set(av->guard_stored, newp, bytes, nb);
[172]4874 VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
[171]4875 return chunk(newp);
4876 }
4877
4878 /*
4879 Handle mmap cases
4880 */
4881
4882 else {
4883
4884#if HAVE_MREMAP
4885 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
4886 size_t pagemask = av->pagesize - 1;
4887 char *cp;
4888 CHUNK_SIZE_T sum;
4889
[172]4890
[171]4891 /* Note the extra SIZE_SZ overhead */
4892 //newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4893 newsize = (nb + offset + pagemask) & ~pagemask;
4894
4895 /* don't need to remap if still within same page */
4896 if (oldsize == newsize - offset)
4897 {
4898 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4899 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4900 VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
[171]4901 return oldmem;
4902 }
4903
4904 cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);
4905
4906 if (cp != (char*)MORECORE_FAILURE) {
4907
4908 hashtable_remove_mmapped(chunk(oldp));
4909
4910 oldp->chunk = (mchunkptr)(cp + offset);
4911 set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);
4912
4913 hashtable_add(oldp);
4914
4915 assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
4916 assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));
4917
4918 /* update statistics */
4919 sum = av->mmapped_mem += newsize - oldsize;
4920 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4921 av->max_mmapped_mem = sum;
4922 sum += av->sbrked_mem;
4923 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4924 av->max_total_mem = sum;
4925
4926 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4927 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4928 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4929 return chunk(oldp);
4930 }
4931#endif /* have MREMAP */
4932
4933 /* Note the extra SIZE_SZ overhead. */
4934 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
4935 newmem = oldmem; /* do nothing */
4936 else {
4937 /* Must alloc, copy, free. */
4938 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4939 if (newmem != 0) {
4940 MALLOC_COPY(newmem, oldmem, oldsize);
4941 fREe(oldmem);
4942 }
4943 }
[172]4944 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
[171]4945 guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
4946 return newmem;
4947 }
4948}
4949
4950/*
4951 ---------------------------posix_memalign ----------------------------
4952*/
4953
4954#if __STD_C
4955DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4956#else
4957DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4958#endif
4959{
4960 mstate av;
4961
4962 if (alignment % sizeof(void *) != 0)
4963 return EINVAL;
4964 if ((alignment & (alignment - 1)) != 0)
4965 return EINVAL;
4966
4967 av = get_malloc_state();
4968 if (!av || av->max_fast == 0) malloc_consolidate(av);
4969 *memptr = mEMALIGn(alignment, bytes);
4970
4971 return (*memptr != NULL ? 0 : ENOMEM);
4972}
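/*
  Caller-side sketch of the interface above, shown with the internal
  names used in this file (they are normally exported under their
  standard names); the 4096-byte alignment and 8192-byte length are
  arbitrary illustrations:

      void *buf = NULL;
      if (posix_mEMALIGn(&buf, 4096, 8192) != 0)
        buf = NULL;        EINVAL for a bad alignment, ENOMEM on failure
      ...
      fREe(buf);           aligned memory is released like any other block

  The alignment must be a power of two and a multiple of sizeof(void *),
  as checked above.
*/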
4973
4974/*
4975 ------------------------------ memalign ------------------------------
4976*/
4977
4978#if __STD_C
4979DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
4980#else
4981DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
4982#endif
4983{
4984 INTERNAL_SIZE_T nb; /* padded request size */
4985 char* m; /* memory returned by malloc call */
4986 chunkinfoptr p; /* corresponding chunk */
4987 char* brk; /* alignment point within p */
4988 chunkinfoptr newp; /* chunk to return */
4989 INTERNAL_SIZE_T newsize; /* its size */
4990 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4991 chunkinfoptr remainder; /* spare room at end to split off */
4992 CHUNK_SIZE_T remainder_size; /* its size */
4993 INTERNAL_SIZE_T size;
4994 mstate av;
4995
4996 /* If need less alignment than we give anyway, just relay to malloc */
4997
4998 if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);
4999
5000 /* Otherwise, ensure that it is at least a minimum chunk size */
5001
5002 if (alignment < MINSIZE) alignment = MINSIZE;
5003
5004 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5005 if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
5006 size_t a = MALLOC_ALIGNMENT * 2;
5007 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
5008 alignment = a;
5009 }
5010
5011 checked_request2size(bytes, nb);
5012
5013 /*
5014 Strategy: find a spot within that chunk that meets the alignment
5015 request, and then possibly free the leading and trailing space.
5016 */
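  /*
    Worked example of the alignment step performed below, with
    illustrative numbers: for alignment == 256 and a malloc result
    m == 0x804b017,

        brk = (m + 256 - 1) & -256   ==   0x804b100

    The bytes between chunk(p) and brk (if at least MINSIZE) are given
    back as a separate chunk, and any surplus beyond nb at the end is
    split off and freed as well.
  */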
5017
5018
5019 /* Call malloc with worst case padding to hit alignment. */
5020
5021 m = (char*)(mALLOc(nb + alignment + MINSIZE));
5022
5023 if (m == 0) return 0; /* propagate failure */
5024
5025 av = get_malloc_state();
5026
5027 p = hashtable_lookup((mchunkptr) m);
5028
5029 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
5030
5031 /*
5032 Find an aligned spot inside chunk. Since we need to give back
5033 leading space in a chunk of at least MINSIZE, if the first
5034 calculation places us at a spot with less than MINSIZE leader,
5035 we can move to the next aligned spot -- we've allocated enough
5036 total room so that this is always possible.
5037 */
5038
5039 brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
5040 -((signed long) alignment)));
5041 if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
5042 brk += alignment;
5043
5044 newp = cireg_getfree();
5045 newp->chunk = (mchunkptr)brk;
5046 leadsize = brk - (char*)(chunk(p));
5047 newsize = chunksize(p) - leadsize;
5048
5049 /* For mmapped chunks, just adjust offset */
5050 if (UNLIKELY(chunk_is_mmapped(p))) {
5051 newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
5052 set_head(newp, newsize|IS_MMAPPED|INUSE);
5053 hashtable_remove_mmapped(chunk(p));
5054 freecilst_add(p);
5055 hashtable_add(newp);
5056 guard_set(av->guard_stored, newp, bytes, nb);
5057 return chunk(newp);
5058 }
5059
5060 /* Otherwise, give back leader, use the rest */
5061 set_head(newp, newsize | PREV_INUSE | INUSE);
5062 set_head_size(p, leadsize);
5063 set_all_inuse(newp);
5064 hashtable_add(newp); /* 20.05.2008 rw */
5065 guard_set(av->guard_stored, p, 0, leadsize);
5066 fREe(chunk(p));
5067 p = newp;
5068
5069 assert (newsize >= nb &&
5070 (((PTR_UINT)(chunk(p))) % alignment) == 0);
5071 }
5072
5073 /* Also give back spare room at the end */
5074 if (!chunk_is_mmapped(p)) {
5075 size = chunksize(p);
5076 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
5077 remainder = cireg_getfree();
5078 remainder_size = size - nb;
5079 remainder->chunk = chunk_at_offset(chunk(p), nb);
5080 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
5081 set_head_size(p, nb);
5082 hashtable_add(remainder); /* 20.05.2008 rw */
5083 guard_set(av->guard_stored, remainder, 0, remainder_size);
5084 fREe(chunk(remainder));
5085 }
5086 }
5087
5088 check_inuse_chunk(p);
5089 guard_set(av->guard_stored, p, bytes, nb);
5090 return chunk(p);
5091}
5092
5093/*
5094 ------------------------------ calloc ------------------------------
5095*/
5096
5097#if __STD_C
5098DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5099#else
5100DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5101#endif
5102{
5103 chunkinfoptr p;
5104 CHUNK_SIZE_T clearsize;
5105 CHUNK_SIZE_T nclears;
5106 INTERNAL_SIZE_T* d;
5107 Void_t* mem;
5108
5109
  /* Refuse requests where n_elements * elem_size would overflow */
  if (elem_size != 0 && n_elements > (size_t)(-1) / elem_size) {
    MALLOC_FAILURE_ACTION;
    return 0;
  }

5110 mem = mALLOc(n_elements * elem_size);
5111
5112 if (mem != 0) {
5113 p = hashtable_lookup(mem);
5114
5115 if (!chunk_is_mmapped(p))
5116 {
5117 /*
5118 Unroll clear of <= 40 bytes (80 if 8byte sizes)
5119 We know that contents have an even number of
5120 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5121 */
5122
5123 d = (INTERNAL_SIZE_T*)mem;
5124 clearsize = chunksize(p);
5125 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5126 assert(nclears >= 2);
5127
5128 if (nclears > 10) {
5129 MALLOC_ZERO(d, clearsize);
5130 }
5131
5132 else {
5133 *(d+0) = 0;
5134 *(d+1) = 0;
5135 if (nclears > 2) {
5136 *(d+2) = 0;
5137 *(d+3) = 0;
5138 if (nclears > 4) {
5139 *(d+4) = 0;
5140 *(d+5) = 0;
5141 if (nclears > 6) {
5142 *(d+6) = 0;
5143 *(d+7) = 0;
5144 if (nclears > 8) {
5145 *(d+8) = 0;
5146 *(d+9) = 0;
5147 }
5148 }
5149 }
5150 }
5151 }
5152 }
5153#if ! MMAP_CLEARS
5154 else
5155 {
5156 d = (INTERNAL_SIZE_T*)mem;
5157 clearsize = chunksize(p);
5158 MALLOC_ZERO(d, clearsize);
5159 }
5160#endif
5161 /* Set guard again, since we just cleared it
5162 */
5163 guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5164 }
5165
5166 return mem;
5167}
5168
5169/*
5170 ------------------------------ valloc ------------------------------
5171*/
5172
5173#if __STD_C
5174DL_STATIC Void_t* vALLOc(size_t bytes)
5175#else
5176DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5177#endif
5178{
5179 /* Ensure initialization */
5180 mstate av = get_malloc_state();
5181 if (!av || av->max_fast == 0) {
5182 malloc_consolidate(av);
5183 av = get_malloc_state();
5184 }
5185 return mEMALIGn(av->pagesize, bytes);
5186}
5187
5188/*
5189 ------------------------------ pvalloc ------------------------------
5190*/
5191
5192
5193#if __STD_C
5194DL_STATIC Void_t* pVALLOc(size_t bytes)
5195#else
5196DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5197#endif
5198{
5199 mstate av = get_malloc_state();
5200 size_t pagesz;
5201
5202 /* Ensure initialization */
5203 if (!av || av->max_fast == 0) {
5204 malloc_consolidate(av);
5205 av = get_malloc_state();
5206 }
5207 pagesz = av->pagesize;
5208 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5209}
5210
5211
5212/*
5213 ------------------------------ malloc_trim ------------------------------
5214*/
5215
5216#if __STD_C
5217DL_STATIC int mTRIm(size_t pad)
5218#else
5219DL_STATIC int mTRIm(pad) size_t pad;
5220#endif
5221{
5222 mstate av = get_malloc_state();
5223 /* Ensure initialization/consolidation */
5224 malloc_consolidate(av);
5225 av = get_malloc_state();
5226#ifndef MORECORE_CANNOT_TRIM
5227 if (morecore32bit(av))
5228 return sYSTRIm(pad, av);
5229 else
5230 return 0;
5231#else
5232 return 0;
5233#endif
5234}
5235
5236
5237
5238/*
5239 ------------------------- malloc_usable_size -------------------------
5240*/
5241
5242#if __STD_C
5243DL_STATIC size_t mUSABLe(Void_t* mem)
5244#else
5245DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5246#endif
5247{
5248 chunkinfoptr p;
5249 if (mem != 0) {
5250 p = hashtable_lookup(mem);
5251 if (p && inuse(p)) return chunksize(p);
5252 }
5253 return 0;
5254}
5255
5256/*
5257 ------------------------------ mallinfo ------------------------------
5258*/
5259
5260DL_STATIC struct mallinfo mALLINFo()
5261{
5262 mstate av = get_malloc_state();
5263 struct mallinfo mi;
5264 unsigned int i;
5265 mbinptr b;
5266 chunkinfoptr p;
5267 INTERNAL_SIZE_T avail;
5268 INTERNAL_SIZE_T fastavail;
5269 int nblocks;
5270 int nfastblocks;
5271
5272 /* Ensure initialization */
5273 if (!av || av->top == 0) {
5274 malloc_consolidate(av);
5275 av = get_malloc_state();
5276 }
5277 check_malloc_state();
5278
5279 /* Account for top */
5280 avail = chunksize(av->top);
5281 nblocks = 1; /* top always exists */
5282
5283 /* traverse fastbins */
5284 nfastblocks = 0;
5285 fastavail = 0;
5286
5287 for (i = 0; i < NFASTBINS; ++i) {
5288 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5289 ++nfastblocks;
5290 fastavail += chunksize(p);
5291 }
5292 }
5293
5294 avail += fastavail;
5295
5296 /* traverse regular bins */
5297 for (i = 1; i < NBINS; ++i) {
5298 b = bin_at(av, i);
5299 for (p = last(b); p != b; p = p->bk) {
5300 ++nblocks;
5301 avail += chunksize(p);
5302 }
5303 }
5304
5305 mi.smblks = nfastblocks;
5306 mi.ordblks = nblocks;
5307 mi.fordblks = avail;
5308 mi.uordblks = av->sbrked_mem - avail;
5309 mi.arena = av->sbrked_mem;
5310 mi.hblks = av->n_mmaps;
5311 mi.hblkhd = av->mmapped_mem;
5312 mi.fsmblks = fastavail;
5313 mi.keepcost = chunksize(av->top);
5314 mi.usmblks = av->max_total_mem;
5315 return mi;
5316}
5317
5318/*
5319 ------------------------------ malloc_stats ------------------------------
5320*/
5321
5322DL_STATIC void mSTATs()
5323{
5324 struct mallinfo mi = mALLINFo();
5325
5326 fprintf(stderr, "hashtable = %10lu MB\n",
5327 (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5328 fprintf(stderr, "max system bytes = %10lu\n",
5329 (CHUNK_SIZE_T)(mi.usmblks));
5330 fprintf(stderr, "system bytes = %10lu (%10lu sbrked, %10lu mmaped)\n",
5331 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5332 (CHUNK_SIZE_T)(mi.arena),
5333 (CHUNK_SIZE_T)(mi.hblkhd));
5334 fprintf(stderr, "in use bytes = %10lu\n",
5335 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5336
5337}
5338
5339
5340/*
5341 ------------------------------ mallopt ------------------------------
5342*/
5343
5344#if __STD_C
5345DL_STATIC int mALLOPt(int param_number, int value)
5346#else
5347DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5348#endif
5349{
5350 mstate av = get_malloc_state();
5351 /* Ensure initialization/consolidation */
5352 malloc_consolidate(av);
5353 av = get_malloc_state();
5354
5355 switch(param_number) {
5356 case M_MXFAST:
5357 if (value >= 0 && value <= MAX_FAST_SIZE) {
5358 set_max_fast(av, value);
5359 return 1;
5360 }
5361 else
5362 return 0;
5363
5364 case M_TRIM_THRESHOLD:
5365 av->trim_threshold = value;
5366 return 1;
5367
5368 case M_TOP_PAD:
5369 av->top_pad = value;
5370 return 1;
5371
5372 case M_MMAP_THRESHOLD:
5373 av->mmap_threshold = value;
5374 return 1;
5375
5376 case M_MMAP_MAX:
5377 if (value != 0)
5378 return 0;
5379 av->n_mmaps_max = value;
5380 return 1;
5381
5382 default:
5383 return 0;
5384 }
5385}
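/*
  Tuning sketch using the parameters handled above, shown with the
  internal name mALLOPt (normally exported as mallopt); the values are
  arbitrary illustrations, and each call returns 1 on success, 0 if the
  parameter is rejected:

      mALLOPt(M_MXFAST, 64);                  serve small requests (up to ~64 bytes) from fastbins
      mALLOPt(M_TRIM_THRESHOLD, 1024*1024);   only trim top once it exceeds 1 MB
      mALLOPt(M_TOP_PAD, 64*1024);            extra slack added to each sbrk request
*/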
5386
5387
5388/* $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $ */
5389
5390/*
5391 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5392 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5393 *
5394 * Permission to use, copy, modify, and distribute this software for any
5395 * purpose with or without fee is hereby granted, provided that the above
5396 * copyright notice and this permission notice appear in all copies.
5397 *
5398 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5399 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5400 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5401 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5402 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5403 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5404 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5405 */
5406
5407/*
5408 * Arc4 random number generator for OpenBSD.
5409 *
5410 * This code is derived from section 17.1 of Applied Cryptography,
5411 * second edition, which describes a stream cipher allegedly
5412 * compatible with RSA Labs "RC4" cipher (the actual description of
5413 * which is a trade secret). The same algorithm is used as a stream
5414 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5415 *
5416 * Here the stream cipher has been modified always to include the time
5417 * when initializing the state. That makes it impossible to
5418 * regenerate the same random sequence twice, so this can't be used
5419 * for encryption, but will generate good random numbers.
5420 *
5421 * RC4 is a registered trademark of RSA Laboratories.
5422 */
5423
5424/* Moved u_int8_t -> unsigned char (portability)
5425 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5426 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5427 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5428 * This is arc4random(3) using urandom.
5429 */
5430
5431#include <fcntl.h>
5432#include <limits.h>
5433#include <stdlib.h>
5434#include <sys/param.h>
5435#include <sys/time.h>
5436
5437struct arc4_stream {
5438 unsigned char i;
5439 unsigned char j;
5440 unsigned char s[256];
5441};
5442
5443static int rs_initialized;
5444static struct arc4_stream rs;
5445static pid_t arc4_stir_pid;
5446static int arc4_count;
5447
5448static unsigned char arc4_getbyte(void);
5449
5450static void
5451arc4_init(void)
5452{
5453 int n;
5454
5455 for (n = 0; n < 256; n++)
5456 rs.s[n] = n;
5457 rs.i = 0;
5458 rs.j = 0;
5459}
5460
5461static inline void
5462arc4_addrandom(unsigned char *dat, int datlen)
5463{
5464 int n;
5465 unsigned char si;
5466
5467 rs.i--;
5468 for (n = 0; n < 256; n++) {
5469 rs.i = (rs.i + 1);
5470 si = rs.s[rs.i];
5471 rs.j = (rs.j + si + dat[n % datlen]);
5472 rs.s[rs.i] = rs.s[rs.j];
5473 rs.s[rs.j] = si;
5474 }
5475 rs.j = rs.i;
5476}
5477
5478#ifdef HAVE_SCHED_H
5479#include <sched.h>
5480#endif
5481
5482static void
5483arc4_stir(void)
5484{
5485 int i;
5486 struct {
5487 struct timeval tv1;
5488 struct timeval tv2;
5489 u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
5490 } rdat;
5491#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5492 size_t sz = 0;
5493 int fd;
5494#endif
5495
5496 gettimeofday(&rdat.tv1, NULL);
5497
5498
5499 if (!rs_initialized) {
5500 arc4_init();
5501 rs_initialized = 1;
5502 }
5503
5504#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5505
5506#ifdef HAVE_SCHED_YIELD
5507 /* Yield the processor to introduce some random delay. */
5508 (void) sched_yield();
5509#endif
5510
5511 /*
5512 * Pthread problem in multithreaded code on *BSD.
5513 */
5514 fd = open("/dev/urandom", O_RDONLY);
5515 if (fd != -1) {
5516 sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
5517 close(fd);
5518 }
5519 if (sz > sizeof (rdat.rnd))
5520 sz = 0;
5521 #endif
5522
5523 arc4_stir_pid = getpid();
5524 gettimeofday(&rdat.tv2, NULL);
5525
5526 arc4_addrandom((void *)&rdat, sizeof(rdat));
5527
5528 /*
5529 * Discard early keystream, as per recommendations in:
5530 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
5531 */
5532 for (i = 0; i < 256; i++)
5533 (void)arc4_getbyte();
5534 arc4_count = 1600000;
5535}
5536
5537static unsigned char
5538arc4_getbyte(void)
5539{
5540 unsigned char si, sj;
5541
5542 rs.i = (rs.i + 1);
5543 si = rs.s[rs.i];
5544 rs.j = (rs.j + si);
5545 sj = rs.s[rs.j];
5546 rs.s[rs.i] = sj;
5547 rs.s[rs.j] = si;
5548 return (rs.s[(si + sj) & 0xff]);
5549}
5550
5551
5552 /* Changed to return char* */
5553static char *
5554dnmalloc_arc4random(void)
5555{
5556 static char val[4];
5557
5558 /* We only call this once, hence no need for locking. */
5559
5560 /* _ARC4_LOCK(); */
5561 arc4_count -= 4;
5562 if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5563 arc4_stir();
5564
5565 val[0] = (char) arc4_getbyte();
5566 val[1] = (char) arc4_getbyte();
5567 val[2] = (char) arc4_getbyte();
5568 val[3] = (char) arc4_getbyte();
5569
5570 arc4_stir();
5571 /* _ARC4_UNLOCK(); */
5572 return val;
5573}
5574
5575#else
5576int dnmalloc_pthread_init() { return 0; }
5577#endif /* ! USE_SYSTEM_MALLOC */