source: trunk/src/dnmalloc.c @ 383

Last change on this file since 383 was 383, checked in by katerina, 10 years ago

Fix for ticket #281 (warnings from clang static analyzer).

1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
2   Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3   and Rainer Wichmann
4
5   The authors can be contacted by:
6      Email: dnmalloc@fort-knox.org
7      Address:
8
9             Yves Younan
10             Celestijnenlaan 200A
11             B-3001 Heverlee
12             Belgium
13   
14   This library is free software; you can redistribute it and/or
15   modify it under the terms of the GNU Lesser General Public
16   License as published by the Free Software Foundation; either
17   version 2.1 of the License, or (at your option) any later version.
18
19   This library is distributed in the hope that it will be useful,
20   but WITHOUT ANY WARRANTY; without even the implied warranty of
21   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22   Lesser General Public License for more details.
23
24   You should have received a copy of the GNU Lesser General Public
25   License along with this library; if not, write to the Free Software
26   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
27   
28*/
29
30/* Current version: dnmalloc 1.0 */
31/* Includes arc4random from OpenBSD, which is under the BSD license     */
32
33/* Versions:
34   0.1-0.5:
35   Proof of concept implementation by Hans Van den Eynden and Yves Younan
36   0.6-0.7:
37   Bug fixes by Yves Younan
38   0.8-1.0.beta4:
39   Reimplementation from scratch by Yves Younan
40   1.0.beta4:
41   Public release
42   1.0.beta5:
43   Prev_chunkinfo sped up; it was really slow because of the way we did lookups
44   A freechunkinfo region is now freed when it is completely empty and
45   not the current one
46
47   1.0 (Rainer Wichmann [support at la dash samhna dot org]):
48   ---------------------
49
50   Compiler warnings fixed
51   Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
52       (and what the standard says)
53   Removed unused code
54   Fix       assert(aligned_OK(chunk(newp)));
55         ->  assert(aligned_OK(chunk(oldp)));
56   Fix statistics in sYSMALLOc
57   Fix overwrite of av->top in sYSMALLOc
58   Provide own assert(), glibc assert() doesn't work (calls malloc)
59   Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
60   Remove cfree, independent_calloc, independent_comalloc (untested
61       public functions not covered by any standard)
62   Provide posix_memalign (that one is in the standard)
63   Move the malloc_state struct to mmapped memory protected by guard pages
64   Add arc4random function to initialize random canary on startup
65   Implement random canary at end of (re|m)alloced/memaligned buffer,
66       check at free/realloc
67   Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
68   Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
69
70   Profiling: Reorder branches in hashtable_add, next_chunkinfo,
71                  prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
72                  checked_request2size (gcc predicts if{} branch to be taken).
73              Use UNLIKELY macro (gcc __builtin_expect()) where branch
74                  reordering would make the code awkward.
75
76   Portability: Hashtable always covers full 32bit address space to
77                avoid assumptions about memory layout.
78   Portability: Try hard to enforce mapping of mmapped memory into
79                32bit address space, even on 64bit systems.
80   Portability: Provide a dnmalloc_pthread_init() function, since
81                pthread locking on HP-UX only works if initialized
82                after the application has entered main().
83   Portability: On *BSD, pthread_mutex_lock is unusable since it
84                calls malloc, use spinlocks instead.
85   Portability: Dynamically detect whether the heap is within
86                32bit address range (e.g. on Linux x86_64, it isn't).
87                Don't use sbrk() if the heap is mapped to an address
88                outside the 32bit range, since this doesn't work with
89                the hashtable. New macro morecore32bit.
90               
91   Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
92               FreeBSD/pthread, and Solaris 10 i386/pthread.
93   Fail    on: OpenBSD/pthread (in  _thread_machdep_save_float_state),
94               might be related to OpenBSD pthread internals (??).
95               Non-threaded version (#undef USE_MALLOC_LOCK)
96               works on OpenBSD.
97   
98   further to 1.0:
99   Valgrind client requests inserted (#define USE_VALGRIND)
100   Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
101   Portability: minsize = 32 bit on 64bit architecture
102   Minor cleanups
103   Fix: eliminate prototypes for memset, memcpy (they're in string.h)
104   Fix: one more malloc_consolidate segfault
105
106   There may be some bugs left in this version. Please use with caution.
107*/
108
109
110
111/* Please read the following papers for documentation:
112
113   Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
114   Countermeasures against Current and Future Code Injection Attacks,
115   Proceedings of the Third IEEE International Information Assurance
116   Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
117   IEEE, IEEE Press.
118   http://www.fort-knox.org/younany_countermeasures.pdf
119   
120   Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
121   Eynden. Security of Memory Allocators for C and C++. Technical Report
122   CW419, Departement Computerwetenschappen, Katholieke Universiteit
123   Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
124 
125 */
126
127/* Compile:
128   gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
129   "Link":
130   Dynamic:
131   gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
132   Static:
133   ar -rv libdnmalloc.a dnmalloc-portable.o
134   
135*/
136
137/*
138   dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
139   dlmalloc was released as public domain and contained the following license:
140   
141   "This is a version (aka dlmalloc) of malloc/free/realloc written by
142   Doug Lea and released to the public domain.  Use, modify, and
143   redistribute this code without permission or acknowledgement in any
144   way you wish.  Send questions, comments, complaints, performance
145   data, etc to dl@cs.oswego.edu
146   
147   * VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
148   
149   Note: There may be an updated version of this malloc obtainable at
150   ftp://gee.cs.oswego.edu/pub/misc/malloc.c
151   Check before installing!"
152   
153*/
154
155/* The following preprocessor macros are tested,
156 *   and hence should have #define directives:
157 *
158 *   HAVE_CONFIG_H    Define to #include "config.h" (autoconf-generated)
159 *
160 *   HAVE_UNISTD_H    Define to #include <unistd.h>
161 *
162 *   HAVE_SYS_UIO_H   Define to #include <sys/uio.h> (for writev)
163 *   HAVE_WRITEV      Define if the 'writev' function is available
164 *
165 *   HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
166 *
167 *   HAVE_MALLOC_H    Define to #include <malloc.h> (for struct mallinfo)
168 *
169 *   HAVE_FCNTL_H     Define to #include <fcntl.h>
170 *
171 *   HAVE_SYS_MMAN_H  Define to #include <sys/mman.h>
172 *   HAVE_MMAP        Define if the 'mmap' function is available.
173 *
174 *   HAVE_SCHED_H     Define to #include <sched.h>
175 *   HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
176 */
177
178
179/*
180  __STD_C should be nonzero if using ANSI-standard C compiler, a C++
181  compiler, or a C compiler sufficiently close to ANSI to get away
182  with it.
183*/
184
185#ifdef HAVE_CONFIG_H
186#include "config.h"
187#endif
188
189#ifdef USE_VALGRIND
190#include <valgrind/memcheck.h>
191#else
192#define VALGRIND_FREELIKE_BLOCK(a,b)       ((void)0)
193#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
194#define VALGRIND_CREATE_MEMPOOL(a,b,c)     ((void)0)
195#define VALGRIND_MEMPOOL_ALLOC(a,b,c)      ((void)0)
196#define VALGRIND_MEMPOOL_FREE(a,b)         ((void)0)
197#define VALGRIND_DESTROY_MEMPOOL(a)        ((void)0)
198#define VALGRIND_MAKE_MEM_DEFINED(a,b)     ((void)0)
199#define VALGRIND_MAKE_MEM_UNDEFINED(a,b)   ((void)0)
200#define VALGRIND_MAKE_MEM_NOACCESS(a,b)    ((void)0)
201#endif
202
203#if defined (__GNUC__) && __GNUC__ > 2
204# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
205# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
206# define __attribute_malloc__ __attribute__ ((__malloc__))
207#else
208# define LIKELY(x)       (x)
209# define UNLIKELY(x)     (x)
210# define __attribute_malloc__ /* Ignore */
211#endif
212
213/*
214  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
215  large blocks.  This is currently only possible on Linux with
216  kernel versions newer than 1.3.77.
217*/
218
219#ifndef HAVE_MREMAP
220#ifdef linux
221#define HAVE_MREMAP 1
222#define _GNU_SOURCE
223#else
224#define HAVE_MREMAP 0
225#endif
226#endif /* HAVE_MREMAP */
227
228
229
230#ifndef __STD_C
231#if defined(__STDC__) || defined(_cplusplus)
232#define __STD_C     1
233#else
234#define __STD_C     0
235#endif
236#endif /*__STD_C*/
237
238
239/*
240  Void_t* is the pointer type that malloc should say it returns
241*/
242
243#ifndef Void_t
244#if (__STD_C || defined(WIN32))
245#define Void_t      void
246#else
247#define Void_t      char
248#endif
249#endif /*Void_t*/
250
251#if __STD_C
252#include <stddef.h>   /* for size_t */
253#else
254#include <sys/types.h>
255#endif
256
257#if !defined(USE_SYSTEM_MALLOC)
258
259#ifdef __cplusplus
260extern "C" {
261#endif
262
263/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
264
265#ifdef HAVE_UNISTD_H
266#include <unistd.h>
267#endif
268
269#ifdef HAVE_SYS_UIO_H
270#include <sys/uio.h>
271#endif
272
273#include <stdio.h>    /* needed for malloc_stats */
274#include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
275
276#include <string.h>
277#include <stdlib.h>
278
279#include <sys/resource.h>
280
281extern int errno;
282
283  /* 0: lazy,
284   * 1: medium (assertions compiled in),
285   * 2: high (guard pages at end of hash table and ciregions)
286   * 3: paranoid (guards at end of each allocated chunk, check at free)
287   */
288#ifndef PARANOIA
289#define PARANOIA 9
290#endif
291
292  /* Using assert() with multithreading will cause the code
293   * to deadlock since glibc __assert_fail will call malloc().
294   * We need our very own assert().
295   */
296typedef void assert_handler_tp(const char * error, const char *file, int line);
297
298#if  PARANOIA > 0
299
300#ifdef NDEBUG
301#undef NDEBUG
302#endif
303
304static void default_assert_handler(const char *error, 
305                                   const char *file, int line)
306{
307#ifdef HAVE_WRITEV
308  struct iovec iov[5];
309  char * i1 = "assertion failed (";
310  char * i3 = "): ";
311  char * i5 = "\n";
312  int   res = 0;
313
314  iov[0].iov_base = i1;               iov[0].iov_len = strlen(i1); 
315  iov[1].iov_base = (char*) file;     iov[1].iov_len = strlen(file); 
316  iov[2].iov_base = i3;               iov[2].iov_len = strlen(i3); 
317  iov[3].iov_base = (char*) error;    iov[3].iov_len = strlen(error); 
318  iov[4].iov_base = i5;               iov[4].iov_len = strlen(i5);
319  do {
320    res = writev(STDERR_FILENO, iov, 5);
321  } while (res < 0 && errno == EINTR); 
322#else
323  fputs("assertion failed (", stderr);
324  fputs(file, stderr);
325  fputs("): ", stderr);
326  fputs(error, stderr);
327  fputc('\n', stderr);
328#endif
329  (void) line;
330  abort();
331}
332static assert_handler_tp *assert_handler = default_assert_handler;
333
334
335#define assert(x)                               \
336  do {                                          \
337    if (UNLIKELY(!(x))) {                       \
338      assert_handler(#x, __FILE__, __LINE__);   \
339    }                                           \
340  } while (0)
341
342#else
343
344static assert_handler_tp *assert_handler = NULL;
345#define NDEBUG
346#define assert(x) ((void)0)
347
348#endif
349
350assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *new)
351{
352  assert_handler_tp *old = assert_handler;
353  assert_handler = new;
354  return old;
355}
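
/*
  Illustrative sketch, not compiled (#if 0): how an application might install
  its own assertion handler via dnmalloc_set_handler(). The names
  my_assert_handler and install_assert_handler are hypothetical. Like the
  default handler above, a replacement should avoid functions that may call
  malloc().
*/
#if 0
static void my_assert_handler(const char *error, const char *file, int line)
{
  /* stderr is unbuffered, so fputs/fputc should not allocate */
  fputs("dnmalloc assertion failed (", stderr);
  fputs(file, stderr);
  fputs("): ", stderr);
  fputs(error, stderr);
  fputc('\n', stderr);
  (void) line;
  abort();
}

static void install_assert_handler(void)
{
  assert_handler_tp *previous = dnmalloc_set_handler(my_assert_handler);
  (void) previous;  /* could be restored later with dnmalloc_set_handler(previous) */
}
#endif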
356
357
358#include <stdarg.h>
359
360  /* define for debugging */
361  /* #define DNMALLOC_DEBUG */ 
362
363  /* Do some extra checks? If not, covered by assert()s */
364  /* #define DNMALLOC_CHECKS */
365
366  /*
367    The unsigned integer type used for comparing any two chunk sizes.
368    This should be at least as wide as size_t, but should not be signed.
369  */
370
371#ifndef CHUNK_SIZE_T
372#define CHUNK_SIZE_T unsigned long
373#endif
374
375/*
376  The unsigned integer type used to hold addresses when they are
377  manipulated as integers. Were it not missing on some systems,
378  intptr_t would suffice.
379*/
380#ifndef PTR_UINT
381#define PTR_UINT unsigned long
382#endif
383
384
385/*
386  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
387  of chunk sizes.
388
389  The default version is the same as size_t.
390
391  While not strictly necessary, it is best to define this as an
392  unsigned type, even if size_t is a signed type. This may avoid some
393  artificial size limitations on some systems.
394
395  On a 64-bit machine, you may be able to reduce malloc overhead by
396  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
397  expense of not being able to handle more than 2^32 of malloced
398  space. If this limitation is acceptable, you are encouraged to set
399  this unless you are on a platform requiring 16byte alignments. In
400  this case the alignment requirements turn out to negate any
401  potential advantages of decreasing size_t word size.
402
403  Implementors: Beware of the possible combinations of:
404     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
405       and might be the same width as int or as long
406     - size_t might have different width and signedness than INTERNAL_SIZE_T
407     - int and long might be 32 or 64 bits, and might be the same width
408  To deal with this, most comparisons and difference computations
409  among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
410  aware of the fact that casting an unsigned int to a wider long does
411  not sign-extend. (This also makes checking for negative numbers
412  awkward.) Some of these casts result in harmless compiler warnings
413  on some systems.
414*/
415
416#ifndef INTERNAL_SIZE_T
417#define INTERNAL_SIZE_T size_t
418#endif
419
420/* The corresponding word size */
421#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
422
423
424
425/*
426  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
427  It must be a power of two at least 2 * SIZE_SZ, even on machines
428  for which smaller alignments would suffice. It may be defined as
429  larger than this though. Note however that code and data structures
430  are optimized for the case of 8-byte alignment.
431*/
432
433
434#ifndef MALLOC_ALIGNMENT
435#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
436#endif
437
438/* The corresponding bit mask value */
439#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
440
441
442
443/*
444  REALLOC_ZERO_BYTES_FREES should be set if a call to
445  realloc with zero bytes should be the same as a call to free.
446  Some people think it should. Otherwise, since this malloc
447  returns a unique pointer for malloc(0), so does realloc(p, 0).
448*/
449
450#define REALLOC_ZERO_BYTES_FREES
451
452/*
453  TRIM_FASTBINS controls whether free() of a very small chunk can
454  immediately lead to trimming. Setting to true (1) can reduce memory
455  footprint, but will almost always slow down programs that use a lot
456  of small chunks.
457
458  Define this only if you are willing to give up some speed to more
459  aggressively reduce system-level memory footprint when releasing
460  memory in programs that use many small chunks.  You can get
461  essentially the same effect by setting MXFAST to 0, but this can
462  lead to even greater slowdowns in programs using many small chunks.
463  TRIM_FASTBINS is an in-between compile-time option, that disables
464  only those chunks bordering topmost memory from being placed in
465  fastbins.
466*/
467
468#ifndef TRIM_FASTBINS
469#define TRIM_FASTBINS  0
470#endif
471
472
473/*
474  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
475  This is necessary when you only want to use this malloc in one part
476  of a program, using your regular system malloc elsewhere.
477*/
478
479/* #define USE_DL_PREFIX */
480
481
482/*
483  USE_MALLOC_LOCK causes wrapper functions to surround each
484  callable routine with pthread mutex lock/unlock.
485
486  USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
487*/
488
489/* #define USE_MALLOC_LOCK */
490
491
492/*
493  If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
494  actually a wrapper function that first calls MALLOC_PREACTION, then
495  calls the internal routine, and follows it with
496  MALLOC_POSTACTION. This is needed for locking, but you can also use
497  this, without USE_MALLOC_LOCK, for purposes of interception,
498  instrumentation, etc. It is a sad fact that using wrappers often
499  noticeably degrades performance of malloc-intensive programs.
500*/
501
502
503#ifdef USE_MALLOC_LOCK
504#define USE_PUBLIC_MALLOC_WRAPPERS
505#else
506/* #define USE_PUBLIC_MALLOC_WRAPPERS */
507#endif
508
509
510/*
511   Two-phase name translation.
512   All of the actual routines are given mangled names.
513   When wrappers are used, they become the public callable versions.
514   When DL_PREFIX is used, the callable names are prefixed.
515*/
516
517#ifndef USE_PUBLIC_MALLOC_WRAPPERS
518#define cALLOc      public_cALLOc
519#define fREe        public_fREe
520#define mALLOc      public_mALLOc
521#define mEMALIGn    public_mEMALIGn
522#define posix_mEMALIGn    public_posix_mEMALIGn
523#define rEALLOc     public_rEALLOc
524#define vALLOc      public_vALLOc
525#define pVALLOc     public_pVALLOc
526#define mALLINFo    public_mALLINFo
527#define mALLOPt     public_mALLOPt
528#define mTRIm       public_mTRIm
529#define mSTATs      public_mSTATs
530#define mUSABLe     public_mUSABLe
531#endif
532
533#ifdef USE_DL_PREFIX
534#define public_cALLOc    dlcalloc
535#define public_fREe      dlfree
536#define public_mALLOc    dlmalloc
537#define public_mEMALIGn  dlmemalign
538#define public_posix_mEMALIGn  dlposix_memalign
539#define public_rEALLOc   dlrealloc
540#define public_vALLOc    dlvalloc
541#define public_pVALLOc   dlpvalloc
542#define public_mALLINFo  dlmallinfo
543#define public_mALLOPt   dlmallopt
544#define public_mTRIm     dlmalloc_trim
545#define public_mSTATs    dlmalloc_stats
546#define public_mUSABLe   dlmalloc_usable_size
547#else /* USE_DL_PREFIX */
548#define public_cALLOc    calloc
549#define public_fREe      free
550#define public_mALLOc    malloc
551#define public_mEMALIGn  memalign
552#define public_posix_mEMALIGn  posix_memalign
553#define public_rEALLOc   realloc
554#define public_vALLOc    valloc
555#define public_pVALLOc   pvalloc
556#define public_mALLINFo  mallinfo
557#define public_mALLOPt   mallopt
558#define public_mTRIm     malloc_trim
559#define public_mSTATs    malloc_stats
560#define public_mUSABLe   malloc_usable_size
561#endif /* USE_DL_PREFIX */
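
/*
  Illustrative sketch, not compiled (#if 0): with USE_DL_PREFIX defined, the
  public routines become dlmalloc, dlfree, etc., so application code can use
  dnmalloc alongside the regular system malloc. The function name
  mixed_allocators is hypothetical.
*/
#if 0
static void mixed_allocators(void)
{
  void *a = dlmalloc(128);      /* served by dnmalloc */
  void *b = malloc(128);        /* served by the system allocator */
  dlfree(a);                    /* each pointer goes back to its own allocator */
  free(b);
}
#endif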
562
563
564/*
565  HAVE_MEMCPY should be defined if you are not otherwise using
566  ANSI STD C, but still have memcpy and memset in your C library
567  and want to use them in calloc and realloc. Otherwise simple
568  macro versions are defined below.
569
570  USE_MEMCPY should be defined as 1 if you actually want to
571  have memset and memcpy called. People report that the macro
572  versions are faster than libc versions on some systems.
573 
574  Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
575  (of <= 36 bytes) are manually unrolled in realloc and calloc.
576*/
577
578#ifndef HAVE_MEMCPY
579#define HAVE_MEMCPY
580#endif
581
582#ifndef USE_MEMCPY
583#ifdef HAVE_MEMCPY
584#define USE_MEMCPY 1
585#else
586#define USE_MEMCPY 0
587#endif
588#endif
589
590
591#if (__STD_C || defined(HAVE_MEMCPY))
592
593#ifdef WIN32
594  /* On Win32 memset and memcpy are already declared in windows.h */
595#else
596#if __STD_C
597  /* Defined in string.h */
598#else
599Void_t* memset();
600Void_t* memcpy();
601#endif
602#endif
603#endif
604
605/*
606  MALLOC_FAILURE_ACTION is the action to take before "return 0" when
607  malloc is unable to return memory, either because memory is
608  exhausted or because of illegal arguments.
609 
610  By default, sets errno if running on STD_C platform, else does nothing. 
611*/
612
613#ifndef MALLOC_FAILURE_ACTION
614#if __STD_C
615#define MALLOC_FAILURE_ACTION \
616   errno = ENOMEM;
617
618#else
619#define MALLOC_FAILURE_ACTION
620#endif
621#endif
622
623/*
624  MORECORE-related declarations. By default, rely on sbrk
625*/
626
627
628#if !defined(HAVE_UNISTD_H)
629#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
630#if __STD_C
631extern Void_t*     sbrk(ptrdiff_t);
632#else
633extern Void_t*     sbrk();
634#endif
635#endif
636#endif
637
638/*
639  MORECORE_FAILURE is the value returned upon failure of MORECORE
640  as well as mmap. Since it cannot be an otherwise valid memory address,
641  and must reflect values of standard sys calls, you probably ought not
642  try to redefine it.
643*/
644
645#ifndef MORECORE_FAILURE
646#define MORECORE_FAILURE ((void*)(-1UL))
647#endif
648
649/*
650  MORECORE is the name of the routine to call to obtain more memory
651  from the system.  See below for general guidance on writing
652  alternative MORECORE functions, as well as a version for WIN32 and a
653  sample version for pre-OSX macos.
654*/
655
656#ifndef MORECORE
657#define MORECORE sbrk
658#endif
659
660
661/*
662  If MORECORE_CONTIGUOUS is true, take advantage of fact that
663  consecutive calls to MORECORE with positive arguments always return
664  contiguous increasing addresses.  This is true of unix sbrk.  Even
665  if not defined, when regions happen to be contiguous, malloc will
666  permit allocations spanning regions obtained from different
667  calls. But defining this when applicable enables some stronger
668  consistency checks and space efficiencies.
669*/
670
671#ifndef MORECORE_CONTIGUOUS
672#define MORECORE_CONTIGUOUS 1
673#endif
674
675/*
676  Define MORECORE_CANNOT_TRIM if your version of MORECORE
677  cannot release space back to the system when given negative
678  arguments. This is generally necessary only if you are using
679  a hand-crafted MORECORE function that cannot handle negative arguments.
680*/
681
682/* #define MORECORE_CANNOT_TRIM */
683
684
685/*
686  This malloc requires mmap for heap management data. It is an error
687  if mmap is not available.
688
689  Additionally, mmap will be used to satisfy large requests.
690*/
691
692#ifndef HAVE_MMAP
693#  error HAVE_MMAP not defined, has your operating system mmap?
694#endif
695
696/*
697   Standard unix mmap using /dev/zero clears memory so calloc doesn't
698   need to.
699*/
700
701#ifndef MMAP_CLEARS
702#define MMAP_CLEARS 1
703#endif
704
705
706/*
707   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
708   sbrk fails, and mmap is used as a backup (which is done only if
709   HAVE_MMAP).  The value must be a multiple of page size.  This
710   backup strategy generally applies only when systems have "holes" in
711   address space, so sbrk cannot perform contiguous expansion, but
712   there is still space available on system.  On systems for which
713   this is known to be useful (i.e. most linux kernels), this occurs
714   only when programs allocate huge amounts of memory.  Between this,
715   and the fact that mmap regions tend to be limited, the size should
716   be large, to avoid too many mmap calls and thus avoid running out
717   of kernel resources.
718*/
719
720#ifndef MMAP_AS_MORECORE_SIZE
721#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
722#endif
723
724
725/*
726  The system page size. To the extent possible, this malloc manages
727  memory from the system in page-size units.  Note that this value is
728  cached during initialization into a field of malloc_state. So even
729  if malloc_getpagesize is a function, it is only called once.
730
731  The following mechanics for getpagesize were adapted from bsd/gnu
732  getpagesize.h. If none of the system-probes here apply, a value of
733  4096 is used, which should be OK: If they don't apply, then using
734  the actual value probably doesn't impact performance.
735*/
736
737
738#ifndef malloc_getpagesize
739
740#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
741#    ifndef _SC_PAGE_SIZE
742#      define _SC_PAGE_SIZE _SC_PAGESIZE
743#    endif
744#  endif
745
746#  ifdef _SC_PAGE_SIZE
747#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
748#  else
749#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
750       extern size_t getpagesize();
751#      define malloc_getpagesize getpagesize()
752#    else
753#      ifdef WIN32 /* use supplied emulation of getpagesize */
754#        define malloc_getpagesize getpagesize()
755#      else
756#        if defined(HAVE_SYS_PARAM_H)
757#          include <sys/param.h>
758#        endif
759#        ifdef EXEC_PAGESIZE
760#          define malloc_getpagesize EXEC_PAGESIZE
761#        else
762#          ifdef NBPG
763#            ifndef CLSIZE
764#              define malloc_getpagesize NBPG
765#            else
766#              define malloc_getpagesize (NBPG * CLSIZE)
767#            endif
768#          else
769#            ifdef NBPC
770#              define malloc_getpagesize NBPC
771#            else
772#              ifdef PAGESIZE
773#                define malloc_getpagesize PAGESIZE
774#              else /* just guess */
775#                define malloc_getpagesize (4096)
776#              endif
777#            endif
778#          endif
779#        endif
780#      endif
781#    endif
782#  endif
783#endif
784
785/*
786  This version of malloc supports the standard SVID/XPG mallinfo
787  routine that returns a struct containing usage properties and
788  statistics. It should work on any SVID/XPG compliant system that has
789  a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
790  install such a thing yourself, cut out the preliminary declarations
791  as described above and below and save them in a malloc.h file. But
792  there's no compelling reason to bother to do this.)
793
794  The main declaration needed is the mallinfo struct that is returned
795  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
796  bunch of fields that are not even meaningful in this version of
797  malloc.  These fields are instead filled by mallinfo() with
798  other numbers that might be of interest.
799
800  HAVE_MALLOC_H should be set if you have a
801  /usr/include/malloc.h file that includes a declaration of struct
802  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
803  version is declared below.  These must be precisely the same for
804  mallinfo() to work.  The original SVID version of this struct,
805  defined on most systems with mallinfo, declares all fields as
806  ints. But some others define them as unsigned long. If your system
807  defines the fields using a type of different width than listed here,
808  you must #include your system version and #define
809  HAVE_MALLOC_H.
810*/
811
812/* #define HAVE_MALLOC_H */
813
814/* On *BSD, malloc.h is deprecated, and on some *BSD systems
815 * including it may actually raise an error.
816 */
817#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
818#include <malloc.h>
819#else
820
821/* SVID2/XPG mallinfo structure */
822
823struct mallinfo {
824  int arena;    /* non-mmapped space allocated from system */
825  int ordblks;  /* number of free chunks */
826  int smblks;   /* number of fastbin blocks */
827  int hblks;    /* number of mmapped regions */
828  int hblkhd;   /* space in mmapped regions */
829  int usmblks;  /* maximum total allocated space */
830  int fsmblks;  /* space available in freed fastbin blocks */
831  int uordblks; /* total allocated space */
832  int fordblks; /* total free space */
833  int keepcost; /* top-most, releasable (via malloc_trim) space */
834};
835
836/*
837  SVID/XPG defines four standard parameter numbers for mallopt,
838  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
839  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
840  so setting them has no effect. But this malloc also supports other
841  options in mallopt described below.
842*/
843#endif
844
845
846/* ---------- description of public routines ------------ */
847
848/*
849  malloc(size_t n)
850  Returns a pointer to a newly allocated chunk of at least n bytes, or null
851  if no space is available. Additionally, on failure, errno is
852  set to ENOMEM on ANSI C systems.
853
854  If n is zero, malloc returns a minimum-sized chunk. (The minimum
855  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
856  systems.)  On most systems, size_t is an unsigned type, so calls
857  with negative arguments are interpreted as requests for huge amounts
858  of space, which will often fail. The maximum supported value of n
859  differs across systems, but is in all cases less than the maximum
860  representable value of a size_t.
861*/
862#if __STD_C
863Void_t*  public_mALLOc(size_t) __attribute_malloc__;
864#else
865Void_t*  public_mALLOc();
866#endif
867
868/*
869  free(Void_t* p)
870  Releases the chunk of memory pointed to by p, that had been previously
871  allocated using malloc or a related routine such as realloc.
872  It has no effect if p is null. It can have arbitrary (i.e., bad!)
873  effects if p has already been freed.
874
875  Unless disabled (using mallopt), freeing very large spaces will,
876  when possible, automatically trigger operations that give
877  back unused memory to the system, thus reducing program footprint.
878*/
879#if __STD_C
880void     public_fREe(Void_t*);
881#else
882void     public_fREe();
883#endif
884
885/*
886  calloc(size_t n_elements, size_t element_size);
887  Returns a pointer to n_elements * element_size bytes, with all locations
888  set to zero.
889*/
890#if __STD_C
891Void_t*  public_cALLOc(size_t, size_t) __attribute_malloc__;
892#else
893Void_t*  public_cALLOc();
894#endif
895
896/*
897  realloc(Void_t* p, size_t n)
898  Returns a pointer to a chunk of size n that contains the same data
899  as does chunk p up to the minimum of (n, p's size) bytes, or null
900  if no space is available.
901
902  The returned pointer may or may not be the same as p. The algorithm
903  prefers extending p when possible, otherwise it employs the
904  equivalent of a malloc-copy-free sequence.
905
906  If p is null, realloc is equivalent to malloc. 
907
908  If space is not available, realloc returns null, errno is set (if on
909  ANSI) and p is NOT freed.
910
911  If n is for fewer bytes than already held by p, the newly unused
912  space is lopped off and freed if possible.  Unless the #define
913  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
914  zero (re)allocates a minimum-sized chunk.
915
916  Large chunks that were internally obtained via mmap will always
917  be reallocated using malloc-copy-free sequences unless
918  the system supports MREMAP (currently only linux).
919
920  The old unix realloc convention of allowing the last-free'd chunk
921  to be used as an argument to realloc is not supported.
922*/
923#if __STD_C
924Void_t*  public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
925#else
926Void_t*  public_rEALLOc();
927#endif
928
929/*
930  memalign(size_t alignment, size_t n);
931  Returns a pointer to a newly allocated chunk of n bytes, aligned
932  in accord with the alignment argument.
933
934  The alignment argument should be a power of two. If the argument is
935  not a power of two, the nearest greater power is used.
936  8-byte alignment is guaranteed by normal malloc calls, so don't
937  bother calling memalign with an argument of 8 or less.
938
939  Overreliance on memalign is a sure way to fragment space.
940*/
941#if __STD_C
942Void_t*  public_mEMALIGn(size_t, size_t) __attribute_malloc__;
943#else
944Void_t*  public_mEMALIGn();
945#endif
946
947/*
948  posix_memalign(void** memptr, size_t alignment, size_t n);
949  Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
950  in accord with the alignment argument. Returns 0 on success, otherwise
951  an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
952
953  The alignment must be a power of two, and a multiple of sizeof(void *).
954*/
955#if __STD_C
956int public_posix_mEMALIGn(Void_t**, size_t, size_t);
957#else
958int public_posix_mEMALIGn();
959#endif
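
/*
  Illustrative sketch, not compiled (#if 0): typical posix_memalign usage from
  application code. The alignment must be a power of two and a multiple of
  sizeof(void*); on failure the return value (not errno) carries the error.
  The function name aligned_buffer is hypothetical.
*/
#if 0
static void *aligned_buffer(size_t size)
{
  void *p = NULL;
  int rc = posix_memalign(&p, 64, size);  /* e.g. 64-byte (cache line) alignment */
  if (rc != 0)                            /* EINVAL or ENOMEM */
    return NULL;
  return p;                               /* release with free() */
}
#endif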
960
961/*
962  valloc(size_t n);
963  Equivalent to memalign(pagesize, n), where pagesize is the page
964  size of the system. If the pagesize is unknown, 4096 is used.
965*/
966#if __STD_C
967Void_t*  public_vALLOc(size_t) __attribute_malloc__;
968#else
969Void_t*  public_vALLOc();
970#endif
971
972
973
974/*
975  mallopt(int parameter_number, int parameter_value)
976  Sets tunable parameters. The format is to provide a
977  (parameter-number, parameter-value) pair.  mallopt then sets the
978  corresponding parameter to the argument value if it can (i.e., so
979  long as the value is meaningful), and returns 1 if successful else
980  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
981  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
982  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
983  so setting them has no effect. But this malloc also supports four
984  other options in mallopt. See below for details.  Briefly, supported
985  parameters are as follows (listed defaults are for "typical"
986  configurations).
987
988  Symbol            param #   default    allowed param values
989  M_MXFAST          1         64         0-80  (0 disables fastbins)
990  M_TRIM_THRESHOLD -1         256*1024   any   (-1U disables trimming)
991  M_TOP_PAD        -2         0          any 
992  M_MMAP_THRESHOLD -3         256*1024   any   (or 0 if no MMAP support)
993  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
994*/
995#if __STD_C
996int      public_mALLOPt(int, int);
997#else
998int      public_mALLOPt();
999#endif
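
/*
  Illustrative sketch, not compiled (#if 0): tuning the allocator through
  mallopt() using the parameter numbers from the table above. The values are
  arbitrary examples, not recommendations; the function name tune_allocator
  is hypothetical.
*/
#if 0
static void tune_allocator(void)
{
  (void) mallopt(M_MXFAST, 0);                   /* disable fastbins entirely */
  (void) mallopt(M_TRIM_THRESHOLD, 128 * 1024);  /* trim when 128K of top space is free */
  (void) mallopt(M_MMAP_THRESHOLD, 192 * 1024);  /* mmap requests of 192K and up */
  (void) mallopt(M_MMAP_MAX, 1024);              /* at most 1024 simultaneous mmaps */
}
#endif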
1000
1001
1002/*
1003  mallinfo()
1004  Returns (by copy) a struct containing various summary statistics:
1005
1006  arena:     current total non-mmapped bytes allocated from system
1007  ordblks:   the number of free chunks
1008  smblks:    the number of fastbin blocks (i.e., small chunks that
1009               have been freed but not yet reused or consolidated)
1010  hblks:     current number of mmapped regions
1011  hblkhd:    total bytes held in mmapped regions
1012  usmblks:   the maximum total allocated space. This will be greater
1013                than current total if trimming has occurred.
1014  fsmblks:   total bytes held in fastbin blocks
1015  uordblks:  current total allocated space (normal or mmapped)
1016  fordblks:  total free space
1017  keepcost:  the maximum number of bytes that could ideally be released
1018               back to system via malloc_trim. ("ideally" means that
1019               it ignores page restrictions etc.)
1020
1021  Because these fields are ints, but internal bookkeeping may
1022  be kept as longs, the reported values may wrap around zero and
1023  thus be inaccurate.
1024*/
1025#if __STD_C
1026struct mallinfo public_mALLINFo(void);
1027#else
1028struct mallinfo public_mALLINFo();
1029#endif
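
/*
  Illustrative sketch, not compiled (#if 0): reading the summary statistics
  returned by mallinfo(). The fields are ints and may wrap, as noted above.
  The function name print_heap_usage is hypothetical.
*/
#if 0
static void print_heap_usage(void)
{
  struct mallinfo mi = mallinfo();
  printf("arena (non-mmapped bytes from system): %d\n", mi.arena);
  printf("mmapped regions / bytes in them:       %d / %d\n", mi.hblks, mi.hblkhd);
  printf("allocated / free bytes:                %d / %d\n", mi.uordblks, mi.fordblks);
  printf("trimmable top space (keepcost):        %d\n", mi.keepcost);
}
#endif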
1030
1031/*
1032  pvalloc(size_t n);
1033  Equivalent to valloc(minimum-page-that-holds(n)), that is,
1034  round up n to nearest pagesize.
1035 */
1036#if __STD_C
1037Void_t*  public_pVALLOc(size_t) __attribute_malloc__;
1038#else
1039Void_t*  public_pVALLOc();
1040#endif
1041
1042/*
1043  malloc_trim(size_t pad);
1044
1045  If possible, gives memory back to the system (via negative
1046  arguments to sbrk) if there is unused memory at the `high' end of
1047  the malloc pool. You can call this after freeing large blocks of
1048  memory to potentially reduce the system-level memory requirements
1049  of a program. However, it cannot guarantee to reduce memory. Under
1050  some allocation patterns, some large free blocks of memory will be
1051  locked between two used chunks, so they cannot be given back to
1052  the system.
1053 
1054  The `pad' argument to malloc_trim represents the amount of free
1055  trailing space to leave untrimmed. If this argument is zero,
1056  only the minimum amount of memory to maintain internal data
1057  structures will be left (one page or less). Non-zero arguments
1058  can be supplied to maintain enough trailing space to service
1059  future expected allocations without having to re-obtain memory
1060  from the system.
1061 
1062  Malloc_trim returns 1 if it actually released any memory, else 0.
1063  On systems that do not support "negative sbrks", it will always
1064  return 0.
1065*/
1066#if __STD_C
1067int      public_mTRIm(size_t);
1068#else
1069int      public_mTRIm();
1070#endif
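
/*
  Illustrative sketch, not compiled (#if 0): returning unused top-of-heap
  memory to the system after releasing a large working set, while keeping
  64K of slack for future allocations. The function name
  release_unused_memory is hypothetical.
*/
#if 0
static void release_unused_memory(void)
{
  if (malloc_trim(64 * 1024) == 0)
    {
      /* nothing could be trimmed, or negative sbrk is not supported */
    }
}
#endif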
1071
1072/*
1073  malloc_usable_size(Void_t* p);
1074
1075  Returns the number of bytes you can actually use in
1076  an allocated chunk, which may be more than you requested (although
1077  often not) due to alignment and minimum size constraints.
1078  You can use this many bytes without worrying about
1079  overwriting other allocated objects. This is not a particularly great
1080  programming practice. malloc_usable_size can be more useful in
1081  debugging and assertions, for example:
1082
1083  p = malloc(n);
1084  assert(malloc_usable_size(p) >= 256);
1085
1086*/
1087#if __STD_C
1088size_t   public_mUSABLe(Void_t*);
1089#else
1090size_t   public_mUSABLe();
1091#endif
1092
1093/*
1094  malloc_stats();
1095  Prints on stderr the amount of space obtained from the system (both
1096  via sbrk and mmap), the maximum amount (which may be more than
1097  current if malloc_trim and/or munmap got called), and the current
1098  number of bytes allocated via malloc (or realloc, etc) but not yet
1099  freed. Note that this is the number of bytes allocated, not the
1100  number requested. It will be larger than the number requested
1101  because of alignment and bookkeeping overhead. Because it includes
1102  alignment wastage as being in use, this figure may be greater than
1103  zero even when no user-level chunks are allocated.
1104
1105  The reported current and maximum system memory can be inaccurate if
1106  a program makes other calls to system memory allocation functions
1107  (normally sbrk) outside of malloc.
1108
1109  malloc_stats prints only the most commonly interesting statistics.
1110  More information can be obtained by calling mallinfo.
1111
1112*/
1113#if __STD_C
1114void     public_mSTATs();
1115#else
1116void     public_mSTATs();
1117#endif
1118
1119/* mallopt tuning options */
1120
1121/*
1122  M_MXFAST is the maximum request size used for "fastbins", special bins
1123  that hold returned chunks without consolidating their spaces. This
1124  enables future requests for chunks of the same size to be handled
1125  very quickly, but can increase fragmentation, and thus increase the
1126  overall memory footprint of a program.
1127
1128  This malloc manages fastbins very conservatively yet still
1129  efficiently, so fragmentation is rarely a problem for values less
1130  than or equal to the default.  The maximum supported value of MXFAST
1131  is 80. You wouldn't want it any higher than this anyway.  Fastbins
1132  are designed especially for use with many small structs, objects or
1133  strings -- the default handles structs/objects/arrays with sizes up
1134  to 16 4byte fields, or small strings representing words, tokens,
1135  etc. Using fastbins for larger objects normally worsens
1136  fragmentation without improving speed.
1137
1138  M_MXFAST is set in REQUEST size units. It is internally used in
1139  chunksize units, which adds padding and alignment.  You can reduce
1140  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
1141  algorithm to be a closer approximation of fifo-best-fit in all cases,
1142  not just for larger requests, but will generally cause it to be
1143  slower.
1144*/
1145
1146
1147/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1148#ifndef M_MXFAST
1149#define M_MXFAST            1   
1150#endif
1151
1152#ifndef DEFAULT_MXFAST
1153#define DEFAULT_MXFAST     64
1154#endif
1155
1156
1157/*
1158  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1159  to keep before releasing via malloc_trim in free().
1160
1161  Automatic trimming is mainly useful in long-lived programs.
1162  Because trimming via sbrk can be slow on some systems, and can
1163  sometimes be wasteful (in cases where programs immediately
1164  afterward allocate more large chunks) the value should be high
1165  enough so that your overall system performance would improve by
1166  releasing this much memory.
1167
1168  The trim threshold and the mmap control parameters (see below)
1169  can be traded off with one another. Trimming and mmapping are
1170  two different ways of releasing unused memory back to the
1171  system. Between these two, it is often possible to keep
1172  system-level demands of a long-lived program down to a bare
1173  minimum. For example, in one test suite of sessions measuring
1174  the XF86 X server on Linux, using a trim threshold of 128K and a
1175  mmap threshold of 192K led to near-minimal long term resource
1176  consumption.
1177
1178  If you are using this malloc in a long-lived program, it should
1179  pay to experiment with these values.  As a rough guide, you
1180  might set to a value close to the average size of a process
1181  (program) running on your system.  Releasing this much memory
1182  would allow such a process to run in memory.  Generally, it's
1183  worth it to tune for trimming rather than memory mapping when a
1184  program undergoes phases where several large chunks are
1185  allocated and released in ways that can reuse each other's
1186  storage, perhaps mixed with phases where there are no such
1187  chunks at all.  And in well-behaved long-lived programs,
1188  controlling release of large blocks via trimming versus mapping
1189  is usually faster.
1190
1191  However, in most programs, these parameters serve mainly as
1192  protection against the system-level effects of carrying around
1193  massive amounts of unneeded memory. Since frequent calls to
1194  sbrk, mmap, and munmap otherwise degrade performance, the default
1195  parameters are set to relatively high values that serve only as
1196  safeguards.
1197
1198  The trim value must be greater than page size to have any useful
1199  effect.  To disable trimming completely, you can set to
1200  (unsigned long)(-1)
1201
1202  Trim settings interact with fastbin (MXFAST) settings: Unless
1203  TRIM_FASTBINS is defined, automatic trimming never takes place upon
1204  freeing a chunk with size less than or equal to MXFAST. Trimming is
1205  instead delayed until subsequent freeing of larger chunks. However,
1206  you can still force an attempted trim by calling malloc_trim.
1207
1208  Also, trimming is not generally possible in cases where
1209  the main arena is obtained via mmap.
1210
1211  Note that the trick some people use of mallocing a huge space and
1212  then freeing it at program startup, in an attempt to reserve system
1213  memory, doesn't have the intended effect under automatic trimming,
1214  since that memory will immediately be returned to the system.
1215*/
1216
1217#define M_TRIM_THRESHOLD       -1
1218
1219#ifndef DEFAULT_TRIM_THRESHOLD
1220#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1221#endif
1222
1223/*
1224  M_TOP_PAD is the amount of extra `padding' space to allocate or
1225  retain whenever sbrk is called. It is used in two ways internally:
1226
1227  * When sbrk is called to extend the top of the arena to satisfy
1228  a new malloc request, this much padding is added to the sbrk
1229  request.
1230
1231  * When malloc_trim is called automatically from free(),
1232  it is used as the `pad' argument.
1233
1234  In both cases, the actual amount of padding is rounded
1235  so that the end of the arena is always a system page boundary.
1236
1237  The main reason for using padding is to avoid calling sbrk so
1238  often. Having even a small pad greatly reduces the likelihood
1239  that nearly every malloc request during program start-up (or
1240  after trimming) will invoke sbrk, which needlessly wastes
1241  time.
1242
1243  Automatic rounding-up to page-size units is normally sufficient
1244  to avoid measurable overhead, so the default is 0.  However, in
1245  systems where sbrk is relatively slow, it can pay to increase
1246  this value, at the expense of carrying around more memory than
1247  the program needs.
1248*/
1249
1250#define M_TOP_PAD              -2
1251
1252#ifndef DEFAULT_TOP_PAD
1253#define DEFAULT_TOP_PAD        (0)
1254#endif
1255
1256/*
1257  M_MMAP_THRESHOLD is the request size threshold for using mmap()
1258  to service a request. Requests of at least this size that cannot
1259  be allocated using already-existing space will be serviced via mmap.
1260  (If enough normal freed space already exists it is used instead.)
1261
1262  Using mmap segregates relatively large chunks of memory so that
1263  they can be individually obtained and released from the host
1264  system. A request serviced through mmap is never reused by any
1265  other request (at least not directly; the system may just so
1266  happen to remap successive requests to the same locations).
1267
1268  Segregating space in this way has the benefits that:
1269
1270   1. Mmapped space can ALWAYS be individually released back
1271      to the system, which helps keep the system level memory
1272      demands of a long-lived program low.
1273   2. Mapped memory can never become `locked' between
1274      other chunks, as can happen with normally allocated chunks, which
1275      means that even trimming via malloc_trim would not release them.
1276   3. On some systems with "holes" in address spaces, mmap can obtain
1277      memory that sbrk cannot.
1278
1279  However, it has the disadvantages that:
1280
1281   1. The space cannot be reclaimed, consolidated, and then
1282      used to service later requests, as happens with normal chunks.
1283   2. It can lead to more wastage because of mmap page alignment
1284      requirements
1285   3. It causes malloc performance to be more dependent on host
1286      system memory management support routines which may vary in
1287      implementation quality and may impose arbitrary
1288      limitations. Generally, servicing a request via normal
1289      malloc steps is faster than going through a system's mmap.
1290
1291  The advantages of mmap nearly always outweigh disadvantages for
1292  "large" chunks, but the value of "large" varies across systems.  The
1293  default is an empirically derived value that works well in most
1294  systems.
1295*/
1296
1297#define M_MMAP_THRESHOLD      -3
1298
1299#ifndef DEFAULT_MMAP_THRESHOLD
1300#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1301#endif
1302
1303/*
1304  M_MMAP_MAX is the maximum number of requests to simultaneously
1305  service using mmap. This parameter exists because some
1306  systems have a limited number of internal tables for
1307  use by mmap, and using more than a few of them may degrade
1308  performance.
1309
1310  The default is set to a value that serves only as a safeguard.
1311  Setting to 0 disables use of mmap for servicing large requests.  If
1312  HAVE_MMAP is not set, the default value is 0, and attempts to set it
1313  to non-zero values in mallopt will fail.
1314*/
1315
1316#define M_MMAP_MAX             -4
1317
1318#ifndef DEFAULT_MMAP_MAX
1319#define DEFAULT_MMAP_MAX       (65536)
1320#endif
1321
1322#ifdef __cplusplus
1323};  /* end of extern "C" */
1324#endif
1325
1326/*
1327  ========================================================================
1328  To make a fully customizable malloc.h header file, cut everything
1329  above this line, put into file malloc.h, edit to suit, and #include it
1330  on the next line, as well as in programs that use this malloc.
1331  ========================================================================
1332*/
1333
1334/* #include "malloc.h" */
1335
1336/* --------------------- public wrappers ---------------------- */
1337
1338#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1339
1340/* DL_STATIC used to make functions (deep down) consistent
1341 * with prototypes (otherwise the prototypes are static
1342 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1343 * The gcc compiler doesn't care, but the HP-UX compiler does.
1344 */
1345#define DL_STATIC static
1346
1347/* Declare all routines as internal */
1348#if __STD_C
1349static Void_t*  mALLOc(size_t) __attribute_malloc__;
1350static void     fREe(Void_t*);
1351static Void_t*  rEALLOc(Void_t*, size_t) __attribute_malloc__;
1352static Void_t*  mEMALIGn(size_t, size_t) __attribute_malloc__;
1353static int      posix_mEMALIGn(Void_t**, size_t, size_t);
1354static Void_t*  vALLOc(size_t) __attribute_malloc__;
1355static Void_t*  pVALLOc(size_t) __attribute_malloc__;
1356static Void_t*  cALLOc(size_t, size_t) __attribute_malloc__;
1357static int      mTRIm(size_t);
1358static size_t   mUSABLe(Void_t*);
1359static void     mSTATs();
1360static int      mALLOPt(int, int);
1361static struct mallinfo mALLINFo(void);
1362#else
1363static Void_t*  mALLOc();
1364static void     fREe();
1365static Void_t*  rEALLOc();
1366static Void_t*  mEMALIGn();
1367static int      posix_mEMALIGn();
1368static Void_t*  vALLOc();
1369static Void_t*  pVALLOc();
1370static Void_t*  cALLOc();
1371static int      mTRIm();
1372static size_t   mUSABLe();
1373static void     mSTATs();
1374static int      mALLOPt();
1375static struct mallinfo mALLINFo();
1376#endif
1377
1378/*
1379  MALLOC_PREACTION and MALLOC_POSTACTION should be
1380  defined to return 0 on success, and nonzero on failure.
1381  The return value of MALLOC_POSTACTION is currently ignored
1382  in wrapper functions since there is no reasonable default
1383  action to take on failure.
1384*/
1385
1386
1387#ifdef USE_MALLOC_LOCK
1388
1389# ifdef WIN32
1390
1391static int mALLOC_MUTEx;
1392#define MALLOC_PREACTION   slwait(&mALLOC_MUTEx)
1393#define MALLOC_POSTACTION  slrelease(&mALLOC_MUTEx)
1394int dnmalloc_pthread_init(void) { return 0; }
1395
1396# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1397
1398#   if defined(__NetBSD__)
1399#include <reentrant.h>
1400extern int __isthreaded;
1401static mutex_t thread_lock = MUTEX_INITIALIZER;
1402#define _MALLOC_LOCK()   if (__isthreaded) mutex_lock(&thread_lock)
1403#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1404void _malloc_prefork(void) {  _MALLOC_LOCK(); }
1405void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1406#   endif
1407
1408#   if defined(__OpenBSD__)
1409extern int  __isthreaded;
1410void   _thread_malloc_lock(void);
1411void   _thread_malloc_unlock(void);
1412#define _MALLOC_LOCK()           if (__isthreaded) _thread_malloc_lock()
1413#define _MALLOC_UNLOCK()         if (__isthreaded) _thread_malloc_unlock()
1414#   endif
1415
1416#   if defined(__FreeBSD__)
1417extern int      __isthreaded;
1418struct _spinlock {
1419        volatile long   access_lock;
1420        volatile long   lock_owner;
1421        volatile char   *fname;
1422        volatile int    lineno;
1423};
1424typedef struct _spinlock spinlock_t;
1425#define _SPINLOCK_INITIALIZER   { 0, 0, 0, 0 }
1426void    _spinlock(spinlock_t *);
1427void    _spinunlock(spinlock_t *);
1428/* # include "/usr/src/lib/libc/include/spinlock.h" */
1429static spinlock_t thread_lock   = _SPINLOCK_INITIALIZER;
1430spinlock_t *__malloc_lock       = &thread_lock;
1431#define _MALLOC_LOCK()           if (__isthreaded) _spinlock(&thread_lock)
1432#define _MALLOC_UNLOCK()         if (__isthreaded) _spinunlock(&thread_lock)
1433#   endif
1434
1435/* Common for all three *BSD
1436 */
1437static int malloc_active = 0;
1438static int dnmalloc_mutex_lock()
1439{
1440  _MALLOC_LOCK();
1441  if (!malloc_active)
1442    {
1443      ++malloc_active;
1444      return 0;
1445    }
1446  assert(malloc_active == 0);
1447  _MALLOC_UNLOCK();
1448  errno = EDEADLK;
1449  return 1;
1450}
1451static int dnmalloc_mutex_unlock()
1452{
1453  --malloc_active;
1454  _MALLOC_UNLOCK();
1455  return 0;
1456}
1457#define MALLOC_PREACTION   dnmalloc_mutex_lock()
1458#define MALLOC_POSTACTION  dnmalloc_mutex_unlock()
1459int dnmalloc_pthread_init(void) { return 0; }
1460
1461# else
1462
1463/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1464 *
1465 * Works fine on linux (no malloc in pthread_mutex_lock)
1466 * Works on HP-UX if initialized after entering main()
1467 */ 
1468#include <pthread.h>
1469static int malloc_active      = 0;
1470void dnmalloc_fork_prepare(void);
1471void dnmalloc_fork_parent(void);
1472void dnmalloc_fork_child(void);
1473
1474#if !defined(__linux__)
1475
1476static pthread_mutex_t mALLOC_MUTEx;
1477pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1478static int dnmalloc_use_mutex = 0;
1479static void dnmalloc_pthread_init_int(void)
1480{
1481  pthread_mutexattr_t   mta;
1482  pthread_mutexattr_init(&mta);
1483  pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
1484  pthread_mutex_init(&(mALLOC_MUTEx), &mta);
1485  pthread_mutexattr_destroy(&mta);       
1486  pthread_atfork(dnmalloc_fork_prepare, 
1487                 dnmalloc_fork_parent,
1488                 dnmalloc_fork_child);
1489  dnmalloc_use_mutex = 1;
1490}
1491int dnmalloc_pthread_init(void)
1492{
1493  return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
1494}
1495
1496#else
1497
1498static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1499static int dnmalloc_use_mutex = 1;
1500int dnmalloc_pthread_init(void) { 
1501  return pthread_atfork(dnmalloc_fork_prepare, 
1502                        dnmalloc_fork_parent,
1503                        dnmalloc_fork_child); 
1504}
1505#endif /* !defined(__linux__) */
1506
1507void dnmalloc_fork_prepare(void) { 
1508  if (dnmalloc_use_mutex) 
1509    pthread_mutex_lock(&mALLOC_MUTEx);
1510}
1511void dnmalloc_fork_parent(void) { 
1512  if (dnmalloc_use_mutex)
1513    pthread_mutex_unlock(&mALLOC_MUTEx); 
1514}
1515void dnmalloc_fork_child(void) {
1516  int rc = 0;
1517#ifdef __GLIBC__
1518  if (dnmalloc_use_mutex)
1519    {
1520      pthread_mutex_unlock (&mALLOC_MUTEx);
1521      pthread_mutex_destroy(&mALLOC_MUTEx);
1522      rc = pthread_mutex_init(&mALLOC_MUTEx, NULL);
1523    } 
1524#else
1525  if (dnmalloc_use_mutex)
1526    rc = pthread_mutex_unlock(&mALLOC_MUTEx); 
1527#endif
1528  if (rc != 0)
1529    {
1530      fputs("fork_child failed", stderr);
1531      _exit(EXIT_FAILURE);
1532    }
1533}
1534static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
1535{
1536  if (dnmalloc_use_mutex)
1537    {
1538      int rc = pthread_mutex_lock(mutex);
1539      if (rc == 0)
1540        {
1541          if (!malloc_active)
1542            {
1543              ++malloc_active;
1544              return 0;
1545            }
1546          assert(malloc_active == 0);
1547          (void) pthread_mutex_unlock(mutex);
1548          errno = EDEADLK;
1549          return 1;
1550        }
1551      return rc;
1552    }
1553  return 0;
1554}
1555static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
1556{
1557  if (dnmalloc_use_mutex)
1558    {
1559      --malloc_active;
1560      return pthread_mutex_unlock(mutex);
1561    }
1562  return 0;
1563}
1564# define MALLOC_PREACTION   dnmalloc_mutex_lock(&mALLOC_MUTEx)
1565# define MALLOC_POSTACTION  dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1566
1567# endif
1568
1569#else
1570
1571/* Substitute anything you like for these */
1572
1573# define MALLOC_PREACTION   (0)
1574# define MALLOC_POSTACTION  (0)
1575int dnmalloc_pthread_init(void) { return 0; }
1576
1577#endif /* USE_MALLOC_LOCK */
1578
1579Void_t* public_mALLOc(size_t bytes) {
1580  Void_t* m;
1581  if (MALLOC_PREACTION == 0) {
1582    m = mALLOc(bytes);
1583    (void) MALLOC_POSTACTION;
1584    return m;
1585  }
1586  return 0;
1587}
1588
1589void public_fREe(Void_t* m) {
1590  if (MALLOC_PREACTION == 0) {
1591    fREe(m);
1592    (void) MALLOC_POSTACTION;
1593  }
1594}
1595
1596Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1597  if (MALLOC_PREACTION == 0) {
1598    m = rEALLOc(m, bytes);
1599    (void) MALLOC_POSTACTION;
1600    return m;
1601  }
1602  return 0;
1603}
1604
1605Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1606  Void_t* m;
1607  if (MALLOC_PREACTION == 0) {
1608    m = mEMALIGn(alignment, bytes);
1609    (void) MALLOC_POSTACTION;
1610    return m;
1611  }
1612  return 0;
1613}
1614
1615int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1616  int m, ret;
1617  if ((ret = MALLOC_PREACTION) == 0) {
1618    m = posix_mEMALIGn(memptr, alignment, bytes);
1619    (void) MALLOC_POSTACTION;
1620    return m;
1621  }
1622  return ret;
1623}
1624
1625Void_t* public_vALLOc(size_t bytes) {
1626  Void_t* m;
1627  if (MALLOC_PREACTION == 0) {
1628    m = vALLOc(bytes);
1629    (void) MALLOC_POSTACTION;
1630    return m;
1631  }
1632  return 0;
1633}
1634
1635Void_t* public_pVALLOc(size_t bytes) {
1636  Void_t* m;
1637  if (MALLOC_PREACTION == 0) {
1638    m = pVALLOc(bytes);
1639    (void) MALLOC_POSTACTION;
1640    return m;
1641  }
1642  return 0;
1643}
1644
1645Void_t* public_cALLOc(size_t n, size_t elem_size) {
1646  Void_t* m;
1647  if (MALLOC_PREACTION == 0) {
1648    m = cALLOc(n, elem_size);
1649    (void) MALLOC_POSTACTION;
1650    return m;
1651  }
1652  return 0;
1653}
1654
1655int public_mTRIm(size_t s) {
1656  int result;
1657  if (MALLOC_PREACTION == 0) {
1658    result = mTRIm(s);
1659    (void) MALLOC_POSTACTION;
1660    return result;
1661  }
1662  return 0;
1663}
1664
1665size_t public_mUSABLe(Void_t* m) {
1666  size_t result;
1667  if (MALLOC_PREACTION == 0) {
1668    result = mUSABLe(m);
1669    (void) MALLOC_POSTACTION;
1670    return result;
1671  }
1672  return 0;
1673}
1674
1675void public_mSTATs() {
1676  if (MALLOC_PREACTION == 0) {
1677    mSTATs();
1678    (void) MALLOC_POSTACTION;
1679  }
1680}
1681
1682struct mallinfo public_mALLINFo() {
1683  struct mallinfo m;
1684  if (MALLOC_PREACTION == 0) {
1685    m = mALLINFo();
1686    (void) MALLOC_POSTACTION;
1687    return m;
1688  } else {
1689    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1690    return nm;
1691  }
1692}
1693
1694int public_mALLOPt(int p, int v) {
1695  int result;
1696  if (MALLOC_PREACTION == 0) {
1697    result = mALLOPt(p, v);
1698    (void) MALLOC_POSTACTION;
1699    return result;
1700  }
1701  return 0;
1702}
1703
1704#else
1705
1706int dnmalloc_pthread_init(void) { return 0; }
1707#define DL_STATIC
1708
1709#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1710
1711
1712
1713/* ------------- Optional versions of memcopy ---------------- */
1714
1715
1716#if USE_MEMCPY
1717
1718/*
1719  Note: memcpy is ONLY invoked with non-overlapping regions,
1720  so the (usually slower) memmove is not needed.
1721*/
1722
1723#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
1724#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)
1725
1726#else /* !USE_MEMCPY */
1727
1728/* Use Duff's device for good zeroing/copying performance. */
1729
1730#define MALLOC_ZERO(charp, nbytes)                                            \
1731do {                                                                          \
1732  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
1733  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
1734  long mcn;                                                                   \
1735  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
1736  switch (mctmp) {                                                            \
1737    case 0: for(;;) { *mzp++ = 0;                                             \
1738    case 7:           *mzp++ = 0;                                             \
1739    case 6:           *mzp++ = 0;                                             \
1740    case 5:           *mzp++ = 0;                                             \
1741    case 4:           *mzp++ = 0;                                             \
1742    case 3:           *mzp++ = 0;                                             \
1743    case 2:           *mzp++ = 0;                                             \
1744    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
1745  }                                                                           \
1746} while(0)
1747
1748#define MALLOC_COPY(dest,src,nbytes)                                          \
1749do {                                                                          \
1750  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
1751  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
1752  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
1753  long mcn;                                                                   \
1754  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
1755  switch (mctmp) {                                                            \
1756    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
1757    case 7:           *mcdst++ = *mcsrc++;                                    \
1758    case 6:           *mcdst++ = *mcsrc++;                                    \
1759    case 5:           *mcdst++ = *mcsrc++;                                    \
1760    case 4:           *mcdst++ = *mcsrc++;                                    \
1761    case 3:           *mcdst++ = *mcsrc++;                                    \
1762    case 2:           *mcdst++ = *mcsrc++;                                    \
1763    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
1764  }                                                                           \
1765} while(0)
1766
1767#endif
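
/*
 * Worked example (illustrative, assuming 4-byte INTERNAL_SIZE_T): for
 * MALLOC_COPY(dest, src, 40) the macro copies mctmp = 40/4 = 10 words.
 * Since 10 >= 8, mcn = (10-1)/8 = 1 and mctmp = 10 % 8 = 2, so the
 * switch jumps to "case 2", copies 2 words, and the for(;;) loop then
 * performs one full pass of 8 more words before mcn reaches 0, so all
 * 10 words are copied with only one loop test per 8 words.
 */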
1768
1769/* ------------------ MMAP support ------------------  */
1770
1771
1772#if defined(HAVE_FCNTL_H)
1773#include <fcntl.h>
1774#endif
1775
1776#if defined(HAVE_SYS_MMAN_H)
1777#include <sys/mman.h>
1778#endif
1779
1780#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1781#define MAP_ANONYMOUS MAP_ANON
1782#endif
1783
1784/*
1785   Nearly all versions of mmap support MAP_ANONYMOUS,
1786   so the following is unlikely to be needed, but is
1787   supplied just in case.
1788*/
1789
1790#ifndef MAP_ANONYMOUS
1791
1792/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1793 */
1794void * anon_mmap (void *addr, size_t length, int prot, int flags)
1795{
1796  void * retval   = NULL;
1797  int dev_zero_fd = -1; /* File descriptor for /dev/zero. */
1798
1799  dev_zero_fd = open("/dev/zero", O_RDWR);
1800  if (dev_zero_fd >= 0)
1801    {
1802      retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
1803      /* closing the file descriptor does not unmap the region */
1804      close(dev_zero_fd); 
1805    }
1806  return retval;
1807}
1808 
1809#define MMAP(addr, size, prot, flags) \
1810  (anon_mmap((addr), (size), (prot), (flags)))
1811
1812
1813#else /* have MAP_ANONYMOUS */
1814
1815#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1816#define MAP_32BIT MAP_ADDR32
1817#endif
1818
1819#if defined(MAP_32BIT)
1820#define MMAP(addr, size, prot, flags) \
1821 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1822#elif defined(__sun)
1823/*
1824 * Hint an address within 32bit address space
1825 */
1826#define MMAP(addr, size, prot, flags) \
1827 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1828#else
1829/* *BSD */
1830#define MMAP(addr, size, prot, flags) \
1831 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1832#endif
1833
1834#endif /* have MAP_ANONYMOUS */
1835
1836
1837/*
1838  -----------------------  Chunk representations -----------------------
1839*/
1840
1841typedef void * mchunkptr;
1842
1843struct chunkinfo {
1844   INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk in bytes    */
1845   INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
1846   INTERNAL_SIZE_T      req;        /* Original request size, for guard.  */
1847   struct chunkinfo* hash_next;     /* pointer to the next chunkinfo in the
1848                                       linked list of chunks whose hash
1849                                       value is the same                  */
1850   struct chunkinfo* fd;            /* double links -- used only if free. */
1851   struct chunkinfo* bk;
1852   mchunkptr chunk;
1853};
1854
1855typedef struct chunkinfo* chunkinfoptr;
1856
1857struct cireginfo {
1858        unsigned long position;
1859        unsigned long *freebitmap;
1860        struct cireginfo* next;
1861        struct chunkinfo *freelist;
1862        struct chunkinfo *begin; 
1863        unsigned long freecounter; 
1864};
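
/*
 * Illustrative sketch of how these structures relate (comments only;
 * field names as defined above): each chunk of application memory is
 * described by a separate struct chunkinfo whose ->chunk member points
 * at the chunk itself, so allocator metadata never lives next to user
 * data.  chunkinfos are handed out from mmap()ed pools, each headed by
 * a struct cireginfo:
 *
 *   firstciinfo -> cireginfo -> ... -> currciinfo     (->next links)
 *        |
 *        +-- pool of chunkinfos, handed out via ->position and
 *            recycled through ->freelist / ->freecounter
 *
 * The way back from a user pointer to its chunkinfo goes through the
 * hashtable (see mem2chunk() / hashtable_lookup() below).
 */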
1865
1866/*
1867  ---------- Size and alignment checks and conversions ----------
1868*/
1869
1870/* conversion from malloc headers to user pointers, and back */
1871#define chunk(p) (p->chunk)
1872
1873
1874#define chunk2mem(p)   (chunk(p))
1875#define mem2chunk(mem) (hashtable_lookup(mem))
1876
1877/* The smallest possible chunk      */
1878/* #define MIN_CHUNK_SIZE        16 */
1879#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
1880#  define MIN_CHUNK_SIZE 32
1881#else
1882#  define MIN_CHUNK_SIZE 16
1883#endif
1884
1885/* The smallest size we can malloc is an aligned minimal chunk */
1886
1887#define MINSIZE  \
1888  (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1889
1890/* Check if m has acceptable alignment */
1891
1892#define aligned_OK(m)  (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1893
1894#define GUARD_SIZE 4
1895
1896/*
1897   Check if a request is so large that it would wrap around zero when
1898   padded and aligned. To simplify some other code, the bound is made
1899   low enough so that adding MINSIZE will also not wrap around zero.
1900
1901   Make it 4*MINSIZE.
1902*/
1903
1904#define REQUEST_OUT_OF_RANGE(req)                                 \
1905  ((CHUNK_SIZE_T)(req) >=                                         \
1906   (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))   
1907
1908/* pad request bytes into a usable size -- internal version */
1909
1910#define request2size(req)                                         \
1911  (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE)  ?         \
1912   ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1913   MINSIZE)
1914
1915/*  Same, except also perform argument check */
1916
1917#define checked_request2size(req, sz)                             \
1918  if (!REQUEST_OUT_OF_RANGE(req)) {                               \
1919    (sz) = request2size(req);                                     \
1920    assert((sz-req) >= GUARD_SIZE);                               \
1921  } else {                                                        \
1922    MALLOC_FAILURE_ACTION;                                        \
1923    return 0;                                                     \
1924  }
1925
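/*
 * Worked example (illustrative; assumes a 32-bit build with 8-byte
 * alignment, i.e. MALLOC_ALIGN_MASK == 7 and MINSIZE == 16):
 *
 *   request2size(1)  : 1 + 4 + 7 = 12 < MINSIZE, so the result is 16
 *   request2size(10) : (10 + 4 + 7) & ~7 = 16
 *   request2size(13) : (13 + 4 + 7) & ~7 = 24
 *
 * In each case sz - req >= GUARD_SIZE, so there is always room for the
 * canary that guard_set() (below) places right after the user data.
 */
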
1926#if PARANOIA > 2
1927static char * guard_set_p;
1928static char * guard_set_q;
1929
1930#define guard_set(guard, P, request, sz)                          \
1931  assert((sz-request) >= GUARD_SIZE);                             \
1932  guard_set_p = (char*)(chunk(P));                                \
1933  guard_set_p += request;                                         \
1934  VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE);            \
1935  guard_set_q = (char*)(guard);                                   \
1936  *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q;      \
1937  *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q;      \
1938  *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q;      \
1939  *guard_set_p = *guard_set_q;                                    \
1940  VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE);   \
1941  (P)->req = request
1942 
1943#define guard_check(guard, P)                                     \
1944  VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1945  assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1946  VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
1947
1948#else
1949#define guard_set(guard, P, request, sz) ((void)0)
1950#define guard_check(guard, P) ((void)0)
1951#endif /* PARANOIA > 2 */
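
/*
 * Canary layout sketch (illustrative): for a request of req bytes in a
 * chunk of usable size sz = request2size(req), guard_set() copies the
 * GUARD_SIZE random bytes kept in av->guard_stored (filled from
 * dnmalloc_arc4random() at init) to the bytes just past the user data:
 *
 *   chunk(P):  [ req bytes of user data | GUARD_SIZE canary | padding ]
 *                                         ^ chunk(P) + P->req
 *
 * guard_check() recomputes the same position from P->req at free/realloc
 * time and asserts that the canary is still intact.
 */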
1952
1953/* dnmalloc forward declarations */
1954static char * dnmalloc_arc4random(void);
1955static void dnmalloc_init (void);
1956static void malloc_mmap_state(void);
1957static void cireg_extend (void);
1958static chunkinfoptr cireg_getfree (void);
1959static void hashtable_add (chunkinfoptr ci);
1960static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1961static void hashtable_remove (mchunkptr p);             
1962static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1963static chunkinfoptr hashtable_lookup (mchunkptr p);
1964static chunkinfoptr next_chunkinfo (chunkinfoptr ci);         
1965static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1966
1967
1968
1969/*
1970  --------------- Physical chunk operations ---------------
1971*/
1972
1973
1974/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1975#define PREV_INUSE 0x1
1976
1977/* extract inuse bit of previous chunk */
1978#define prev_inuse(p)       ((p)->size & PREV_INUSE)
1979
1980/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1981#define IS_MMAPPED 0x2
1982
1983/* check for mmap()'ed chunk */
1984#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1985
1986
1987/* size field is or'ed when the chunk is in use */
1988#define INUSE 0x4
1989
1990/* extract inuse bit of chunk */
1991#define inuse(p)       ((p)->size & INUSE)
1992
1993/*
1994  Bits to mask off when extracting size
1995
1996  Note: IS_MMAPPED is intentionally not masked off from size field in
1997  macros for which mmapped chunks should never be seen. This should
1998  cause helpful core dumps to occur if it is tried by accident by
1999  people extending or adapting this malloc.
2000*/
2001#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
2002
2003/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
2004#define SIZE_NOMMAP (PREV_INUSE|INUSE)
2005
2006/* Get size, ignoring use bits */
2007#define chunksize(p)         ((p)->size & ~(SIZE_BITS))
2008
2009/* Ptr to chunkinfo of next physical malloc_chunk. */
2010#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
2011
2012/* Treat space at ptr + offset as a chunk */
2013#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
2014
2015/* set/clear chunk as being inuse without otherwise disturbing */
2016#define set_inuse(p) ((p)->size |= INUSE)
2017
2018#define clear_inuse(p) ((p)->size &= ~(INUSE))
2019
2020#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2021
2022#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2023
2024static void set_previnuse_next (chunkinfoptr p)
2025{
2026   chunkinfoptr q;
2027   q = next_chunkinfo (p);
2028   if (q)
2029      set_previnuse (q);
2030}
2031
2032#define set_all_inuse(p) \
2033set_inuse(p); \
2034set_previnuse_next(p);
2035
2036
2037/* Set size at head, without disturbing its use bit */
2038#define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2039
2040/* Set size/use field */
2041#define set_head(p, s)       ((p)->size = (s))
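
/*
 * Example of the size-field encoding (illustrative): a 48-byte chunk
 * that is in use, with its physical predecessor also in use, stores
 *
 *   size = 48 | PREV_INUSE | INUSE = 0x30 | 0x1 | 0x4 = 0x35
 *
 * chunksize() masks off SIZE_BITS and yields 48 again, while inuse(),
 * prev_inuse() and chunk_is_mmapped() each test one flag bit.
 */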
2042
2043/*
2044  Bins
2045
2046    An array of bin headers for free chunks. Each bin is doubly
2047    linked.  The bins are approximately proportionally (log) spaced.
2048    There are a lot of these bins (NBINS is 96 here). This may look excessive, but
2049    works very well in practice.  Most bins hold sizes that are
2050    unusual as malloc request sizes, but are more usual for fragments
2051    and consolidated sets of chunks, which is what these bins hold, so
2052    they can be found quickly.  All procedures maintain the invariant
2053    that no consolidated chunk physically borders another one, so each
2054    chunk in a list is known to be preceded and followed by either
2055    inuse chunks or the ends of memory.
2056
2057    Chunks in bins are kept in size order, with ties going to the
2058    approximately least recently used chunk. Ordering isn't needed
2059    for the small bins, which all contain the same-sized chunks, but
2060    facilitates best-fit allocation for larger chunks. These lists
2061    are just sequential. Keeping them in order almost never requires
2062    enough traversal to warrant using fancier ordered data
2063    structures. 
2064
2065    Chunks of the same size are linked with the most
2066    recently freed at the front, and allocations are taken from the
2067    back.  This results in LRU (FIFO) allocation order, which tends
2068    to give each chunk an equal opportunity to be consolidated with
2069    adjacent freed chunks, resulting in larger free chunks and less
2070    fragmentation.
2071
2072    To simplify use in double-linked lists, each bin header acts
2073    as a malloc_chunk. This avoids special-casing for headers.
2074    But to conserve space and improve locality, we allocate
2075    only the fd/bk pointers of bins, and then use repositioning tricks
2076    to treat these as the fields of a malloc_chunk*. 
2077*/
2078
2079typedef struct chunkinfo* mbinptr;
2080
2081/* addressing -- note that bin_at(0) does not exist */
2082#define bin_at(m, i) (&(m)->bins[i])
2083
2084/* analog of ++bin */
2085#define next_bin(b)  (b+1)
2086
2087/* Reminders about list directionality within bins */
2088#define first(b)     ((b)->fd)
2089#define last(b)      ((b)->bk)
2090
2091/* Take a chunk off a bin list */
2092#define unlink(P, BK, FD) {                                            \
2093  FD = P->fd;                                                          \
2094  BK = P->bk;                                                          \
2095  FD->bk = BK;                                                         \
2096  BK->fd = FD;                                                         \
2097}
2098
2099/*
2100  Indexing
2101
2102    Bins for sizes below MIN_LARGE_SIZE (256 bytes) contain chunks of all
2103    the same size, spaced 8 bytes apart. Larger bins are approximately
2104    logarithmically spaced:
2105
2106    32 bins spaced        8 bytes apart  (the small bins)
2107     4 bins spaced       64 bytes apart  (sizes  256 -  511)
2108     4 bins spaced      128 bytes apart  (sizes  512 - 1023)
2109     4 bins spaced      256 bytes apart  (sizes 1024 - 2047)
2110     ... and so on, 4 bins per size doubling ...
2111     1 bin  for whatever is left
2112
2113    The bins top out around 1MB because we expect to service large
2114    requests via mmap.
2115*/
2116
2117#define NBINS              96
2118#define NSMALLBINS         32
2119#define SMALLBIN_WIDTH      8
2120#define MIN_LARGE_SIZE    256
2121
2122#define in_smallbin_range(sz)  \
2123  ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2124
2125#define smallbin_index(sz)     (((unsigned)(sz)) >> 3)
2126
2127/*
2128  Compute index for size. We expect this to be inlined when
2129  compiled with optimization, else not, which works out well.
2130*/
2131static int largebin_index(size_t sz) {
2132
2133  unsigned long  xx = sz >> SMALLBIN_WIDTH; 
2134
2135  if (xx < 0x10000) 
2136    {
2137      unsigned int  m;           /* bit position of highest set bit of m */
2138     
2139      /* On intel, use BSRL instruction to find highest bit */
2140#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)
2141
2142      unsigned int  x = (unsigned int) xx;
2143
2144      __asm__("bsrl %1,%0\n\t"
2145              : "=r" (m) 
2146              : "rm"  (x));
2147
2148#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)
2149
2150      __asm__("bsrq %1,%0\n\t"
2151              : "=r" (m)
2152              : "rm"  (xx));
2153
2154#else
2155
2156      /* Taken from Bit Twiddling Hacks
2157       * http://graphics.stanford.edu/~seander/bithacks.html
2158       * public domain
2159       */
2160      unsigned int  v  = (unsigned int) xx;
2161      register unsigned int shift;
2162     
2163      m =     (v > 0xFFFF) << 4; v >>= m;
2164      shift = (v > 0xFF  ) << 3; v >>= shift; m |= shift;
2165      shift = (v > 0xF   ) << 2; v >>= shift; m |= shift;
2166      shift = (v > 0x3   ) << 1; v >>= shift; m |= shift;
2167      m |= (v >> 1);
2168     
2169#endif
2170     
2171      /* Use next 2 bits to create finer-granularity bins */
2172      return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2173    }
2174  else
2175    {
2176      return NBINS-1;
2177    }
2178}
2179
2180#define bin_index(sz) \
2181 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
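
/*
 * Worked examples for bin_index (illustrative):
 *
 *   bin_index(64)   = smallbin_index(64)  = 64 >> 3  =  8
 *   bin_index(248)  = smallbin_index(248) = 248 >> 3 = 31
 *   bin_index(256)  : largebin_index, xx = 1, m = 0,
 *                     NSMALLBINS + (0 << 2) + ((256 >> 6) & 3)  = 32
 *   bin_index(1024) : largebin_index, xx = 4, m = 2,
 *                     NSMALLBINS + (2 << 2) + ((1024 >> 8) & 3) = 40
 *
 * The 32 small bins thus cover sizes below MIN_LARGE_SIZE in 8-byte
 * steps, and the large bins start at index NSMALLBINS.
 */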
2182
2183/*
2184  FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2185  first bin that is maintained in sorted order. This must
2186  be the smallest size corresponding to a given bin.
2187
2188  Normally, this should be MIN_LARGE_SIZE. But you can weaken
2189  best fit guarantees to sometimes speed up malloc by increasing value.
2190  Doing this means that malloc may choose a chunk that is
2191  non-best-fitting by up to the width of the bin.
2192
2193  Some useful cutoff values:
2194      512 - all bins sorted
2195     2560 - leaves bins <=     64 bytes wide unsorted 
2196    12288 - leaves bins <=    512 bytes wide unsorted
2197    65536 - leaves bins <=   4096 bytes wide unsorted
2198   262144 - leaves bins <=  32768 bytes wide unsorted
2199       -1 - no bins sorted (not recommended!)
2200*/
2201
2202/* #define FIRST_SORTED_BIN_SIZE 65536 */
2203
2204#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2205
2206
2207/*
2208  Unsorted chunks
2209
2210    All remainders from chunk splits, as well as all returned chunks,
2211    are first placed in the "unsorted" bin. They are then placed
2212    in regular bins after malloc gives them ONE chance to be used before
2213    binning. So, basically, the unsorted_chunks list acts as a queue,
2214    with chunks being placed on it in free (and malloc_consolidate),
2215    and taken off (to be either used or placed in bins) in malloc.
2216*/
2217
2218/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2219#define unsorted_chunks(M)          (bin_at(M, 1))
2220
2221/*
2222  Top
2223
2224    The top-most available chunk (i.e., the one bordering the end of
2225    available memory) is treated specially. It is never included in
2226    any bin, is used only if no other chunk is available, and is
2227    released back to the system if it is very large (see
2228    M_TRIM_THRESHOLD).  Because top initially
2229    points to its own bin with initial zero size, thus forcing
2230    extension on the first malloc request, we avoid having any special
2231    code in malloc to check whether it even exists yet. But we still
2232    need to do so when getting memory from system, so we make
2233    initial_top treat the bin as a legal but unusable chunk during the
2234    interval between initialization and the first call to
2235    sYSMALLOc. (This is somewhat delicate, since it relies on
2236    the 2 preceding words to be zero during this interval as well.)
2237*/
2238
2239/* Conveniently, the unsorted bin can be used as dummy top on first call */
2240#define initial_top(M)              (unsorted_chunks(M))
2241
2242/*
2243  Binmap
2244
2245    To help compensate for the large number of bins, a one-level index
2246    structure is used for bin-by-bin searching.  `binmap' is a
2247    bitvector recording whether bins are definitely empty so they can
2248    be skipped over during traversals.  The bits are NOT always
2249    cleared as soon as bins are empty, but instead only
2250    when they are noticed to be empty during traversal in malloc.
2251*/
2252
2253/* Conservatively use 32 bits per map word, even if on 64bit system */
2254#define BINMAPSHIFT      5
2255#define BITSPERMAP       (1U << BINMAPSHIFT)
2256#define BINMAPSIZE       (NBINS / BITSPERMAP)
2257
2258#define idx2block(i)     ((i) >> BINMAPSHIFT)
2259#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2260
2261#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
2262#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2263#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
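
/*
 * Example (illustrative): for bin index i = 40, idx2block(40) = 40 >> 5
 * = 1 and idx2bit(40) = 1U << (40 & 31) = 1U << 8, so mark_bin(m, 40)
 * sets bit 8 of binmap[1]. A map word that is entirely zero lets the
 * bin scan in malloc skip 32 bins at once.
 */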
2264
2265/*
2266  Fastbins
2267
2268    An array of lists holding recently freed small chunks.  Fastbins
2269    are not doubly linked.  It is faster to single-link them, and
2270    since chunks are never removed from the middles of these lists,
2271    double linking is not necessary. Also, unlike regular bins, they
2272    are not even processed in FIFO order (they use faster LIFO) since
2273    ordering doesn't much matter in the transient contexts in which
2274    fastbins are normally used.
2275
2276    Chunks in fastbins keep their inuse bit set, so they cannot
2277    be consolidated with other free chunks. malloc_consolidate
2278    releases all chunks in fastbins and consolidates them with
2279    other free chunks.
2280*/
2281
2282typedef struct chunkinfo* mfastbinptr;
2283
2284/* offset 2 to use otherwise unindexable first 2 bins */
2285#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
2286
2287/* The maximum fastbin request size we support */
2288#define MAX_FAST_SIZE     80
2289
2290#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
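
/*
 * Worked example (illustrative; 32-bit build, MALLOC_ALIGN_MASK == 7):
 * request2size(MAX_FAST_SIZE) = (80 + 4 + 7) & ~7 = 88, so
 * NFASTBINS = fastbin_index(88) + 1 = ((88 >> 3) - 2) + 1 = 10,
 * covering chunk sizes 16, 24, ..., 88 in 8-byte steps.
 */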
2291
2292/*
2293  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2294  that triggers automatic consolidation of possibly-surrounding
2295  fastbin chunks. This is a heuristic, so the exact value should not
2296  matter too much. It is defined at half the default trim threshold as a
2297  compromise heuristic to only attempt consolidation if it is likely
2298  to lead to trimming. However, it is not dynamically tunable, since
2299  consolidation reduces fragmentation surrounding large chunks even
2300  if trimming is not used.
2301*/
2302
2303#define FASTBIN_CONSOLIDATION_THRESHOLD  \
2304  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2305
2306/*
2307  Since the lowest 2 bits in max_fast don't matter in size comparisons,
2308  they are used as flags.
2309*/
2310
2311/*
2312  ANYCHUNKS_BIT held in max_fast indicates that there may be any
2313  freed chunks at all. It is set true when entering a chunk into any
2314  bin.
2315*/
2316
2317#define ANYCHUNKS_BIT        (1U)
2318
2319#define have_anychunks(M)     (((M)->max_fast &  ANYCHUNKS_BIT))
2320#define set_anychunks(M)      ((M)->max_fast |=  ANYCHUNKS_BIT)
2321#define clear_anychunks(M)    ((M)->max_fast &= ~ANYCHUNKS_BIT)
2322
2323/*
2324  FASTCHUNKS_BIT held in max_fast indicates that there are probably
2325  some fastbin chunks. It is set true on entering a chunk into any
2326  fastbin, and cleared only in malloc_consolidate.
2327*/
2328
2329#define FASTCHUNKS_BIT        (2U)
2330
2331#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
2332#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2333#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2334
2335/*
2336   Set value of max_fast.
2337   Use impossibly small value if 0.
2338*/
2339
2340#define set_max_fast(M, s) \
2341  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2342  ((M)->max_fast &  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2343
2344#define get_max_fast(M) \
2345  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
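
/*
 * Example (illustrative; 32-bit build): set_max_fast(M, 64) stores
 * request2size(64) = (64 + 4 + 7) & ~7 = 72 in max_fast, preserving
 * whatever FASTCHUNKS_BIT/ANYCHUNKS_BIT flags happen to be set in the
 * low two bits; get_max_fast(M) masks those two bits off and returns 72.
 */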
2346
2347
2348/*
2349  morecore_properties is a status word holding dynamically discovered
2350  or controlled properties of the morecore function
2351*/
2352
2353#define MORECORE_CONTIGUOUS_BIT  (1U)
2354
2355#define contiguous(M) \
2356        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
2357#define noncontiguous(M) \
2358        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
2359#define set_contiguous(M) \
2360        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
2361#define set_noncontiguous(M) \
2362        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2363
2364#define MORECORE_32BIT_BIT  (2U)
2365
2366#define morecore32bit(M) \
2367        (((M)->morecore_properties &  MORECORE_32BIT_BIT))
2368#define nonmorecore32bit(M) \
2369        (((M)->morecore_properties &  MORECORE_32BIT_BIT) == 0)
2370#define set_morecore32bit(M) \
2371        ((M)->morecore_properties |=  MORECORE_32BIT_BIT)
2372#define set_nonmorecore32bit(M) \
2373        ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2374
2375
2376
2377/* ----------------- dnmalloc -------------------- */
2378
2379/* size of pages */
2380#define PGSIZE malloc_getpagesize
2381/* pointer size */
2382#define PTRSIZE sizeof(long)
2383
2384
2385
2386/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2387   address space: the hashtable has granularity of 16*8, set it to something
2388   closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2389   page)
2390*/
2391
2392/* The maximum heap size that dnmalloc can operate with
2393 * represented in hex to avoid annoying gcc warning
2394 *
2395 * Avoid integer overflow, cover complete 32bit address
2396 * space for portability. With deferred allocation, the
2397 * hashtable size is a non-issue.
2398 */
2399#define HEAPMAXSIZE_HALF 0x80000000UL
2400
2401/* How many elements are stored in the linked list */
2402#define LINKEDLSTELS 8
2403
2404/* Minimum size of a chunk */
2405
2406#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2407#  define MINCHUNKSIZE 32
2408#else
2409#  define MINCHUNKSIZE 16
2410#endif
2411
2412
2413/* The number of hashtable entries for each page:
2414Pagesize divided by the number of elements in the linked lists
2415divided by the minimum chunk size
2416*/
2417#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2418
2419/* The number of hashtable entries needed to manage the memory:
2420Maximum heap size divided by page size, multiplied by the number
2421of chunkinfos per page
2422*/
2423#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2424
2425/* Initial size of the map for the hashtable:
2426number of entries multiplied by pointer size
2427*/
2428#define  HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
2429
2430/* Amount of free chunks that the system should allocate at the start */
2431#define NUMBER_FREE_CHUNKS 32768
2432
2433/* Initial size of the chunk info region,
2434also used when growing the region */
2435#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
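
/*
 * Sizing example (illustrative; PGSIZE == 4096 and 32-bit pointers):
 *
 *   CHUNKINFOPAGE = 4096 / 8 / 16                = 32
 *   AMOUNTHASH    = (0x80000000 / 4096) * 32 * 2 = 33554432 entries
 *   HASHTABLESIZE = 33554432 * 4                 = 128 MB
 *   CIREGSIZE     = 32768 * sizeof(struct chunkinfo)
 *
 * The 128 MB hashtable is only reserved address space: with
 * MAP_NORESERVE / deferred allocation, pages get backed by real memory
 * only when the corresponding part of the heap is actually used.
 */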
2436
2437/* Start address of the heap */
2438char *startheap;
2439
2440/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2441chunkinfoptr *hashtable;
2442
2443/* Current chunkinfo region */
2444struct cireginfo *currciinfo = 0;
2445struct cireginfo *firstciinfo = 0;
2446
2447unsigned long totalcictr = 0;
2448
2449
2450/* Initialize the area for chunkinfos and the hashtable and protect
2451 * it with non-writable pages
2452 */
2453static void
2454dnmalloc_init ()
2455{
2456   void *hashtb;
2457   int mprot;
2458   int flags = MAP_PRIVATE;
2459
2460   /* Allocate the malloc_state struct */
2461   malloc_mmap_state();
2462
2463   /* Use MAP_NORESERVE if available (Solaris, HP-UX); most other
2464    * systems use deferred allocation anyway.
2465    */
2466#ifdef MAP_NORESERVE
2467   flags |= MAP_NORESERVE;
2468#endif
2469
2470   /* Always start at 0, hashtable covers whole 32bit address space
2471    */
2472#define STARTHEAP_IS_ZERO
2473   startheap = 0;
2474
2475   /* Map space for the hashtable */
2476#if PARANOIA > 1
2477   hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
2478#else
2479   hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
2480#endif
2481
2482#ifdef NDEBUG
2483   if (hashtb == MAP_FAILED) {
2484      fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
2485      abort ();
2486   }
2487#else
2488   assert(hashtb != MAP_FAILED);
2489#endif
2490
2491   /* Put a non-writable guard page in front of the hashtable */
2492   mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
2493#ifdef NDEBUG
2494   if (mprot == -1) {
2495     fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
2496              strerror (errno));
2497     abort ();
2498   }
2499#else
2500   assert(mprot != -1);
2501#endif
2502
2503   /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
2504   hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);
2505
2506   /* Put a non-writable guard page behind the end of the hashtable */
2507#if PARANOIA > 1
2508   mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
2509#ifdef NDEBUG
2510   if (mprot == -1) {
2511     fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
2512              strerror (errno));
2513     abort ();
2514   }
2515#else
2516   assert(mprot != -1);
2517#endif
2518#endif
2519}
2520
2521
2522
2523/* Extend the region for chunk infos by mapping more memory before the region */
2524static void
2525cireg_extend ()
2526{
2527   void *newcireg;
2528   int mprot;
2529   struct cireginfo *tempciinfo = 0;
2530   
2531#if PARANOIA > 1
2532   newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
2533#else
2534   newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
2535#endif
2536
2537#ifdef NDEBUG
2538   if (newcireg == MAP_FAILED)
2539   {
2540           fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
2541                    strerror (errno));
2542           abort ();
2543   }
2544#else
2545   assert(newcireg != MAP_FAILED);
2546#endif
2547   mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
2548#ifdef NDEBUG
2549   if (mprot == -1) {
2550           fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
2551                    strerror (errno));
2552           abort ();
2553   }
2554#else
2555   assert(mprot != -1);
2556#endif
2557   newcireg = ((char*)newcireg)+PGSIZE;
2558   
2559#if PARANOIA > 1
2560   mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
2561#ifdef NDEBUG
2562   if (mprot == -1) {
2563     fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
2564              strerror (errno));
2565     abort ();
2566   }
2567#else
2568   assert(mprot != -1);
2569#endif
2570#endif
2571
2572   tempciinfo = currciinfo;
2573   currciinfo = (struct cireginfo *) newcireg;
2574   if (tempciinfo)
2575           tempciinfo->next = currciinfo;
2576   currciinfo->position = 1;
2577   currciinfo->freecounter = NUMBER_FREE_CHUNKS;
2578   if (!firstciinfo)
2579           firstciinfo = currciinfo;
2580   totalcictr++;
2581   VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
2582}
2583
2584
2585/* Get a free chunkinfo */
2586static chunkinfoptr
2587cireg_getfree ()
2588{
2589   chunkinfoptr freeci;
2590   chunkinfoptr freelst = 0;
2591   struct cireginfo *newciinfo = firstciinfo;
2592   
2593   if (newciinfo) {
2594        freelst = newciinfo->freelist;
2595   
2596        if (!freelst && newciinfo->next) {
2597          do {
2598            newciinfo = newciinfo->next;
2599            freelst = newciinfo->freelist;
2600          } while (!freelst && newciinfo->next);
2601        }
2602   }
2603
2604   /* Check if there are any free chunkinfos on the list of free chunkinfos */
2605   if (freelst)
2606   {
2607     freeci = freelst;
2608     newciinfo->freecounter--;
2609     newciinfo->freelist = freelst->fd;
2610     
2611     VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci, 
2612                            sizeof(struct chunkinfo));
2613     
2614     freeci->prev_size = 0;
2615     freeci->size      = 0;
2616     freeci->req       = 0;
2617     freeci->hash_next = NULL;
2618     freeci->fd        = NULL;
2619     freeci->bk        = NULL;
2620     freeci->chunk     = NULL;
2621     return (freeci);
2622   }
2623   else
2624   {
2625     /* No free chunkinfos, check if chunkinfo region still has place
2626      * for a chunkinfo. If not, extend the region.
2627      */
2628     if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2629       cireg_extend ();
2630     /* Get a chunkinfo from the chunkinfo region */
2631     freeci = (chunkinfoptr) currciinfo + currciinfo->position; 
2632     currciinfo->freecounter--;
2633     currciinfo->position++;
2634
2635     VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci, 
2636                            sizeof(struct chunkinfo));
2637
2638     return (freeci);
2639   }
2640}
2641
2642static void freeciregion(struct cireginfo *freeme) {
2643  /* free the chunkinfo region */
2644  struct cireginfo *newciinfo = firstciinfo;
2645  struct cireginfo *prevciinfo = firstciinfo;
2646  void *unmapme;
2647
2648  while (newciinfo && newciinfo != freeme) {
2649    prevciinfo = newciinfo;
2650    newciinfo = newciinfo->next;
2651  }
2652  assert(freeme == newciinfo); /* rw */
2653  assert(newciinfo != NULL);   /* rw */
2654  if (newciinfo)
2655    prevciinfo->next = newciinfo->next;
2656  unmapme = (void *) ((char*)freeme - PGSIZE);
2657  VALGRIND_DESTROY_MEMPOOL((char*)freeme);
2658#if PARANOIA > 1
2659  munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2660#else
2661  munmap(unmapme, CIREGSIZE+PGSIZE);
2662#endif
2663}
2664
2665
2666static void freecilst_add(chunkinfoptr p) {
2667
2668  struct cireginfo *newciinfo;
2669
2670  newciinfo = currciinfo;
2671  if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2672    p->fd = newciinfo->freelist;
2673    newciinfo->freelist = p;
2674    newciinfo->freecounter++;
2675    VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2676    VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2677    VALGRIND_MAKE_MEM_NOACCESS(p->size,     sizeof(INTERNAL_SIZE_T));
2678    VALGRIND_MAKE_MEM_NOACCESS(p->req,      sizeof(INTERNAL_SIZE_T));
2679    VALGRIND_MAKE_MEM_NOACCESS(p->bk,       sizeof(struct chunkinfo*));
2680    VALGRIND_MAKE_MEM_NOACCESS(p->chunk,    sizeof(mchunkptr));
2681  } else {
2682    newciinfo = firstciinfo;
2683    if (newciinfo) {
2684      do {
2685        if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2686          p->fd = newciinfo->freelist;
2687          newciinfo->freelist = p;
2688          newciinfo->freecounter++;
2689          VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2690          VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2691          VALGRIND_MAKE_MEM_NOACCESS(p->size,     sizeof(INTERNAL_SIZE_T));
2692          VALGRIND_MAKE_MEM_NOACCESS(p->req,      sizeof(INTERNAL_SIZE_T));
2693          VALGRIND_MAKE_MEM_NOACCESS(p->bk,       sizeof(struct chunkinfo*));
2694          VALGRIND_MAKE_MEM_NOACCESS(p->chunk,    sizeof(mchunkptr));
2695          if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2696            freeciregion(newciinfo);
2697          break;
2698        }
2699        newciinfo = newciinfo->next;
2700      } while (newciinfo);
2701    }
2702  }
2703}
2704
2705/* Calculate the hash table entry for a chunk */
2706#ifdef STARTHEAP_IS_ZERO
2707#define hash(p)  (((unsigned long) p) >> 7)
2708#else
2709#define hash(p)  (((unsigned long) p - (unsigned long) startheap) >> 7)
2710#endif
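
/*
 * Example (illustrative; 32-bit build, MINCHUNKSIZE == 16): with
 * STARTHEAP_IS_ZERO defined, a chunk at address 0x08049600 hashes to
 * 0x08049600 >> 7 = 0x10092C. The shift by 7 gives one hashtable slot
 * per 128 bytes of address space, i.e. per LINKEDLSTELS * MINCHUNKSIZE
 * = 8 * 16 bytes, so at most LINKEDLSTELS minimum-sized chunks share a
 * slot and are chained through hash_next.
 */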
2711
2712static void
2713hashtable_add (chunkinfoptr ci)
2714{
2715   chunkinfoptr temp, next;
2716   unsigned long hashval;
2717   mchunkptr cic = chunk (ci);
2718   
2719   hashval = hash (cic);
2720
2721   if (hashval < AMOUNTHASH) {
2722
2723     temp = hashtable[hashval];
2724
2725#ifdef DNMALLOC_DEBUG
2726     fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
2727#endif
2728
2729     /* If no pointer to a chunk info list is stored at this location
2730      * in the hashtable or if the chunk's address is smaller than the
2731      * one present, add the chunk to the front of the linked list
2732      */
2733     if (temp == 0 || chunk (temp) > cic)
2734       {
2735         ci->hash_next = temp;
2736         hashtable[hashval] = ci;
2737         if (!temp) /* more likely case */
2738           goto out;
2739         temp->prev_size = chunksize(ci);
2740         return;
2741       }
2742     else
2743       {
2744         /* We must place the chunk in the linked list for this hashentry
2745          * Loop to end of list or to a position where temp's chunk's address
2746          * is larger than the new chunkinfo's chunk's address
2747          */
2748         if (!temp->hash_next || (chunk (temp->hash_next) > cic))
2749           {
2750             ci->hash_next = temp->hash_next;
2751             temp->hash_next = ci;
2752           }
2753         else
2754           {
2755             while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
2756               {
2757                 temp = temp->hash_next;
2758               }
2759             /* Place in linked list if not already there */
2760             if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
2761               {
2762                 ci->hash_next = temp->hash_next;
2763                 temp->hash_next = ci;
2764               }
2765           }
2766       }
2767   }
2768   else {
2769#ifdef DNMALLOC_CHECKS
2770     if (hashval >= AMOUNTHASH) {
2771       fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
2772           abort();
2773     }
2774#else
2775     assert(hashval < AMOUNTHASH);
2776#endif
2777   }
2778
2779 out:
2780   next = next_chunkinfo(ci);
2781   if (!next)
2782     return;
2783   next->prev_size = chunksize(ci);
2784}
2785
2786static void
2787hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2788{
2789   chunkinfoptr next;
2790
2791#ifdef DNMALLOC_DEBUG
2792   fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert), 
2793           (unsigned long)hash(chunk(ci_insert)));
2794#endif
2795
2796   if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2797      hashtable_add(ci_insert); 
2798   }
2799   else {
2800
2801      ci_insert->hash_next = ci_orig->hash_next;
2802      ci_orig->hash_next = ci_insert;
2803
2804      /* added for prevsize */
2805      if (!(ci_insert->hash_next))
2806              next = next_chunkinfo(ci_insert);
2807      else
2808              next = ci_insert->hash_next;
2809
2810      if (!next)
2811        {
2812          ci_insert->prev_size = chunksize(ci_orig);
2813        }
2814      else
2815        {
2816          next->prev_size = chunksize(ci_insert);
2817          ci_insert->prev_size = chunksize(ci_orig);
2818        }
2819   }
2820}
2821
2822static void
2823hashtable_remove (mchunkptr p) 
2824{
2825  chunkinfoptr prevtemp, temp;
2826  unsigned long hashval;
2827 
2828  hashval = hash (p);
2829#ifdef DNMALLOC_DEBUG
2830  fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
2831#endif
2832  assert(hashval < AMOUNTHASH); /* rw */
2833  prevtemp = temp = hashtable[hashval];
2834  if (chunk (temp) == p) {
2835    hashtable[hashval] = temp->hash_next;
2836  } 
2837  else
2838    {
2839      if (temp && chunk (temp) != p) {
2840        do
2841          {
2842            prevtemp = temp;
2843            temp = temp->hash_next;
2844          } while (temp && chunk (temp) != p);
2845      }
2846#ifdef DNMALLOC_CHECKS
2847      if (!temp) {
2848        fprintf (stderr,
2849                 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2850                 p, hashval);
2851        abort();
2852      }
2853#else
2854      assert(temp != NULL);
2855#endif
2856      if (temp) prevtemp->hash_next = temp->hash_next;
2857    }
2858}
2859
2860/* mmapped chunks are multiples of pagesize, no hash_nexts,
2861 * just remove from the hashtable
2862 */
2863#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2864
2865static void
2866hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
2867{
2868   unsigned long hashval;
2869   chunkinfoptr next;
2870   
2871#ifdef DNMALLOC_DEBUG
2872   fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
2873#endif
2874
2875   if (ci_orig->hash_next != ci_todelete) {
2876     hashval = hash(chunk(ci_todelete));
2877     assert(hashval < AMOUNTHASH); /* rw */
2878#ifdef DNMALLOC_CHECKS
2879     if (hashtable[hashval] != ci_todelete ) {
2880           fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
2881     }
2882#else
2883     assert(hashtable[hashval] == ci_todelete);
2884#endif
2885     hashtable[hashval] = ci_todelete->hash_next;
2886   }
2887
2888   else {
2889     ci_orig->hash_next = ci_todelete->hash_next;
2890     if (!ci_orig->hash_next) {
2891       next = next_chunkinfo(ci_orig);
2892     } else {
2893       next = ci_orig->hash_next;
2894     }
2895     if (next)
2896       next->prev_size = chunksize(ci_orig);
2897
2898   } 
2899}
2900
2901
2902static chunkinfoptr
2903hashtable_lookup (mchunkptr p)
2904{
2905   chunkinfoptr ci;
2906   unsigned long hashval;
2907   
2908   /* if we were called wrongly
2909    * if ((char *) p < startheap) return 0;
2910    */
2911   if ((char *) p >= startheap)
2912     {
2913       hashval = hash (p);
2914       assert(hashval < AMOUNTHASH); /* rw */
2915       ci = hashtable[hashval];
2916       if (ci && chunk (ci) == p)
2917         return ci;
2918
2919       if (ci) {
2920         do {
2921           ci = ci->hash_next;
2922         } while (ci && chunk (ci) != p);
2923       }
2924#ifdef DNMALLOC_CHECKS
2925       /* This should never occur but if it does, we'd like to know */
2926       if (!ci) {
2927         fprintf (stderr,
2928                  "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2929                  p, hashval);
2930         abort();
2931       }
2932#else
2933       assert(ci != NULL);
2934#endif
2935       return ci;
2936     }
2937   return 0;
2938}
2939
2940
2941
2942/*
2943   ----------- Internal state representation and initialization -----------
2944*/
2945
2946struct malloc_state {
2947
2948  /* The maximum chunk size to be eligible for fastbin */
2949  INTERNAL_SIZE_T  max_fast;   /* low 2 bits used as flags */
2950
2951  /* Fastbins */
2952  mfastbinptr      fastbins[NFASTBINS];
2953
2954  /* Base of the topmost chunk -- not otherwise kept in a bin */
2955  chunkinfoptr        top;
2956
2957  /* The remainder from the most recent split of a small request */
2958  chunkinfoptr        last_remainder;
2959
2960  /* Normal bins */
2961  struct chunkinfo bins[NBINS];
2962
2963  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2964  unsigned int     binmap[BINMAPSIZE+1];
2965
2966  /* Tunable parameters */
2967  CHUNK_SIZE_T     trim_threshold;
2968  INTERNAL_SIZE_T  top_pad;
2969  INTERNAL_SIZE_T  mmap_threshold;
2970
2971  /* Memory map support */
2972  int              n_mmaps;
2973  int              n_mmaps_max;
2974  int              max_n_mmaps;
2975
2976  /* Cache malloc_getpagesize */
2977  unsigned int     pagesize; 
2978
2979  /* Canary */
2980  char             guard_stored[GUARD_SIZE];
2981
2982  /* Track properties of MORECORE */
2983  unsigned int     morecore_properties;
2984
2985  /* Statistics */
2986  INTERNAL_SIZE_T  mmapped_mem;
2987  INTERNAL_SIZE_T  sbrked_mem;
2988  INTERNAL_SIZE_T  max_sbrked_mem;
2989  INTERNAL_SIZE_T  max_mmapped_mem;
2990  INTERNAL_SIZE_T  max_total_mem;
2991};
2992
2993typedef struct malloc_state *mstate;
2994
2995/*
2996   There is exactly one instance of this struct in this malloc.
2997   If you are adapting this malloc in a way that does NOT use a static
2998   malloc_state, you MUST explicitly zero-fill it before using. This
2999   malloc relies on the property that malloc_state is initialized to
3000   all zeroes (as is true of C statics).
3001*/
3002
3003static struct malloc_state * av_ = NULL;  /* never directly referenced */
3004
3005/*
3006   All uses of av_ are via get_malloc_state().
3007   At most one "call" to get_malloc_state is made per invocation of
3008   the public versions of malloc and free, but other routines
3009   that in turn invoke malloc and/or free may call it more than once.
3010   Also, it is called in check* routines if DEBUG is set.
3011*/
3012
3013#define get_malloc_state() (av_)
3014
3015/*
3016  Initialize a malloc_state struct.
3017
3018  malloc_init_state() is called only from within malloc_consolidate, which
3019  needs to be called in the same contexts anyway.  It is never called directly
3020  outside of malloc_consolidate because some optimizing compilers try
3021  to inline it at all call points, which turns out not to be an
3022  optimization at all. (Inlining it in malloc_consolidate is fine though.)
3023*/
3024
3025#if __STD_C
3026static void malloc_mmap_state(void)
3027#else
3028static void malloc_mmap_state()
3029#endif
3030{
3031  int mprot;
3032  unsigned long pagesize = malloc_getpagesize;
3033  size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3034
3035  void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3036
3037
3038#ifdef NDEBUG
3039   if (foo == MAP_FAILED) {
3040      fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3041      abort ();
3042   }
3043#else
3044   assert(foo != MAP_FAILED);
3045#endif
3046
3047   mprot = mprotect(foo, pagesize, PROT_NONE);
3048#ifdef NDEBUG
3049   if (mprot == -1) {
3050     fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3051              strerror (errno));
3052     abort ();
3053   }
3054#else
3055   assert(mprot != -1);
3056#endif
3057
3058   av_ = (struct malloc_state *) ((char*)foo + pagesize);
3059
3060   MALLOC_ZERO(av_, sizeof(struct malloc_state));
3061
3062   mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3063#ifdef NDEBUG
3064   if (mprot == -1) {
3065     fprintf (stderr, 
3066              "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3067              strerror (errno));
3068     abort ();
3069   }
3070#else
3071   assert(mprot != -1);
3072#endif
3073}
3074
3075#if __STD_C
3076static void malloc_init_state(mstate av)
3077#else
3078static void malloc_init_state(av) mstate av;
3079#endif
3080{
3081  int     i;
3082  mbinptr bin;
3083
3084  void *  morecore_test = MORECORE(0);
3085  unsigned long hashval;
3086
3087  /* Test morecore function
3088   */
3089  set_morecore32bit(av);
3090
3091  if (morecore_test == MORECORE_FAILURE)
3092    {
3093      set_nonmorecore32bit(av);
3094    }
3095  else
3096    {
3097      /* On 64bit systems, the heap may be located above the
3098       * 32bit address space. Since mmap() probably still can be
3099       * convinced to map within 32bit, we don't use sbrk().
3100       */
3101      hashval = hash (morecore_test);
3102      if (hashval >= AMOUNTHASH) 
3103        {
3104          set_nonmorecore32bit(av);
3105        }
3106    }
3107
3108 
3109  /* Establish circular links for normal bins */
3110  for (i = 1; i < NBINS; ++i) { 
3111    bin = bin_at(av,i);
3112    bin->fd = bin->bk = bin;
3113  }
3114
3115  av->top_pad        = DEFAULT_TOP_PAD;
3116  av->n_mmaps_max    = DEFAULT_MMAP_MAX;
3117  av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3118  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
3119
3120#if MORECORE_CONTIGUOUS
3121  set_contiguous(av);
3122#else
3123  set_noncontiguous(av);
3124#endif
3125
3126  set_max_fast(av, DEFAULT_MXFAST);
3127
3128  av->top = cireg_getfree ();
3129  av->top->chunk     = (mchunkptr) startheap;
3130  av->top->size      = 0;
3131  set_previnuse(av->top);
3132  clear_inuse(av->top);
3133  hashtable[0]       = av->top;
3134  av->pagesize       = malloc_getpagesize;
3135
3136  memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);
3137
3138}
3139
3140/*
3141   Other internal utilities operating on mstates
3142*/
3143
3144#if __STD_C
3145static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
3146static int      sYSTRIm(size_t, mstate);
3147static void     malloc_consolidate(mstate);
3148#else
3149static Void_t*  sYSMALLOc();
3150static int      sYSTRIm();
3151static void     malloc_consolidate();
3152#endif
3153
3154/* dnmalloc functions */
3155/* needs mstate so moved here */
3156
3157static chunkinfoptr
3158next_chunkinfo (chunkinfoptr ci)
3159{
3160   mchunkptr nextp;
3161   unsigned long hashval;
3162   chunkinfoptr cinfonextp;
3163   mstate av = get_malloc_state();
3164   
3165   /* If ci is not the last element in its linked list we can simply
3166      return the next chunkinfo from the list (the else branch below);
3167      otherwise we must look up the physically following chunk. */
3168   if (!ci->hash_next)
3169     {
3170       /* ci is the last element, find the next chunkinfo by
3171        * looking up the chunkinfo for the chunk that is after p's chunk
3172        */
3173       nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));
3174
3175       if (!(nextp == av->top->chunk)) 
3176         {
3177           hashval = hash (nextp);
3178           /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
3179           cinfonextp = hashtable[hashval];
3180           if (cinfonextp && chunk (cinfonextp) == nextp)
3181             return cinfonextp; 
3182           
3183#ifdef DNMALLOC_CHECKS_EXTRA
3184           /* This seems bogus; a chunkinfo may legally have no nextp if
3185            * it's the last one allocated (?)
3186            */
3187           else {
3188             if (cinfonextp)
3189               fprintf (stderr,
3190                        "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3191                        chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
3192             else
3193               fprintf (stderr,
3194                        "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3195                        chunk(ci), hashval, "null", "null", nextp);
3196           }
3197#endif
3198         
3199           return NULL;
3200         }
3201       else
3202         {
3203           return av->top;
3204         }
3205
3206     }
3207   else
3208     {
3209       return (ci->hash_next);
3210     }
3211}
3212
3213static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3214        mchunkptr nextp;
3215        if (oldp->hash_next == newp)
3216                return 1;
3217        nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3218        if (nextp == chunk(newp))
3219                return 1;
3220        return 0;
3221}
3222
3223
3224
3225/* Get the chunkinfo of the physically previous chunk */
3226/* Since we disposed of prev_size, we need this function to find the previous */
3227
3228static chunkinfoptr
3229prev_chunkinfo (chunkinfoptr ci)
3230{
3231   unsigned int i;
3232   chunkinfoptr prev;
3233   mchunkptr prevchunk = 0;
3234   /* chunkinfoptr temp; */
3235   
3236   /* Get the hashtable location of the chunkinfo */
3237   i = hash (chunk (ci));
3238   assert(i < AMOUNTHASH); /* rw */
3239     
3240   /* Get the first element of the linked list of chunkinfos that contains ci */
3241   prev = hashtable[i];
3242   
3243   if (ci == prev) {
3244     prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3245     i = hash(prevchunk);
3246     assert(i < AMOUNTHASH); /* rw */
3247     /* Loop over the linked list until we reach the last element */
3248     for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3249   } else {
3250     /* ci is not the first element in the linked list; we can just
3251        loop over the list and return its predecessor
3252     */
3253     for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3254   }
3255
3256   return prev; 
3257}
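/*
   Note (illustrative; assumes the hash groups chunks by address so that
   each bucket's hash_next list is kept in address order): the physically
   previous chunk is either ci's predecessor within its own bucket, or,
   when ci is the bucket head, the last entry in the bucket of the chunk
   starting at

       (mchunkptr)(((char*)chunk(ci)) - ci->prev_size)

   which is exactly what the two branches of prev_chunkinfo() implement.
*/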
3258
3259
3260/*
3261  Debugging support
3262  Dnmalloc broke dlmalloc's debugging functions; they should be fixed some
3263  time in the future. For now they are defined as empty macros.
3264*/
3265
3266#define check_chunk(P)
3267#define check_free_chunk(P)
3268#define check_inuse_chunk(P)
3269#define check_remalloced_chunk(P,N)
3270#define check_malloced_chunk(P,N)
3271#define check_malloc_state()
3272
3273
3274/* ----------- Routines dealing with system allocation -------------- */
3275
3276/*
3277  sysmalloc handles malloc cases requiring more memory from the system.
3278  On entry, it is assumed that av->top does not have enough
3279  space to service request for nb bytes, thus requiring that av->top
3280  be extended or replaced.
3281*/
3282
3283#if __STD_C
3284static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3285#else
3286static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3287#endif
3288{
3289  chunkinfoptr    old_top;        /* incoming value of av->top */
3290  INTERNAL_SIZE_T old_size;       /* its size */
3291  char*           old_end;        /* its end address */
3292
3293  long            size;           /* arg to first MORECORE or mmap call */
3294  char*           brk;            /* return value from MORECORE */
3295
3296  long            correction;     /* arg to 2nd MORECORE call */
3297  char*           snd_brk;        /* 2nd return val */
3298
3299  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3300  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
3301  char*           aligned_brk;    /* aligned offset into brk */
3302
3303  chunkinfoptr    p;              /* the allocated/returned chunk */
3304  chunkinfoptr    remainder;      /* remainder from allocation */
3305  chunkinfoptr    fencepost;      /* fencepost */
3306  CHUNK_SIZE_T    remainder_size; /* its size */
3307
3308  CHUNK_SIZE_T    sum;            /* for updating stats */
3309
3310  size_t          pagemask  = av->pagesize - 1;
3311
3312#ifdef DNMALLOC_DEBUG
3313  fprintf(stderr, "Enter sysmalloc\n");
3314#endif
3315  /*
3316    If there is space available in fastbins, consolidate and retry
3317    malloc from scratch rather than getting memory from system.  This
3318    can occur only if nb is in smallbin range so we didn't consolidate
3319    upon entry to malloc. It is much easier to handle this case here
3320    than in malloc proper.
3321  */
3322
3323
3324  if (have_fastchunks(av)) {
3325    Void_t * retval;
3326    assert(in_smallbin_range(nb));
3327    malloc_consolidate(av);
3328#ifdef DNMALLOC_DEBUG
3329    fprintf(stderr, "Return sysmalloc have_fastchunks\n");
3330#endif
3331    retval = mALLOc(nb - MALLOC_ALIGN_MASK);
3332    VALGRIND_FREELIKE_BLOCK(retval, 0);
3333    return retval;
3334  }
3335
3336
3337  /*
3338    If have mmap, and the request size meets the mmap threshold, and
3339    the system supports mmap, and there are few enough currently
3340    allocated mmapped regions, try to directly map this request
3341    rather than expanding top.
3342  */
3343
3344  if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3345               (av->n_mmaps < av->n_mmaps_max))) {
3346
3347    char* mm;             /* return value from mmap call*/
3348
3349    /*
3350      Round up size to nearest page.  Unlike dlmalloc, no extra SIZE_SZ
3351      is added here, because dnmalloc keeps the chunk header out of band
3352      in the chunkinfo rather than in front of the chunk.
3353    */
3354    size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3355
3356    /* Don't try if size wraps around 0 */
3357    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3358           
3359
3360      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3361     
3362      if (mm != (char*)(MORECORE_FAILURE)) {
3363       
3364        VALGRIND_MAKE_MEM_NOACCESS(mm,size);
3365
3366        /*
3367          The offset to the start of the mmapped region is stored
3368          in the hash_next field of the chunk's chunkinfo. This allows us
3369          to adjust the returned start address to meet alignment
3370          requirements here and in memalign(), and still be able to
3371          compute the proper address for later munmap in free() and realloc().
3372        */
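        /*
          Worked example (illustrative; assumes MALLOC_ALIGNMENT == 16):
          if mmap returned an address mm ending in 0x8, then

              front_misalign = (INTERNAL_SIZE_T)mm & MALLOC_ALIGN_MASK = 8
              correction     = MALLOC_ALIGNMENT - front_misalign       = 8

          so the chunk starts at mm + 8 and free() later calls
          munmap(chunk(p) - 8, chunksize(p) + 8).  In practice mmap
          returns page-aligned memory, so front_misalign is normally 0.
        */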
3373       
3374        front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
3375        p = cireg_getfree();
3376       
3377        if (front_misalign > 0) {
3378          correction = MALLOC_ALIGNMENT - front_misalign;
3379          p->chunk = (mchunkptr)(mm + correction);
3380          p->hash_next = (chunkinfoptr) correction;
3381          set_head(p, (size - correction) |INUSE|IS_MMAPPED);
3382        }
3383        else {
3384          p->chunk = (mchunkptr)mm;
3385          p->hash_next = 0;
3386          set_head(p, size|INUSE|IS_MMAPPED);
3387        }
3388        hashtable_add(p);
3389        /* update statistics */
3390       
3391        if (++av->n_mmaps > av->max_n_mmaps) 
3392          av->max_n_mmaps = av->n_mmaps;
3393       
3394        sum = av->mmapped_mem += size;
3395        if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) 
3396          av->max_mmapped_mem = sum;
3397        sum += av->sbrked_mem;
3398        if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) 
3399          av->max_total_mem = sum;
3400
3401        check_chunk(p);
3402       
3403#ifdef DNMALLOC_DEBUG
3404        fprintf(stderr, "Return mmapped (%lu, total %lu)\n", 
3405                size, (unsigned long)/* size_t */av->max_total_mem );
3406#endif
3407        return chunk(p);
3408      }
3409    }
3410  }
3411
3412  /* Record incoming configuration of top */
3413
3414  old_top  = av->top;
3415  old_size = chunksize(old_top);
3416  old_end  = (char*)(chunk_at_offset(chunk(old_top), old_size));
3417
3418  brk = snd_brk = (char*)(MORECORE_FAILURE); 
3419
3420  /*
3421     If not the first time through, we require old_size to be
3422     at least MINSIZE and to have prev_inuse set.
3423  */
3424
3425  /* assert((old_top == initial_top(av) && old_size == 0) ||
3426         ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3427         prev_inuse(old_top))); */
3428
3429  /* Precondition: not enough current space to satisfy nb request */
3430  assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3431
3432  /* Precondition: all fastbins are consolidated */
3433  assert(!have_fastchunks(av));
3434
3435  /* Request enough space for nb + pad + overhead */
3436  size = nb + av->top_pad + MINSIZE;
3437
3438  /*
3439    If contiguous, we can subtract out existing space that we hope to
3440    combine with new space. We add it back later only if
3441    we don't actually get contiguous space.
3442  */
3443  if (contiguous(av))
3444    size -= old_size;
3445
3446  /*
3447    Round to a multiple of page size.
3448    If MORECORE is not contiguous, this ensures that we only call it
3449    with whole-page arguments.  And if MORECORE is contiguous and
3450    this is not first time through, this preserves page-alignment of
3451    previous calls. Otherwise, we correct to page-align below.
3452  */
3453
3454  size = (size + pagemask) & ~pagemask;
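  /*
     Illustrative example: with 4096-byte pages (pagemask == 0xfff), a
     size of 133000 bytes is rounded up to

         (133000 + 4095) & ~4095 == 135168    (33 pages)
  */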
3455
3456  /*
3457    Don't try to call MORECORE if argument is so big as to appear
3458    negative. Note that since mmap takes size_t arg, it may succeed
3459    below even if we cannot call MORECORE.
3460  */
3461  if (size > 0 && morecore32bit(av)) 
3462    brk = (char*)(MORECORE(size));
3463
3464  /*
3465    If have mmap, try using it as a backup when MORECORE fails or
3466    cannot be used. This is worth doing on systems that have "holes" in
3467    address space, so sbrk cannot extend to give contiguous space, but
3468    space is available elsewhere.  Note that we ignore mmap max count
3469    and threshold limits, since the space will not be used as a
3470    segregated mmap region.
3471  */
3472  if (brk != (char*)(MORECORE_FAILURE)) {
3473    av->sbrked_mem += size;
3474    VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3475  }
3476
3477  else {
3478
3479#ifdef DNMALLOC_DEBUG
3480    fprintf(stderr, "Morecore failure in sysmalloc\n");
3481#endif
3482
3483    /* Cannot merge with old top, so add its size back in */
3484    if (contiguous(av))
3485      size = (size + old_size + pagemask) & ~pagemask;
3486
3487    /* If we are relying on mmap as backup, then use larger units */
3488    if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3489      size = MMAP_AS_MORECORE_SIZE;
3490
3491    /* Don't try if size wraps around 0 */
3492    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3493
3494#ifdef DNMALLOC_DEBUG
3495      fprintf(stderr, "Try mmap in sysmalloc\n");
3496#endif
3497      brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3498     
3499      if (brk != (char*)(MORECORE_FAILURE)) {
3500       
3501        VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3502
3503        av->mmapped_mem += size;
3504#ifdef DNMALLOC_DEBUG
3505        fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
3506#endif
3507
3508        /* We do not need, and cannot use, another sbrk call to find end */
3509        snd_brk = brk + size;
3510       
3511        /*
3512           Record that we no longer have a contiguous sbrk region.
3513           After the first time mmap is used as backup, we do not
3514           ever rely on contiguous space since this could incorrectly
3515           bridge regions.
3516        */
3517        set_noncontiguous(av);
3518      }
3519    }
3520  }
3521
3522  if (brk != (char*)(MORECORE_FAILURE)) {
3523#ifdef DNMALLOC_DEBUG
3524    fprintf(stderr, "Success path %lu allocated, sbrked %lu\n", 
3525            size, (unsigned long)av->sbrked_mem);
3526#endif
3527    /* av->sbrked_mem += size; moved up */
3528
3529    /*
3530      If MORECORE extends previous space, we can likewise extend top size.
3531    */
3532   
3533    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3534      set_head(old_top, (size + old_size) | PREV_INUSE);
3535#ifdef DNMALLOC_DEBUG
3536      fprintf(stderr, "Previous space extended\n");
3537#endif
3538    }
3539
3540    /*
3541      Otherwise, make adjustments:
3542     
3543      * If the first time through or noncontiguous, we need to call sbrk
3544        just to find out where the end of memory lies.
3545
3546      * We need to ensure that all returned chunks from malloc will meet
3547        MALLOC_ALIGNMENT
3548
3549      * If there was an intervening foreign sbrk, we need to adjust sbrk
3550        request size to account for fact that we will not be able to
3551        combine new space with existing space in old_top.
3552
3553      * Almost all systems internally allocate whole pages at a time, in
3554        which case we might as well use the whole last page of request.
3555        So we allocate enough more memory to hit a page boundary now,
3556        which in turn causes future contiguous calls to page-align.
3557    */
3558   
3559    else {
3560      /* front_misalign = 0; *//*superfluous */
3561      /* end_misalign = 0; *//*superfluous */
3562      correction = 0;
3563      aligned_brk = brk;
3564
3565      /*
3566        If MORECORE returns an address lower than we have seen before,
3567        we know it isn't really contiguous.  This and some subsequent
3568        checks help cope with non-conforming MORECORE functions and
3569        the presence of "foreign" calls to MORECORE from outside of
3570        malloc or by other threads.  We cannot guarantee to detect
3571        these in all cases, but cope with the ones we do detect.
3572      */
3573      if (contiguous(av) && old_size != 0 && brk < old_end) {
3574        set_noncontiguous(av);
3575      }
3576     
3577      /* handle contiguous cases */
3578      if (contiguous(av)) { 
3579
3580#ifdef DNMALLOC_DEBUG
3581        fprintf(stderr, "Handle contiguous cases\n");
3582#endif
3583        /*
3584           We can tolerate forward non-contiguities here (usually due
3585           to foreign calls) but treat them as part of our space for
3586           stats reporting.
3587        */
3588        if (old_size != 0) 
3589          av->sbrked_mem += brk - old_end;
3590       
3591        /* Guarantee alignment of first new chunk made from this space */
3592
3593        front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
3594        if (front_misalign > 0) {
3595
3596          /*
3597            Skip over some bytes to arrive at an aligned position.
3598            We don't need to specially mark these wasted front bytes.
3599            They will never be accessed anyway because
3600            prev_inuse of av->top (and any chunk created from its start)
3601            is always true after initialization.
3602          */
3603
3604          correction = MALLOC_ALIGNMENT - front_misalign;
3605          aligned_brk += correction;
3606        }
3607       
3608        /*
3609          If this isn't adjacent to existing space, then we will not
3610          be able to merge with old_top space, so must add to 2nd request.
3611        */
3612       
3613        correction += old_size;
3614       
3615        /* Extend the end address to hit a page boundary */
3616        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3617        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3618       
3619        assert(correction >= 0);
3620        snd_brk = (char*)(MORECORE(correction));
3621       
3622        if (snd_brk == (char*)(MORECORE_FAILURE)) {
3623          /*
3624            If can't allocate correction, try to at least find out current
3625            brk.  It might be enough to proceed without failing.
3626          */
3627          correction = 0;
3628          snd_brk = (char*)(MORECORE(0));
3629        }
3630        else if (snd_brk < brk) {
3631          /*
3632            If the second call gives noncontiguous space even though
3633            it says it won't, the only course of action is to ignore
3634            results of second call, and conservatively estimate where
3635            the first call left us. Also set noncontiguous, so this
3636            won't happen again, leaving at most one hole.
3637           
3638            Note that this check is intrinsically incomplete.  Because
3639            MORECORE is allowed to give more space than we ask for,
3640            there is no reliable way to detect a noncontiguity
3641            producing a forward gap for the second call.
3642          */
3643          snd_brk = brk + size;
3644          correction = 0;
3645          set_noncontiguous(av);
3646        }
3647        else {
3648          VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
3649        }
3650
3651      }
3652     
3653      /* handle non-contiguous cases */
3654      else { 
3655
3656#ifdef DNMALLOC_DEBUG
3657        fprintf(stderr, "Handle non-contiguous cases\n");
3658#endif
3659
3660        /* MORECORE/mmap must correctly align */
3661        assert(aligned_OK(brk));
3662       
3663        /* Find out current end of memory */
3664        if (snd_brk == (char*)(MORECORE_FAILURE)) {
3665          snd_brk = (char*)(MORECORE(0));
3666          av->sbrked_mem += snd_brk - brk - size;
3667        }
3668#ifdef DNMALLOC_DEBUG
3669        fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
3670#endif
3671      }
3672     
3673      /* Adjust top based on results of second sbrk.
3674       *
3675       * If mmap() has been used as backup for failed morecore(),
3676       * we end up in this branch as well.
3677       */
3678      if (snd_brk != (char*)(MORECORE_FAILURE)) {
3679#ifdef DNMALLOC_DEBUG
3680        fprintf(stderr, "Adjust top, correction %lu\n", correction);
3681#endif
3682        /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
3683        av->top =  cireg_getfree();
3684        av->top->chunk = (mchunkptr)aligned_brk;
3685        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3686#ifdef DNMALLOC_DEBUG
3687        fprintf(stderr, "Adjust top, top %p size %lu\n", 
3688                av->top, (unsigned long)chunksize(av->top));
3689#endif
3690        hashtable_add(av->top);
3691        av->sbrked_mem += correction;
3692     
3693        /*
3694          If not the first time through, we either have a
3695          gap due to foreign sbrk or a non-contiguous region.  Insert a
3696          fencepost at old_top to prevent consolidation with space
3697          we don't own. These fenceposts are artificial chunks that are
3698          marked as inuse. The original dlmalloc used two of them, but
3699          they were too small to be reused. To ensure that the linked
3700          lists contain at most 8 elements we only use one; inuse is
3701          determined by the current rather than the next chunk anyway.
3702        */
3703   
3704        if (old_size != 0) {
3705#ifdef DNMALLOC_DEBUG
3706          fprintf(stderr, "Shrink old_top to insert fenceposts\n");
3707#endif
3708          /*
3709             Shrink old_top to insert fenceposts, keeping size a
3710             multiple of MALLOC_ALIGNMENT. We know there is at least
3711             enough space in old_top to do this.
3712          */
3713#ifdef DNMALLOC_DEBUG
3714          fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n", 
3715                  old_top, (unsigned long)old_size);
3716#endif
3717          old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3718          set_head(old_top, old_size | PREV_INUSE);
3719#ifdef DNMALLOC_DEBUG
3720          fprintf(stderr, "Adjust top, old_size after %lu\n", 
3721                  (unsigned long)old_size);
3722#endif
3723         
3724          /*
3725            Note that the following assignments completely overwrite
3726            old_top when old_size was previously MINSIZE.  This is
3727            intentional. We need the fencepost, even if old_top otherwise gets
3728            lost.
3729          */
3730          /* In dnmalloc the fencepost needs to be 16 bytes; since it is
3731             marked inuse it will never be coalesced
3732          */
3733          fencepost = cireg_getfree();
3734          fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top), 
3735                                                         old_size);
3736          fencepost->size = 16|INUSE|PREV_INUSE;
3737          hashtable_add(fencepost);
3738          /*
3739             If possible, release the rest, suppressing trimming.
3740          */
3741          if (old_size >= MINSIZE) {
3742            INTERNAL_SIZE_T tt = av->trim_threshold;
3743#ifdef DNMALLOC_DEBUG
3744            fprintf(stderr, "Release\n");
3745#endif
3746            av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3747            set_head(old_top, old_size | PREV_INUSE | INUSE);
3748            guard_set(av->guard_stored, old_top, 0, old_size);
3749            VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
3750            fREe(chunk(old_top));
3751            av->trim_threshold = tt;
3752#ifdef DNMALLOC_DEBUG
3753            fprintf(stderr, "Release done\n");
3754#endif
3755          }
3756
3757#ifdef DNMALLOC_DEBUG
3758          fprintf(stderr, "Adjust top, size %lu\n", 
3759                  (unsigned long)chunksize(av->top));
3760#endif
3761
3762        } /* fenceposts */
3763      } /* adjust top */
3764    } /* not extended previous region */
3765   
3766    /* Update statistics */ /* FIXME check this */
3767    sum = av->sbrked_mem;
3768    if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3769      av->max_sbrked_mem = sum;
3770   
3771    sum += av->mmapped_mem;
3772    if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3773      av->max_total_mem = sum;
3774
3775    check_malloc_state();
3776   
3777    /* finally, do the allocation */
3778
3779    p = av->top;
3780    size = chunksize(p);
3781   
3782#ifdef DNMALLOC_DEBUG
3783    fprintf(stderr, "Size: %lu  nb+MINSIZE: %lu\n", 
3784            (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
3785#endif
3786
3787    /* check that one of the above allocation paths succeeded */
3788    if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3789      remainder_size = size - nb;
3790      remainder = cireg_getfree();
3791      remainder->chunk = chunk_at_offset(chunk(p), nb);
3792      av->top = remainder;
3793      set_head(p, nb | PREV_INUSE | INUSE);
3794      set_head(remainder, remainder_size | PREV_INUSE);
3795      hashtable_insert (p, av->top);
3796      check_malloced_chunk(p, nb);
3797#ifdef DNMALLOC_DEBUG
3798      fprintf(stderr, "Return any (total %lu)\n", 
3799              (unsigned long)/* size_t */av->max_total_mem );
3800#endif
3801      return chunk(p);
3802    }
3803
3804  }
3805
3806#ifdef DNMALLOC_DEBUG
3807  fprintf(stderr, "Return failed (total %lu)\n", 
3808          (unsigned long)/* size_t */av->max_total_mem );
3809#endif
3810
3811  /* catch all failure paths */
3812  MALLOC_FAILURE_ACTION;
3813  return 0;
3814}
3815
3816
3817
3818
3819/*
3820  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
3821  to the system (via negative arguments to sbrk) if there is unused
3822  memory at the `high' end of the malloc pool. It is called
3823  automatically by free() when top space exceeds the trim
3824  threshold. It is also called by the public malloc_trim routine.  It
3825  returns 1 if it actually released any memory, else 0.
3826*/
3827
3828#if __STD_C
3829static int sYSTRIm(size_t pad, mstate av)
3830#else
3831static int sYSTRIm(pad, av) size_t pad; mstate av;
3832#endif
3833{
3834  long  top_size;        /* Amount of top-most memory */
3835  long  extra;           /* Amount to release */
3836  long  released;        /* Amount actually released */
3837  char* current_brk;     /* address returned by pre-check sbrk call */
3838  char* new_brk;         /* address returned by post-check sbrk call */
3839  size_t pagesz;
3840
3841  pagesz = av->pagesize;
3842  top_size = chunksize(av->top);
3843 
3844  /* Release in pagesize units, keeping at least one page */
3845  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
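  /*
     Worked example (illustrative; assumes pagesz == 4096, pad == 0 and
     MINSIZE == 16): with top_size == 5*4096 == 20480,

         extra = ((20480 - 0 - 16 + 4095) / 4096 - 1) * 4096
               = (5 - 1) * 4096
               = 16384

     i.e. four pages are given back to the system and one page of top
     is kept.
  */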
3846 
3847  if (extra > 0) {
3848   
3849    /*
3850      Only proceed if end of memory is where we last set it.
3851      This avoids problems if there were foreign sbrk calls.
3852    */
3853    current_brk = (char*)(MORECORE(0));
3854    if (current_brk == (char*)(av->top) + top_size) {
3855     
3856      /*
3857        Attempt to release memory. We ignore MORECORE return value,
3858        and instead call again to find out where new end of memory is.
3859        This avoids problems if the first call releases less than we asked,
3860        or if failure somehow altered the brk value. (We could still
3861        encounter problems if it altered brk in some very bad way,
3862        but the only thing we can do is adjust anyway, which will cause
3863        some downstream failure.)
3864      */
3865     
3866      MORECORE(-extra);
3867      new_brk = (char*)(MORECORE(0));
3868     
3869      if (new_brk != (char*)MORECORE_FAILURE) {
3870        released = (long)(current_brk - new_brk);
3871       
3872        if (released != 0) {
3873          /* Success. Adjust top. */
3874          av->sbrked_mem -= released;
3875          set_head(av->top, (top_size - released) | PREV_INUSE);
3876          check_malloc_state();
3877          return 1;
3878        }
3879      }
3880    }
3881  }
3882  return 0;
3883}
3884
3885/*
3886  ------------------------------ malloc ------------------------------
3887*/
3888
3889
3890#if __STD_C
3891DL_STATIC Void_t* mALLOc(size_t bytes)
3892#else
3893DL_STATIC   Void_t* mALLOc(bytes) size_t bytes;
3894#endif
3895{
3896  mstate av = get_malloc_state();
3897
3898  INTERNAL_SIZE_T nb;               /* normalized request size */
3899  unsigned int    idx;              /* associated bin index */
3900  mbinptr         bin;              /* associated bin */
3901  mfastbinptr*    fb;               /* associated fastbin */
3902
3903  chunkinfoptr       victim;           /* inspected/selected chunk */
3904  INTERNAL_SIZE_T size;             /* its size */
3905  int             victim_index;     /* its bin index */
3906
3907  chunkinfoptr       remainder;        /* remainder from a split */
3908  CHUNK_SIZE_T    remainder_size;   /* its size */
3909
3910  unsigned int    block;            /* bit map traverser */
3911  unsigned int    bit;              /* bit map traverser */
3912  unsigned int    map;              /* current word of binmap */
3913
3914  chunkinfoptr       fwd;              /* misc temp for linking */
3915  chunkinfoptr       bck;              /* misc temp for linking */
3916 
3917  Void_t*         retval;
3918
3919  /* chunkinfoptr         next; */
3920 
3921
3922  /*
3923    Convert request size to internal form by adding SIZE_SZ bytes
3924    overhead plus possibly more to obtain necessary alignment and/or
3925    to obtain a size of at least MINSIZE, the smallest allocatable
3926    size. Also, checked_request2size traps (returning 0) request sizes
3927    that are so large that they wrap around zero when padded and
3928    aligned.
3929  */
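  /*
     Worked example (illustrative; assumes SIZE_SZ == 4 and
     MALLOC_ALIGNMENT == 8): a request of 13 bytes becomes

         nb = (13 + 4 + 7) & ~7 = 24

     while a request of 0 bytes would fall below MINSIZE and is padded
     up to MINSIZE instead.
  */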
3930#if defined(SH_CUTEST)
3931  extern int malloc_count;
3932  ++malloc_count;
3933#endif
3934
3935  checked_request2size(bytes, nb);
3936
3937  /*
3938    Bypass search if no frees yet
3939   */
3940  if (av && have_anychunks(av)) {
3941    goto av_initialized;
3942  }
3943  else {
3944    if (!av || av->max_fast == 0) { /* initialization check */
3945      malloc_consolidate(av);
3946      av = get_malloc_state();
3947    }
3948    goto use_top;
3949  }
3950
3951 av_initialized:
3952
3953  /*
3954    If the size qualifies as a fastbin, first check corresponding bin.
3955  */
3956  if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3957    fb = &(av->fastbins[(fastbin_index(nb))]);
3958    if ( (victim = *fb) != 0) {
3959      *fb = victim->fd;
3960      check_remalloced_chunk(victim, nb);
3961      guard_set(av->guard_stored, victim, bytes, nb);
3962      VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
3963      return chunk(victim);
3964    }
3965  }
3966
3967  /*
3968    If a small request, check regular bin.  Since these "smallbins"
3969    hold one size each, no searching within bins is necessary.
3970    (For a large request, we need to wait until unsorted chunks are
3971    processed to find best fit. But for small ones, fits are exact
3972    anyway, so we can check now, which is faster.)
3973  */
3974
3975  if (in_smallbin_range(nb)) {
3976    idx = smallbin_index(nb);
3977    bin = bin_at(av,idx);
3978
3979    if ((victim = last(bin)) != bin) {
3980      bck = victim->bk;
3981      bin->bk = bck;
3982      bck->fd = bin;
3983
3984      set_all_inuse(victim);
3985           
3986      check_malloced_chunk(victim, nb);
3987      guard_set(av->guard_stored, victim, bytes, nb);
3988      VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
3989      return chunk(victim);
3990    }
3991  }
3992
3993  /*
3994     If this is a large request, consolidate fastbins before continuing.
3995     While it might look excessive to kill all fastbins before
3996     even seeing if there is space available, this avoids
3997     fragmentation problems normally associated with fastbins.
3998     Also, in practice, programs tend to have runs of either small or
3999     large requests, but less often mixtures, so consolidation is not
4000     invoked all that often in most programs. And the programs in which
4001     it is called frequently would otherwise tend to fragment.
4002  */
4003
4004  else {
4005    idx = largebin_index(nb);
4006    if (have_fastchunks(av)) 
4007      malloc_consolidate(av);
4008  }
4009
4010  /*
4011    Process recently freed or remaindered chunks, taking one only if
4012    it is an exact fit or, if this is a small request, the remainder from
4013    the most recent non-exact fit.  Place other traversed chunks in
4014    bins.  Note that this step is the only place in any routine where
4015    chunks are placed in bins.
4016  */
4017   
4018  while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4019    bck = victim->bk;
4020    size = chunksize(victim);
4021   
4022    /*
4023       If a small request, try to use last remainder if it is the
4024       only chunk in unsorted bin.  This helps promote locality for
4025       runs of consecutive small requests. This is the only
4026       exception to best-fit, and applies only when there is
4027       no exact fit for a small chunk.
4028    */
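    /*
       Illustrative example (hypothetical call sequence): after

           p1 = malloc(20); p2 = malloc(20); p3 = malloc(20);

       successive small requests that find no exact fit are carved from
       the front of the same remainder region, so p1, p2 and p3 tend to
       end up adjacent in memory, which is the locality benefit
       described above.
    */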
4029   
4030    if (UNLIKELY(in_smallbin_range(nb) && 
4031                 bck == unsorted_chunks(av) &&
4032                 victim == av->last_remainder &&
4033                 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4034     
4035      /* split and reattach remainder */
4036      remainder_size = size - nb;
4037      remainder = cireg_getfree();
4038      remainder->chunk = chunk_at_offset(chunk(victim), nb);
4039      unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4040      av->last_remainder = remainder; 
4041      remainder->bk = remainder->fd = unsorted_chunks(av);
4042     
4043      set_head(victim, nb | PREV_INUSE|INUSE);
4044      set_head(remainder, remainder_size | PREV_INUSE);     
4045      hashtable_insert(victim, remainder);
4046
4047      check_malloced_chunk(victim, nb);
4048      guard_set(av->guard_stored, victim, bytes, nb);
4049      VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4050      return chunk(victim);
4051    }
4052   
4053    /* remove from unsorted list */
4054    unsorted_chunks(av)->bk = bck;
4055    bck->fd = unsorted_chunks(av);
4056   
4057    /* Take now instead of binning if exact fit */
4058   
4059    if (UNLIKELY(size == nb)) {
4060      set_all_inuse(victim);
4061      check_malloced_chunk(victim, nb);
4062      guard_set(av->guard_stored, victim, bytes, nb);
4063      VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4064      return chunk(victim);
4065    }
4066   
4067    /* place chunk in bin */
4068   
4069    if (in_smallbin_range(size)) {
4070
4071      victim_index = smallbin_index(size);
4072      bck = bin_at(av, victim_index);
4073      fwd = bck->fd;
4074    }
4075    else {
4076      victim_index = largebin_index(size);
4077      bck = bin_at(av, victim_index);
4078      fwd = bck->fd;
4079     
4080      if (UNLIKELY(fwd != bck)) {
4081        /* if smaller than smallest, place first */
4082        if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4083          fwd = bck;
4084          bck = bck->bk;
4085        }
4086        else if ((CHUNK_SIZE_T)(size) >= 
4087                 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4088         
4089          /* maintain large bins in sorted order */
4090          size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4091          while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size)) 
4092            fwd = fwd->fd;
4093          bck = fwd->bk;
4094        }
4095      }
4096    }
4097
4098    mark_bin(av, victim_index);
4099    victim->bk = bck;
4100    victim->fd = fwd;
4101    fwd->bk = victim;
4102    bck->fd = victim;
4103  }
4104 
4105  /*
4106    If a large request, scan through the chunks of current bin to
4107    find one that fits.  (This will be the smallest that fits unless
4108    FIRST_SORTED_BIN_SIZE has been changed from default.)  This is
4109    the only step where an unbounded number of chunks might be
4110    scanned without doing anything useful with them. However the
4111    lists tend to be short.
4112  */
4113
4114  if (!in_smallbin_range(nb)) {
4115    bin = bin_at(av, idx);
4116   
4117    victim = last(bin);
4118
4119    if (UNLIKELY(victim != bin)) {
4120
4121      do {
4122        size = chunksize(victim);
4123     
4124        if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4125          remainder_size = size - nb;
4126          unlink(victim, bck, fwd);
4127       
4128          /* Split */
4129          if (remainder_size >= MINSIZE) {
4130            remainder = cireg_getfree();
4131            remainder->chunk = chunk_at_offset(chunk(victim), nb);
4132            unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4133            remainder->bk = remainder->fd = unsorted_chunks(av);
4134            set_head(victim, nb | PREV_INUSE | INUSE);
4135            set_head(remainder, remainder_size | PREV_INUSE);
4136            hashtable_insert(victim, remainder);
4137            check_malloced_chunk(victim, nb);
4138            guard_set(av->guard_stored, victim, bytes, nb);
4139            VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4140            return chunk(victim);
4141          } 
4142          /* Exhaust */
4143          else  {
4144            set_all_inuse(victim);
4145            check_malloced_chunk(victim, nb);
4146            guard_set(av->guard_stored, victim, bytes, nb);
4147            VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4148            return chunk(victim);
4149          }
4150        }
4151        victim = victim->bk;
4152      } while(victim != bin);
4153    }
4154  }
4155
4156  /*
4157    Search for a chunk by scanning bins, starting with next largest
4158    bin. This search is strictly by best-fit; i.e., the smallest
4159    (with ties going to approximately the least recently used) chunk
4160    that fits is selected.
4161   
4162    The bitmap avoids needing to check that most blocks are nonempty.
4163  */
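  /*
     Illustrative sketch (dlmalloc-style definitions; the exact macros may
     differ): the binmap stores one "bin may be non-empty" bit per bin,
     packed into words of 2^BINMAPSHIFT bits, so

         block = idx >> BINMAPSHIFT;
         bit   = 1U << (idx & ((1U << BINMAPSHIFT) - 1));

     A binmap word that is zero lets the loop below skip a whole block of
     empty bins at once.
  */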
4164   
4165
4166  ++idx;
4167  bin = bin_at(av,idx);
4168  block = idx2block(idx);
4169  map = av->binmap[block];
4170  bit = idx2bit(idx);
4171 
4172  for (;;) {
4173   
4174    /* Skip rest of block if there are no more set bits in this block.  */
4175    if (bit > map || bit == 0) {
4176      do {
4177        if (++block >= BINMAPSIZE)  /* out of bins */
4178          goto use_top;
4179      } while ( (map = av->binmap[block]) == 0);
4180     
4181      bin = bin_at(av, (block << BINMAPSHIFT));
4182      bit = 1;
4183    }
4184   
4185    /* Advance to bin with set bit. There must be one. */
4186    while ((bit & map) == 0) {
4187      bin = next_bin(bin);
4188      bit <<= 1;
4189      assert(bit != 0);
4190    }
4191   
4192    /* Inspect the bin. It is likely to be non-empty */
4193    victim = last(bin);
4194   
4195    /*  If a false alarm (empty bin), clear the bit. */
4196    if (victim == bin) {
4197      av->binmap[block] = map &= ~bit; /* Write through */
4198      bin = next_bin(bin);
4199      bit <<= 1;
4200    }
4201   
4202    else {
4203      size = chunksize(victim);
4204     
4205      /*  We know the first chunk in this bin is big enough to use. */
4206      assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4207     
4208      remainder_size = size - nb;
4209     
4210      /* unlink */
4211      bck = victim->bk;
4212      bin->bk = bck;
4213      bck->fd = bin;
4214     
4215      /* Split */
4216      if (remainder_size >= MINSIZE) {
4217        remainder = cireg_getfree();
4218        remainder->chunk = chunk_at_offset(chunk(victim), nb);
4219       
4220        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4221        remainder->bk = remainder->fd = unsorted_chunks(av);
4222        /* advertise as last remainder */
4223        if (in_smallbin_range(nb))
4224          av->last_remainder = remainder;
4225       
4226        set_head(victim, nb | PREV_INUSE | INUSE);
4227        set_head(remainder, remainder_size | PREV_INUSE);
4228        hashtable_insert(victim, remainder);
4229        check_malloced_chunk(victim, nb);
4230        guard_set(av->guard_stored, victim, bytes, nb);
4231        VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4232        return chunk(victim);
4233      }
4234      /* Exhaust */
4235      else {
4236        set_all_inuse(victim);
4237        check_malloced_chunk(victim, nb);
4238        guard_set(av->guard_stored, victim, bytes, nb);
4239        VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4240        return chunk(victim);
4241      }
4242     
4243    }
4244  }
4245
4246  use_top:
4247   
4248
4249  /*
4250    If large enough, split off the chunk bordering the end of memory
4251    (held in av->top). Note that this is in accord with the best-fit
4252    search rule.  In effect, av->top is treated as larger (and thus
4253    less well fitting) than any other available chunk since it can
4254    be extended to be as large as necessary (up to system
4255    limitations).
4256   
4257    We require that av->top always exists (i.e., has size >=
4258    MINSIZE) after initialization, so if it would otherwise be
4259    exhausted by the current request, it is replenished. (The main
4260    reason for ensuring it exists is that we may need MINSIZE space
4261    to put in fenceposts in sysmalloc.)
4262  */
4263 
4264  victim = av->top;
4265  size = chunksize(victim);
4266 
4267  if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
4268    remainder = cireg_getfree();
4269    remainder_size = size - nb;
4270    remainder->chunk = chunk_at_offset(chunk(victim), nb);
4271    av->top = remainder;
4272    set_head(victim, nb | PREV_INUSE | INUSE);
4273    set_head(remainder, remainder_size | PREV_INUSE);
4274    hashtable_insert(victim, remainder);
4275    check_malloced_chunk(victim, nb);
4276    guard_set(av->guard_stored, victim, bytes, nb);
4277    VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
4278    return chunk(victim);
4279  }
4280 
4281  /*
4282     If no space in top, relay to handle system-dependent cases
4283  */
4284  retval = sYSMALLOc(nb, av);
4285  if (retval) {
4286#if PARANOIA > 2
4287    victim = mem2chunk(retval); /* is used in guard_set macro */
4288#endif
4289    guard_set(av->guard_stored, victim, bytes, nb);
4290  }
4291  VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
4292  return retval;
4293}
4294
4295/*
4296  ------------------------------ free ------------------------------
4297*/
4298
4299#if __STD_C
4300DL_STATIC void fREe(Void_t* mem)
4301#else
4302DL_STATIC void fREe(mem) Void_t* mem;
4303#endif
4304{
4305  mstate av = get_malloc_state();
4306
4307  chunkinfoptr       p;           /* chunk corresponding to mem */
4308  INTERNAL_SIZE_T size;        /* its size */
4309  mfastbinptr*    fb;          /* associated fastbin */
4310  chunkinfoptr       prevchunk;   /* previous physical chunk */
4311  chunkinfoptr       nextchunk;   /* next contiguous chunk */
4312  INTERNAL_SIZE_T nextsize;    /* its size */
4313  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
4314  chunkinfoptr       bck;         /* misc temp for linking */
4315  chunkinfoptr       fwd;         /* misc temp for linking */
4316  chunkinfoptr       next;
4317#if defined(SH_CUTEST)
4318  extern int malloc_count;
4319  --malloc_count;
4320#endif
4321
4322  /* free(0) has no effect */
4323  if (mem != 0) {
4324    p = hashtable_lookup(mem);
4325    /* check that memory is managed by us
4326     * and is inuse
4327     */
4328    if (UNLIKELY(!p || !inuse(p))) 
4329      {
4330#ifdef DNMALLOC_CHECKS
4331        if (p) {
4332          fprintf(stderr, "Attempt to free memory not in use\n");
4333          abort();
4334        } else {
4335          fprintf(stderr, "Attempt to free memory not allocated\n");
4336          abort();
4337        }
4338#endif
4339        assert(p && inuse(p));
4340        return;
4341      }
4342
4343    VALGRIND_FREELIKE_BLOCK(mem, 0);
4344 
4345    guard_check(av->guard_stored, p);
4346   
4347    size = chunksize(p);
4348
4349    check_inuse_chunk(p);
4350
4351    /*
4352      If eligible, place chunk on a fastbin so it can be found
4353      and used quickly in malloc.
4354    */
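    /*
       Illustrative sketch (not actual dnmalloc code): each fastbin is a
       LIFO singly-linked list threaded through the fd pointers, so both
       freeing onto it and reusing from it are O(1):

           p->fd = *fb;   *fb = p;            (push, as done just below)
           victim = *fb;  *fb = victim->fd;   (pop, as in mALLOc above)
    */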
4355
4356    if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
4357
4358#if TRIM_FASTBINS
4359        /*
4360           If TRIM_FASTBINS set, don't place chunks
4361           bordering top into fastbins
4362        */
4363        && (chunk_at_offset(chunk(p), size) != av->top)
4364#endif
4365        ) {
4366
4367      set_fastchunks(av);
4368      fb = &(av->fastbins[fastbin_index(size)]);
4369      p->fd = *fb;
4370      *fb = p;
4371    }
4372
4373    /*
4374       Consolidate other non-mmapped chunks as they arrive.
4375    */
4376
4377    else if (!chunk_is_mmapped(p)) {
4378      set_anychunks(av);
4379
4380      nextchunk = next_chunkinfo(p);
4381      if (nextchunk)
4382        nextsize = chunksize(nextchunk);
4383      else
4384        nextsize = 0;/* gcc doesn't notice that it's only used if (nextchunk)*/
4385
4386      /* consolidate backward */
4387      if (UNLIKELY(!prev_inuse(p))) {
4388        prevchunk = prev_chunkinfo(p);
4389        prevsize = chunksize(prevchunk);
4390#ifdef DNMALLOC_CHECKS
4391        if (inuse(prevchunk)) {
4392                fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
4393                abort();
4394        }
4395#else
4396        assert(!inuse(prevchunk));
4397#endif
4398        size += prevsize;
4399        unlink(prevchunk, bck, fwd);
4400        set_head(p, size | PREV_INUSE);
4401        hashtable_skiprm(prevchunk,p);
4402        /* This chunk no longer exists in any form: release the chunkinfoptr
4403         */
4404        freecilst_add(p);
4405        p = prevchunk;
4406      }
4407
4408      if (nextchunk) {
4409        if (nextchunk != av->top) {
4410          /* get and clear inuse bit */
4411          clear_previnuse(nextchunk);
4412         
4413          /* consolidate forward */
4414          if (!inuse(nextchunk)) {
4415            /* if mmap is used instead of sbrk, we may have a
4416             * chunk with !nextchunk->fd && !nextchunk->bk
4417             */
4418            if (nextchunk->fd && nextchunk->bk)
4419              {
4420                unlink(nextchunk, bck, fwd);
4421                size += nextsize;
4422                set_head(p, size | PREV_INUSE);
4423                hashtable_skiprm(p, nextchunk);
4424                freecilst_add (nextchunk);
4425              }
4426          }
4427         
4428          set_head(p, size | PREV_INUSE);
4429          next = next_chunkinfo(p);
4430          if (next)
4431            next->prev_size = size;
4432         
4433          /*
4434            Place the chunk in unsorted chunk list. Chunks are
4435            not placed into regular bins until after they have
4436            been given one chance to be used in malloc.
4437          */
4438         
4439          bck = unsorted_chunks(av);
4440          fwd = bck->fd;
4441          p->bk = bck;
4442          p->fd = fwd;
4443          bck->fd = p;
4444          fwd->bk = p;
4445         
4446          nextchunk = next_chunkinfo(p);
4447          if (nextchunk)
4448            nextchunk->prev_size = chunksize(p);       
4449         
4450          check_free_chunk(p);
4451        }
4452       
4453        /*
4454          If the chunk borders the current high end of memory,
4455          consolidate into top
4456        */
4457       
4458        else {
4459          size += nextsize;
4460          set_head(p, size | PREV_INUSE);
4461          hashtable_remove(chunk(av->top));
4462          freecilst_add(av->top);
4463          av->top = p;
4464          check_chunk(p);
4465        }
4466      } /* if (nextchunk) */
4467
4468      /*
4469        If freeing a large space, consolidate possibly-surrounding
4470        chunks. Then, if the total unused topmost memory exceeds trim
4471        threshold, ask malloc_trim to reduce top.
4472
4473        Unless max_fast is 0, we don't know if there are fastbins
4474        bordering top, so we cannot tell for sure whether threshold
4475        has been reached unless fastbins are consolidated.  But we
4476        don't want to consolidate on each free.  As a compromise,
4477        consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4478        is reached.
4479      */
4480
4481      if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) { 
4482        if (have_fastchunks(av)) 
4483          malloc_consolidate(av);
4484
4485#ifndef MORECORE_CANNOT_TRIM       
4486        if ((CHUNK_SIZE_T)(chunksize(av->top)) >= 
4487            (CHUNK_SIZE_T)(av->trim_threshold))
4488          {
4489            if (morecore32bit(av))
4490              {
4491#ifdef DNMALLOC_DEBUG
4492                fprintf(stderr, "Calling systrim from free()\n");
4493#endif
4494                sYSTRIm(av->top_pad, av);
4495#ifdef DNMALLOC_DEBUG
4496                fprintf(stderr, "Systrim done\n");
4497#endif
4498              }
4499          }
4500#endif
4501      }
4502
4503    }
4504    /*
4505      If the chunk was allocated via mmap, release via munmap()
4506      Note that if HAVE_MMAP is false but chunk_is_mmapped is
4507      true, then user must have overwritten memory. There's nothing
4508      we can do to catch this error unless DEBUG is set, in which case
4509      check_inuse_chunk (above) will have triggered error.
4510    */
4511
4512    else {
4513#if  PARANOIA > 0
4514      int ret;
4515#endif
4516      INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
4517      av->n_mmaps--;
4518      av->mmapped_mem -= (size + offset);
4519#if  PARANOIA > 0
4520      ret = munmap((char*) chunk(p) - offset, size + offset);
4521#else
4522      munmap((char*) chunk(p) - offset, size + offset);
4523#endif
4524      hashtable_remove_mmapped(chunk(p));
4525      freecilst_add(p);
4526      /* munmap returns non-zero on failure */
4527      assert(ret == 0);
4528    }
4529  }
4530}
4531
4532/*
4533  ------------------------- malloc_consolidate -------------------------
4534
4535  malloc_consolidate is a specialized version of free() that tears
4536  down chunks held in fastbins.  Free itself cannot be used for this
4537  purpose since, among other things, it might place chunks back onto
4538  fastbins.  So, instead, we need to use a minor variant of the same
4539  code.
4540 
4541  Also, because this routine needs to be called the first time through
4542  malloc anyway, it turns out to be the perfect place to trigger
4543  initialization code.
4544*/
4545
4546#if __STD_C
4547static void malloc_consolidate(mstate av)
4548#else
4549static void malloc_consolidate(av) mstate av;
4550#endif
4551{
4552  mfastbinptr*    fb;                 /* current fastbin being consolidated */
4553  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
4554  chunkinfoptr       p;                  /* current chunk being consolidated */
4555  chunkinfoptr       nextp;              /* next chunk to consolidate */
4556  chunkinfoptr       prevp;
4557  chunkinfoptr       unsorted_bin;       /* bin header */
4558  chunkinfoptr       first_unsorted;     /* chunk to link to */
4559
4560  /* These have same use as in free() */
4561  chunkinfoptr       nextchunk;
4562  INTERNAL_SIZE_T size;
4563  INTERNAL_SIZE_T nextsize;
4564  INTERNAL_SIZE_T prevsize;
4565  chunkinfoptr       bck;
4566  chunkinfoptr       fwd;
4567  chunkinfoptr       next;
4568 
4569  /*
4570    If max_fast is 0, we know that av hasn't
4571    yet been initialized, in which case do so below
4572  */
4573  if (av && av->max_fast != 0) {
4574
4575    clear_fastchunks(av);
4576
4577    unsorted_bin = unsorted_chunks(av);
4578
4579    /*
4580      Remove each chunk from fast bin and consolidate it, placing it
4581      then in unsorted bin. Among other reasons for doing this,
4582      placing in unsorted bin avoids needing to calculate actual bins
4583      until malloc is sure that chunks aren't immediately going to be
4584      reused anyway.
4585    */
4586   
4587    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4588    fb = &(av->fastbins[0]);
4589    do {
4590      if ( UNLIKELY((p = *fb) != 0)) {
4591        *fb = 0;
4592        do {
4593          check_inuse_chunk(p);
4594          nextp = p->fd;
4595         
4596          /*
4597           * Slightly streamlined version of consolidation code in free()
4598           */
4599
4600          size = chunksize(p);
4601          nextchunk = next_chunkinfo(p);
4602
4603          /* gcc doesn't notice that it's only used if (nextchunk) */
4604          if (nextchunk)
4605            nextsize = chunksize(nextchunk);
4606          else
4607            nextsize = 0; 
4608         
4609          if (!prev_inuse(p)) {
4610             prevp = prev_chunkinfo(p);
4611             prevsize = chunksize(prevp);
4612             size += prevsize;
4613#ifdef DNMALLOC_CHECKS
4614             if (inuse(prevp)) {
4615                fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
4616                     abort();
4617             }
4618#else
4619             assert(!inuse(prevp));
4620#endif
4621             unlink(prevp, bck, fwd);
4622             set_head(p, size | PREV_INUSE);         
4623             hashtable_skiprm(prevp,p);
4624             freecilst_add(p);
4625             p=prevp;
4626          }
4627         
4628          if (nextchunk) {
4629            if (nextchunk != av->top) {
4630
4631              clear_previnuse(nextchunk);
4632           
4633              /* if mmap is used instead of sbrk, we may have a
4634               * chunk with !nextchunk->fd && !nextchunk->bk
4635               */
4636              if (!inuse(nextchunk)) {
4637                if( nextchunk->fd && nextchunk->bk) {
4638                  size += nextsize;
4639                  unlink(nextchunk, bck, fwd);
4640                  set_head(p, size | PREV_INUSE);
4641                  hashtable_skiprm(p,nextchunk);
4642                  freecilst_add(nextchunk);
4643                }
4644              }
4645             
4646              first_unsorted = unsorted_bin->fd;
4647              unsorted_bin->fd = p;
4648              first_unsorted->bk = p;
4649             
4650              set_head(p, size | PREV_INUSE);
4651              p->bk = unsorted_bin;
4652              p->fd = first_unsorted;
4653              next = next_chunkinfo(p);
4654              if (next)
4655                next->prev_size = size;
4656
4657           
4658            }
4659         
4660            else if (nextchunk == av->top) {
4661              size += nextsize;
4662              set_head(p, size | PREV_INUSE);
4663              hashtable_remove(chunk(av->top));
4664              freecilst_add(av->top);
4665              av->top = p;
4666            }
4667          } /* if (nextchunk) */
4668         
4669        } while ( (p = nextp) != 0);
4670       
4671      }
4672    } while (fb++ != maxfb);
4673  }
4674  else {
4675    /* Initialize dnmalloc */
4676    dnmalloc_init();
4677    malloc_init_state(get_malloc_state());
4678    check_malloc_state();
4679  }
4680}
4681
4682/*
4683  ------------------------------ realloc ------------------------------
4684*/
4685
4686
4687#if __STD_C
4688DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4689#else
4690DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4691#endif
4692{
4693  mstate av = get_malloc_state();
4694
4695  INTERNAL_SIZE_T  nb;              /* padded request size */
4696
4697  chunkinfoptr        oldp;            /* chunk corresponding to oldmem */
4698  INTERNAL_SIZE_T  oldsize;         /* its size */
4699
4700  chunkinfoptr        newp;            /* chunk to return */
4701  INTERNAL_SIZE_T  newsize;         /* its size */
4702  Void_t*          newmem;          /* corresponding user mem */
4703
4704  chunkinfoptr        next;            /* next contiguous chunk after oldp */
4705
4706  chunkinfoptr        remainder;       /* extra space at end of newp */
4707  CHUNK_SIZE_T     remainder_size;  /* its size */
4708
4709  chunkinfoptr        bck;             /* misc temp for linking */
4710  chunkinfoptr        fwd;             /* misc temp for linking */
4711
4712  CHUNK_SIZE_T     copysize;        /* bytes to copy */
4713  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
4714  INTERNAL_SIZE_T* s;               /* copy source */ 
4715  INTERNAL_SIZE_T* d;               /* copy destination */
4716
4717 
4718#ifdef REALLOC_ZERO_BYTES_FREES
4719  if (UNLIKELY(bytes == 0)) {
4720    fREe(oldmem);
4721    return 0;
4722  }
4723#endif
4724
4725  if (UNLIKELY(!av || av->max_fast == 0)) {
4726    malloc_consolidate(av);
4727    av = get_malloc_state();
4728  }
4729
4730  /* realloc of null is supposed to be same as malloc */
4731  if (UNLIKELY(oldmem == 0)) 
4732    return mALLOc(bytes);
4733
4734  checked_request2size(bytes, nb);
4735
4736  oldp    = hashtable_lookup(oldmem);
4737 
4738  if (UNLIKELY(!oldp || !inuse(oldp))){ 
4739     /* attempt to either realloc memory not managed by us
4740      * or memory that is not in use
4741      */
4742#ifdef DNMALLOC_CHECKS
4743    if (oldp) {
4744      fprintf(stderr, "Attempt to free memory not in use\n");
4745      abort();
4746    } else {
4747      fprintf(stderr, "Attempt to free memory not allocated\n");
4748      abort();
4749    }
4750#endif
4751    assert(oldp && inuse(oldp));
4752    return 0;     
4753  }
4754
4755  VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4756  guard_check(av->guard_stored, oldp);
4757
4758  oldsize = chunksize(oldp);
4759
4760  check_inuse_chunk(oldp);
4761
4762  if (!chunk_is_mmapped(oldp)) {
4763
4764    if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
4765      /* already big enough; split below */
4766      newp    = oldp;
4767      newsize = oldsize;
4768    }
4769
4770    else {
4771      next = next_chunkinfo(oldp);
4772      if (next)
4773        next->prev_size = oldsize;
4774      /* Try to expand forward into top */
4775      if (next && next == av->top &&
4776          (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4777          (CHUNK_SIZE_T)(nb + MINSIZE)) {
4778         set_head_size(oldp, nb);
4779         hashtable_remove(chunk(av->top));
4780         av->top->chunk = chunk_at_offset(chunk(oldp), nb);
4781         set_head(av->top, (newsize - nb) | PREV_INUSE);
4782         /* av->top->chunk has been moved; update its entry in the hashtable */
4783         hashtable_insert(oldp, av->top);
4784         guard_set(av->guard_stored, oldp, bytes, nb);
4785         VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0); 
4786         return chunk(oldp);
4787      }
4788     
4789      /* Try to expand forward into next chunk;  split off remainder below */
4790      else if (next && next != av->top && 
4791               !inuse(next) &&
4792               (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4793               (CHUNK_SIZE_T)(nb)) {
4794        newp = oldp;
4795        unlink(next, bck, fwd);
4796        hashtable_remove(chunk(next));
4797        freecilst_add(next);
4798        next = next_chunkinfo(oldp);
4799        if (next)
4800          next->prev_size = newsize;
4801      }
4802
4803      /* allocate, copy, free */
4804      else {
4805
4806        newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4807         if (newmem == 0)
4808          return 0; /* propagate failure */
4809
4810        newp = hashtable_lookup(newmem);
4811        newsize = chunksize(newp);
4812       
4813 
4814        /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
4815        /*
4816          Avoid copy if newp is next chunk after oldp.
4817        */
4818        if (UNLIKELY(is_next_chunk(oldp, newp))) {
4819          newsize += oldsize;
4820          set_head_size(oldp, newsize);
4821          hashtable_skiprm(oldp, newp);
4822          freecilst_add(newp);   
4823          newp = oldp;
4824        }
4825        else {
4826          /*
4827            Unroll copy of <= 40 bytes (80 if 8byte sizes)
4828            We know that contents have an even number of
4829            INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
4830          */
4831         
4832          VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);
4833
4834          copysize = oldsize;
4835          s = (INTERNAL_SIZE_T*)(oldmem);
4836          d = (INTERNAL_SIZE_T*)(newmem);
4837          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4838          assert(ncopies >= 2);
4839         
4840          if (ncopies > 10)
4841            MALLOC_COPY(d, s, copysize);
4842         
4843          else {
4844            *(d+0) = *(s+0);
4845            *(d+1) = *(s+1);
4846            if (ncopies > 2) {
4847              *(d+2) = *(s+2);
4848              *(d+3) = *(s+3);
4849              if (ncopies > 4) {
4850                *(d+4) = *(s+4);
4851                *(d+5) = *(s+5);
4852                if (ncopies > 6) {
4853                  *(d+6) = *(s+6);
4854                  *(d+7) = *(s+7);
4855                  if (ncopies > 8) {
4856                    *(d+8) = *(s+8);
4857                    *(d+9) = *(s+9);
4858                  }
4859                }
4860              }
4861            }
4862          }
4863         
4864          fREe(oldmem);
4865          check_inuse_chunk(newp);
4866          guard_set(av->guard_stored, newp, bytes, nb);
4867          return chunk(newp);
4868        }
4869      }
4870    }
4871
4872    /* If possible, free extra space in old or extended chunk */
4873
4874    assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4875
4876    remainder_size = newsize - nb;
4877
4878    if (remainder_size >= MINSIZE) { /* split remainder */
4879      remainder = cireg_getfree();
4880      remainder->chunk = chunk_at_offset(chunk(newp), nb);
4881      set_head_size(newp, nb);
4882      set_head(remainder, remainder_size | PREV_INUSE | INUSE);
4883      remainder->prev_size = nb;
4884      hashtable_insert(newp, remainder);
4885      /* Mark remainder as inuse so free() won't complain */
4886      set_all_inuse(remainder);
4887      guard_set(av->guard_stored, remainder, 0, remainder_size);
4888      VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
4889      fREe(chunk(remainder)); 
4890    }
4891    else { /* not enough extra to split off */
4892      set_head_size(newp, newsize);
4893      set_all_inuse(newp);
4894    }
4895
4896    check_inuse_chunk(newp);
4897    guard_set(av->guard_stored, newp, bytes, nb);
4898    VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
4899    return chunk(newp);
4900  }
4901
4902  /*
4903    Handle mmap cases
4904  */
4905
4906  else {
4907
4908#if HAVE_MREMAP
4909    INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
4910    size_t pagemask = av->pagesize - 1;
4911    char *cp;
4912    CHUNK_SIZE_T  sum;
4913   
4914
4915    /* offset = distance from the start of the mmapped region to the chunk,
4916       kept in hash_next; no extra SIZE_SZ overhead is needed here. */
4917    newsize = (nb + offset + pagemask) & ~pagemask;
4918
4919    /* no need to remap if the page-aligned size is unchanged */
4920    if (oldsize == newsize - offset)
4921      {
4922        guard_set(av->guard_stored, oldp, bytes, nb);
4923        VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4924        VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
4925        return oldmem;
4926      }
4927
4928    cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);
4929   
4930    if (cp != (char*)MORECORE_FAILURE) {
4931       
4932      hashtable_remove_mmapped(chunk(oldp));
4933       
4934      oldp->chunk = (mchunkptr)(cp + offset);
4935      set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);
4936     
4937      hashtable_add(oldp);
4938     
4939      assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
4940      assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));
4941     
4942      /* update statistics */
4943      sum = av->mmapped_mem += newsize - oldsize;
4944      if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) 
4945        av->max_mmapped_mem = sum;
4946      sum += av->sbrked_mem;
4947      if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) 
4948        av->max_total_mem = sum;
4949     
4950      guard_set(av->guard_stored, oldp, bytes, nb);
4951      VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4952      VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
4953      return chunk(oldp);
4954    }
4955#endif /* have MREMAP */
4956
4957    /* Note the extra SIZE_SZ overhead. */
4958    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ)) 
4959      newmem = oldmem; /* do nothing */
4960    else {
4961      /* Must alloc, copy, free. */
4962      newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4963      if (newmem != 0) {
4964        MALLOC_COPY(newmem, oldmem, oldsize);
4965        fREe(oldmem);
4966      }
4967    }
4968    VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
4969    guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
4970    return newmem;
4971  }
4972}
4973
4974/*
4975  ---------------------------posix_memalign ----------------------------
4976*/
4977
4978#if __STD_C
4979DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4980#else
4981DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4982#endif
4983{
4984  mstate av;
4985
4986  if (alignment % sizeof(void *) != 0)
4987    return EINVAL;
4988  if ((alignment & (alignment - 1)) != 0)
4989    return EINVAL;
4990
4991  av = get_malloc_state();
4992  if (!av || av->max_fast == 0) malloc_consolidate(av);
4993  *memptr =  mEMALIGn(alignment, bytes);
4994
4995  return (*memptr != NULL ? 0 : ENOMEM);
4996}
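
/*
  Illustrative caller-side use of the POSIX interface above, assuming the
  public name posix_memalign is mapped onto posix_mEMALIGn elsewhere in
  the build:

      void *buf = NULL;
      int   rc  = posix_memalign(&buf, 64, 1024);
      if (rc == 0) {
          ...                  (buf is 64-byte aligned; release it with free())
          free(buf);
      }

  rc is EINVAL if the alignment is not a power of two or not a multiple
  of sizeof(void *), and ENOMEM if the allocation fails.
*/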
4997
4998/*
4999  ------------------------------ memalign ------------------------------
5000*/
5001
5002#if __STD_C
5003DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
5004#else
5005DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
5006#endif
5007{
5008  INTERNAL_SIZE_T nb;             /* padded  request size */
5009  char*           m;              /* memory returned by malloc call */
5010  chunkinfoptr       p;              /* corresponding chunk */
5011  char*           brk;            /* alignment point within p */
5012  chunkinfoptr       newp;           /* chunk to return */
5013  INTERNAL_SIZE_T newsize;        /* its size */
5014  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
5015  chunkinfoptr       remainder;      /* spare room at end to split off */
5016  CHUNK_SIZE_T    remainder_size; /* its size */
5017  INTERNAL_SIZE_T size;
5018#if PARANOIA > 2
5019  mstate          av;
5020#endif
5021
5022  /* If the request needs no more alignment than we provide anyway, just relay to malloc */
5023
5024  if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);
5025
5026  /* Otherwise, ensure that it is at least a minimum chunk size */
5027
5028  if (alignment <  MINSIZE) alignment = MINSIZE;
5029
5030  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
5031  if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
5032    size_t a = MALLOC_ALIGNMENT * 2;
5033    while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
5034    alignment = a;
5035  }
5036
5037  checked_request2size(bytes, nb);
5038
5039  /*
5040    Strategy: over-allocate with malloc, find an aligned spot within the
5041    returned chunk, and then possibly free the leading and trailing space.
5042  */
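  /*
    Worked example (numbers purely illustrative): with alignment = 64 and
    a malloc return of 0x1090, brk is rounded up to 0x10c0, leaving a
    48-byte leader.  If that leader were smaller than MINSIZE, brk would
    be moved up by another 64 bytes; the extra alignment + MINSIZE
    requested below guarantees this always fits.
  */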
5043
5044
5045  /* Call malloc with worst case padding to hit alignment. */
5046
5047  m  = (char*)(mALLOc(nb + alignment + MINSIZE));
5048
5049  if (m == 0) return 0; /* propagate failure */
5050
5051#if PARANOIA > 2
5052  av = get_malloc_state();
5053#endif
5054
5055  p = hashtable_lookup((mchunkptr) m);
5056
5057  if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
5058
5059    /*
5060      Find an aligned spot inside chunk.  Since we need to give back
5061      leading space in a chunk of at least MINSIZE, if the first
5062      calculation places us at a spot with less than MINSIZE leader,
5063      we can move to the next aligned spot -- we've allocated enough
5064      total room so that this is always possible.
5065    */
5066
5067    brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
5068                           -((signed long) alignment)));
5069    if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
5070      brk += alignment;
5071
5072    newp = cireg_getfree();
5073    newp->chunk = (mchunkptr)brk;
5074    leadsize = brk - (char*)(chunk(p));
5075    newsize = chunksize(p) - leadsize;
5076
5077    /* For mmapped chunks, just adjust offset */
5078    if (UNLIKELY(chunk_is_mmapped(p))) {
5079      newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
5080      set_head(newp, newsize|IS_MMAPPED|INUSE);
5081      hashtable_remove_mmapped(chunk(p));
5082      freecilst_add(p);
5083      hashtable_add(newp);
5084      guard_set(av->guard_stored, newp, bytes, nb);
5085      return chunk(newp);
5086    }
5087
5088    /* Otherwise, give back leader, use the rest */
5089    set_head(newp, newsize | PREV_INUSE | INUSE);
5090    set_head_size(p, leadsize);
5091    set_all_inuse(newp);
5092    hashtable_add(newp); /* 20.05.2008 rw */
5093    guard_set(av->guard_stored, p, 0, leadsize);
5094    fREe(chunk(p));
5095    p = newp;
5096
5097    assert (newsize >= nb &&
5098            (((PTR_UINT)(chunk(p))) % alignment) == 0);
5099  }
5100
5101  /* Also give back spare room at the end */
5102  if (!chunk_is_mmapped(p)) {
5103    size = chunksize(p);
5104    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
5105       remainder = cireg_getfree();
5106       remainder_size = size - nb;
5107       remainder->chunk = chunk_at_offset(chunk(p), nb);
5108       set_head(remainder, remainder_size | PREV_INUSE | INUSE);
5109       set_head_size(p, nb);
5110       hashtable_add(remainder); /* 20.05.2008 rw */
5111       guard_set(av->guard_stored, remainder, 0, remainder_size);
5112       fREe(chunk(remainder));
5113    }
5114  }
5115
5116  check_inuse_chunk(p);
5117  guard_set(av->guard_stored, p, bytes, nb);
5118  return chunk(p);
5119}
5120
5121/*
5122  ------------------------------ calloc ------------------------------
5123*/
5124
5125#if __STD_C
5126DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5127#else
5128DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5129#endif
5130{
5131  chunkinfoptr p;
5132  CHUNK_SIZE_T  clearsize;
5133  CHUNK_SIZE_T  nclears;
5134  INTERNAL_SIZE_T* d;
5135  Void_t* mem;
5136 
5137 
  /* guard against integer overflow in n_elements * elem_size */
  if (elem_size != 0 && (n_elements * elem_size) / elem_size != n_elements)
    return 0;
5138  mem = mALLOc(n_elements * elem_size);
5139
5140  if (mem != 0) {
5141    p = hashtable_lookup(mem);
5142
5143    if (!chunk_is_mmapped(p))
5144    { 
5145      /*
5146        Unroll clear of <= 40 bytes (80 if 8byte sizes)
5147        We know that contents have an even number of
5148        INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5149      */
5150
5151      d = (INTERNAL_SIZE_T*)mem;
5152      clearsize = chunksize(p);
5153      nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5154      assert(nclears >= 2);
5155
5156      if (nclears > 10) {
5157        MALLOC_ZERO(d, clearsize);
5158      }
5159
5160      else {
5161        *(d+0) = 0;
5162        *(d+1) = 0;
5163        if (nclears > 2) {
5164          *(d+2) = 0;
5165          *(d+3) = 0;
5166          if (nclears > 4) {
5167            *(d+4) = 0;
5168            *(d+5) = 0;
5169            if (nclears > 6) {
5170              *(d+6) = 0;
5171              *(d+7) = 0;
5172              if (nclears > 8) {
5173                *(d+8) = 0;
5174                *(d+9) = 0;
5175              }
5176            }
5177          }
5178        }
5179      }
5180    }
5181#if ! MMAP_CLEARS
5182    else
5183    {
5184      d = (INTERNAL_SIZE_T*)mem;
5185      clearsize = chunksize(p);
5186      MALLOC_ZERO(d, clearsize);
5187    }
5188#endif
5189    /* Set guard again, since we just cleared it
5190     */
5191    guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5192  }
5193
5194  return mem;
5195}
5196
5197/*
5198  ------------------------------ valloc ------------------------------
5199*/
5200
5201#if __STD_C
5202DL_STATIC Void_t* vALLOc(size_t bytes)
5203#else
5204DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5205#endif
5206{
5207  /* Ensure initialization */
5208  mstate av = get_malloc_state();
5209  if (!av || av->max_fast == 0) {
5210    malloc_consolidate(av);
5211    av = get_malloc_state();
5212  }
5213  return mEMALIGn(av->pagesize, bytes);
5214}
5215
5216/*
5217  ------------------------------ pvalloc ------------------------------
5218*/
5219
5220
5221#if __STD_C
5222DL_STATIC Void_t* pVALLOc(size_t bytes)
5223#else
5224DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5225#endif
5226{
5227  mstate av = get_malloc_state();
5228  size_t pagesz;
5229
5230  /* Ensure initialization */
5231  if (!av || av->max_fast == 0) {
5232    malloc_consolidate(av);
5233    av = get_malloc_state();
5234  }
5235  pagesz = av->pagesize;
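  /* Round the request up to a whole number of pages, e.g. with a page
     size of 4096 (illustrative) a request of 5000 bytes becomes 8192. */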
5236  return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5237}
5238   
5239
5240/*
5241  ------------------------------ malloc_trim ------------------------------
5242*/
5243
5244#if __STD_C
5245DL_STATIC int mTRIm(size_t pad)
5246#else
5247DL_STATIC int mTRIm(pad) size_t pad;
5248#endif
5249{
5250  mstate av = get_malloc_state();
5251  /* Ensure initialization/consolidation */
5252  malloc_consolidate(av);
5253  av = get_malloc_state();
5254#ifndef MORECORE_CANNOT_TRIM
5255  if (morecore32bit(av))
5256    return sYSTRIm(pad, av);
5257  else
5258    return 0;
5259#else
5260  return 0;
5261#endif
5262}
5263
5264
5265
5266/*
5267  ------------------------- malloc_usable_size -------------------------
5268*/
5269
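/*
  mUSABLe() below reports the full chunk size for a pointer this
  allocator currently tracks as in use, and returns 0 for anything else
  (foreign or already-freed pointers).
*/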
5270#if __STD_C
5271DL_STATIC size_t mUSABLe(Void_t* mem)
5272#else
5273DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5274#endif
5275{
5276  chunkinfoptr p;
5277  if (mem != 0) {
5278    p = hashtable_lookup(mem);
5279    if (p && inuse(p)) return chunksize(p);
5280  }
5281  return 0;
5282}
5283
5284/*
5285  ------------------------------ mallinfo ------------------------------
5286*/
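/*
  How the fields are filled in below: arena is the sbrk'ed arena size and
  usmblks the recorded peak of total memory; hblks/hblkhd count and size
  the mmapped regions; ordblks counts top plus the chunks in the regular
  bins, smblks the fastbin chunks; fordblks is all free space (top,
  fastbins and regular bins), fsmblks the fastbin share of it; uordblks
  is arena minus the free space; keepcost is the size of the top chunk.
*/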
5287
5288DL_STATIC struct mallinfo mALLINFo()
5289{
5290  mstate av = get_malloc_state();
5291  struct mallinfo mi;
5292  unsigned int i;
5293  mbinptr b;
5294  chunkinfoptr p;
5295  INTERNAL_SIZE_T avail;
5296  INTERNAL_SIZE_T fastavail;
5297  int nblocks;
5298  int nfastblocks;
5299
5300  /* Ensure initialization */
5301  if (!av || av->top == 0) {
5302    malloc_consolidate(av);
5303    av = get_malloc_state();
5304  }
5305  check_malloc_state();
5306
5307  /* Account for top */
5308  avail = chunksize(av->top);
5309  nblocks = 1;  /* top always exists */
5310
5311  /* traverse fastbins */
5312  nfastblocks = 0;
5313  fastavail = 0;
5314
5315  for (i = 0; i < NFASTBINS; ++i) {
5316    for (p = av->fastbins[i]; p != 0; p = p->fd) {
5317      ++nfastblocks;
5318      fastavail += chunksize(p);
5319    }
5320  }
5321
5322  avail += fastavail;
5323
5324  /* traverse regular bins */
5325  for (i = 1; i < NBINS; ++i) {
5326    b = bin_at(av, i);
5327    for (p = last(b); p != b; p = p->bk) {
5328      ++nblocks;
5329      avail += chunksize(p);
5330    }
5331  }
5332
5333  mi.smblks = nfastblocks;
5334  mi.ordblks = nblocks;
5335  mi.fordblks = avail;
5336  mi.uordblks = av->sbrked_mem - avail;
5337  mi.arena = av->sbrked_mem;
5338  mi.hblks = av->n_mmaps;
5339  mi.hblkhd = av->mmapped_mem;
5340  mi.fsmblks = fastavail;
5341  mi.keepcost = chunksize(av->top);
5342  mi.usmblks = av->max_total_mem;
5343  return mi;
5344}
5345
5346/*
5347  ------------------------------ malloc_stats ------------------------------
5348*/
5349
5350DL_STATIC void mSTATs()
5351{
5352  struct mallinfo mi = mALLINFo();
5353
5354  fprintf(stderr, "hashtable = %10lu MB\n", 
5355          (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5356  fprintf(stderr, "max system bytes = %10lu\n",
5357          (CHUNK_SIZE_T)(mi.usmblks));
5358  fprintf(stderr, "system bytes     = %10lu  (%10lu sbrked, %10lu mmapped)\n",
5359          (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5360          (CHUNK_SIZE_T)(mi.arena),
5361          (CHUNK_SIZE_T)(mi.hblkhd));
5362  fprintf(stderr, "in use bytes     = %10lu\n",
5363          (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5364
5365}
5366
5367
5368/*
5369  ------------------------------ mallopt ------------------------------
5370*/
5371
5372#if __STD_C
5373DL_STATIC int mALLOPt(int param_number, int value)
5374#else
5375DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5376#endif
5377{
5378  mstate av = get_malloc_state();
5379  /* Ensure initialization/consolidation */
5380  malloc_consolidate(av);
5381  av = get_malloc_state();
5382
5383  switch(param_number) {
5384  case M_MXFAST:
5385    if (value >= 0 && value <= MAX_FAST_SIZE) {
5386      set_max_fast(av, value);
5387      return 1;
5388    }
5389    else
5390      return 0;
5391
5392  case M_TRIM_THRESHOLD:
5393    av->trim_threshold = value;
5394    return 1;
5395
5396  case M_TOP_PAD:
5397    av->top_pad = value;
5398    return 1;
5399
5400  case M_MMAP_THRESHOLD:
5401    av->mmap_threshold = value;
5402    return 1;
5403
5404  case M_MMAP_MAX:
5405    if (value != 0)
5406      return 0;
5407    av->n_mmaps_max = value;
5408    return 1;
5409
5410  default:
5411    return 0;
5412  }
5413}
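
/*
  Illustrative use, assuming the public name mallopt is mapped onto
  mALLOPt in the build: mallopt(M_MXFAST, 64) asks for requests of up to
  roughly 64 bytes to be served from the fastbins and returns 1; values
  outside the supported range, or parameters this implementation rejects
  (such as a non-zero M_MMAP_MAX), return 0.
*/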
5414
5415
5416/*      $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $     */
5417
5418/*
5419 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5420 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5421 *
5422 * Permission to use, copy, modify, and distribute this software for any
5423 * purpose with or without fee is hereby granted, provided that the above
5424 * copyright notice and this permission notice appear in all copies.
5425 *
5426 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5427 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5428 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5429 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5430 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5431 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5432 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5433 */
5434
5435/*
5436 * Arc4 random number generator for OpenBSD.
5437 *
5438 * This code is derived from section 17.1 of Applied Cryptography,
5439 * second edition, which describes a stream cipher allegedly
5440 * compatible with RSA Labs "RC4" cipher (the actual description of
5441 * which is a trade secret).  The same algorithm is used as a stream
5442 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5443 *
5444 * Here the stream cipher has been modified always to include the time
5445 * when initializing the state.  That makes it impossible to
5446 * regenerate the same random sequence twice, so this can't be used
5447 * for encryption, but will generate good random numbers.
5448 *
5449 * RC4 is a registered trademark of RSA Laboratories.
5450 */
5451
5452/* Moved u_int8_t -> unsigned char (portability)
5453 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5454 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5455 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5456 * This is arc4random(3) using urandom.
5457 */
5458
5459#include <fcntl.h>
5460#include <limits.h>
5461#include <stdlib.h>
5462#include <sys/param.h>
5463#include <sys/time.h>
5464
5465struct arc4_stream {
5466        unsigned char i;
5467        unsigned char j;
5468        unsigned char s[256];
5469};
5470
5471static int rs_initialized;
5472static struct arc4_stream rs;
5473static pid_t arc4_stir_pid;
5474static int arc4_count;
5475
5476static unsigned char arc4_getbyte(void);
5477
5478static void
5479arc4_init(void)
5480{
5481        int     n;
5482
5483        for (n = 0; n < 256; n++)
5484                rs.s[n] = n;
5485        rs.i = 0;
5486        rs.j = 0;
5487}
5488
5489static inline void
5490arc4_addrandom(unsigned char *dat, int datlen)
5491{
5492        int     n;
5493        unsigned char si;
5494
5495        rs.i--;
5496        for (n = 0; n < 256; n++) {
5497                rs.i = (rs.i + 1);
5498                si = rs.s[rs.i];
5499                rs.j = (rs.j + si + dat[n % datlen]);
5500                rs.s[rs.i] = rs.s[rs.j];
5501                rs.s[rs.j] = si;
5502        }
5503        rs.j = rs.i;
5504}
5505
5506#ifdef HAVE_SCHED_H
5507#include <sched.h>
5508#endif
5509
5510static void
5511arc4_stir(void)
5512{
5513        int     i;
5514        struct {
5515                struct timeval tv1;
5516                struct timeval tv2;
5517                u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
5518        } rdat;
5519#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5520        ssize_t sz = 0;
5521        int    fd;
5522#endif
5523 
5524        gettimeofday(&rdat.tv1, NULL);
5525
5526
5527        if (!rs_initialized) {
5528                arc4_init();
5529                rs_initialized = 1;
5530        }
5531
5532#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5533
5534#ifdef HAVE_SCHED_YIELD
5535        /* Yield the processor to introduce some random delay. */
5536        (void) sched_yield();
5537#endif
5538
5539        /*
5540         * Pthread problem in multithreaded code on *BSD.
5541         */
5542        fd = open("/dev/urandom", O_RDONLY);
5543        if (fd != -1) {
5544                sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
5545                /*
5546                 * gcc complains if we ignore the return value of read(), and
5547                 * the llvm/clang analyzer complains if we don't use it...
5548                 */
5549                if (sz > (-256)) /* always true */
5550                  close(fd);
5551        }
5552        /*
5553        if (sz > sizeof (rdat.rnd))
5554                sz = 0;
5555        */
5556#endif
5557
5558        arc4_stir_pid = getpid();
5559        gettimeofday(&rdat.tv2, NULL);
5560
5561        arc4_addrandom((void *)&rdat, sizeof(rdat));
5562
5563        /*
5564         * Discard early keystream, as per recommendations in:
5565         * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
5566         */
5567        for (i = 0; i < 256; i++)
5568                (void)arc4_getbyte();
5569        arc4_count = 1600000;
5570}
5571
5572static unsigned char
5573arc4_getbyte(void)
5574{
5575        unsigned char si, sj;
5576
5577        rs.i = (rs.i + 1);
5578        si = rs.s[rs.i];
5579        rs.j = (rs.j + si);
5580        sj = rs.s[rs.j];
5581        rs.s[rs.i] = sj;
5582        rs.s[rs.j] = si;
5583        return (rs.s[(si + sj) & 0xff]);
5584}
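
/*
  dnmalloc_arc4random() below hands back a pointer to a static 4-byte
  buffer of random bytes (not a NUL-terminated string), so callers must
  copy the four bytes before the next call.  The state is stirred again
  after extraction, presumably so that the returned bytes cannot later
  be reconstructed from it.
*/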
5585
5586
5587 /* Changed to return char* */
5588static char *
5589dnmalloc_arc4random(void)
5590{
5591        static char val[4];
5592       
5593        /* We only call this once, hence no need for locking. */
5594
5595        /* _ARC4_LOCK(); */
5596        arc4_count -= 4;
5597        if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5598                arc4_stir();
5599
5600        val[0] = (char) arc4_getbyte();
5601        val[1] = (char) arc4_getbyte();
5602        val[2] = (char) arc4_getbyte();
5603        val[3] = (char) arc4_getbyte();
5604
5605        arc4_stir();
5606        /* _ARC4_UNLOCK(); */
5607        return val;
5608}
5609
5610#else
5611int dnmalloc_pthread_init() { return 0; }
5612#endif /* ! USE_SYSTEM_MALLOC */