//==========================================================================
//
//      dlmalloc.cxx
//
//      Port of Doug Lea's malloc implementation
//
//==========================================================================
//####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with eCos; if not, write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
//
// As a special exception, if other files instantiate templates or use macros
// or inline functions from this file, or you compile this file and link it
// with other works to produce a work based on this file, this file does not
// by itself cause the resulting work to be covered by the GNU General Public
// License. However the source code for this file must still be made available
// in accordance with section (3) of the GNU General Public License.
//
// This exception does not invalidate any other reasons why a work based on
// this file might be covered by the GNU General Public License.
//
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
// at http://sources.redhat.com/ecos/ecos-license/
// -------------------------------------------
//####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):    Doug Lea (dl at g.oswego.edu), jlarmour
// Contributors:
// Date:         2000-06-18
// Purpose:      Doug Lea's malloc implementation
// Description:  Doug Lea's malloc has been ported to eCos. This file
//               provides the implementation in a way acceptable to eCos.
//               Substantial amounts of unnecessary bits (to eCos) of the
//               original implementation have been removed to make the
//               code more tractable. Note this may make a number of the
//               comments appear to make little sense, or no longer apply!
//               In particular, mmap support is removed entirely.
//               Also the memory is "sbrked" all at once at the
//               beginning, covering the entire memory region given at
//               construction, and there can be no more afterwards.
// Usage:        #include <cyg/memalloc/dlmalloc.hxx>
//
//
//####DESCRIPTIONEND####
//
//==========================================================================

// DOCUMENTATION FROM ORIGINAL FILE:
// (some now irrelevant parts elided)

//----------------------------------------------------------------------------

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl at cs.oswego.edu

* VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://g.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below.)

[ these have of course been renamed in the eCos port ]

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc. realloc of
     zero bytes calls free(p)

* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has worked
       reliably for Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit when the request
       size is treated as a long will return null.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       one exception: because requests for zero bytes allocate non-zero space,
       the worst case wastage for a request of zero bytes is 24 bytes.
155
156* Limitations
157
158    Here are some features that are NOT currently supported
159
160    * No user-definable hooks for callbacks and the like.
161    * No automated mechanism for fully checking that all accesses
162      to malloced memory stay within their bounds.
163    * No support for compaction.
164
165* Synopsis of compile-time options:
166
167    People have reported using previous versions of this malloc on all
168    versions of Unix, sometimes by tweaking some of the defines
169    below. It has been tested most extensively on Solaris and
170    Linux. It is also reported to work on WIN32 platforms.
171    People have also reported adapting this malloc for use in
172    stand-alone embedded systems.
173
174    The implementation is in straight, hand-tuned ANSI C.  Among other
175    consequences, it uses a lot of macros.  Because of this, to be at
176    all usable, this code should be compiled using an optimizing compiler
177    (for example gcc -O2) that can simplify expressions and control
178    paths.
179
180  CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG      (default: NOT defined)
181     Define to enable debugging. Adds fairly extensive assertion-based
182     checking to help track down memory errors, but noticeably slows down
183     execution.
184  MALLOC_LOCK              (default: NOT defined)
185  MALLOC_UNLOCK            (default: NOT defined)
186     Define these to C expressions which are run to lock and unlock
187     the malloc data structures.  Calls may be nested; that is,
188     MALLOC_LOCK may be called more than once before the corresponding
189     MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
190     that it already holds.
191  MALLOC_ALIGNMENT          (default: NOT defined)
192     Define this to 16 if you need 16 byte alignment instead of 8 byte alignment
193     which is the normal default.
194  SIZE_T_SMALLER_THAN_LONG (default: NOT defined)
195     Define this when the platform you are compiling has
196     sizeof(long) > sizeof(size_t).
197     The option causes some extra code to be generated to handle operations
198     that use size_t operands and have long results.
199  INTERNAL_SIZE_T           (default: size_t)
200     Define to a 32-bit type (probably `unsigned int') if you are on a
201     64-bit machine, yet do not want or need to allow malloc requests of
202     greater than 2^31 to be handled. This saves space, especially for
203     very small chunks.
204
205*/
206
207//----------------------------------------------------------------------------
208

/* Preliminaries */

#include <pkgconf/memalloc.h>          // configuration header
#include <pkgconf/infra.h>             // CYGDBG_USE_ASSERTS
#include <cyg/infra/cyg_type.h>        // types
#include <cyg/infra/cyg_ass.h>         // assertions
#include <stddef.h>                    // for size_t
#include <cyg/memalloc/dlmalloc.hxx>

/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG enabled, a
    number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling get_status() with DEBUG set will
    attempt to check every allocated and free chunk in the
    course of computing the summaries.

    Setting CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG may also be helpful if you
    are trying to modify this code. The assertions in the check routines
    spell out in more detail the assumptions and invariants underlying
    the algorithms.

*/

#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
# define ASSERT(x) CYG_ASSERTC( x )
#else
# define ASSERT(x) ((void)0)
#endif


/*
   Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
   lock and unlock the malloc data structures.  MALLOC_LOCK may be
   called recursively.
 */

#ifndef MALLOC_LOCK
#define MALLOC_LOCK
#endif

#ifndef MALLOC_UNLOCK
#define MALLOC_UNLOCK
#endif
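
/*
  Illustrative sketch only, not part of this port: on an eCos target the
  lock macros could be mapped onto the kernel scheduler lock, one of the
  few eCos synchronization mechanisms that may safely be nested as the
  comment above requires. The calls named below are real eCos kernel C
  API calls, but wiring them up here is an assumption, not something
  this file does:

      #include <cyg/kernel/kapi.h>

      #define MALLOC_LOCK    cyg_scheduler_lock()
      #define MALLOC_UNLOCK  cyg_scheduler_unlock()

  In practice eCos normally provides thread-safety in a wrapper layer
  above this implementation class rather than via these macros.
*/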

/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T Cyg_Mempool_dlmalloc_Implementation::Cyg_dlmalloc_size_t
#endif

/*
  Following is needed on implementations whereby long > size_t.
  The problem is caused because the code performs subtractions of
  size_t values and stores the result in long values.  In the case
  where long > size_t and the first value is actually less than
  the second value, the resultant value is positive.  For example,
  (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
  fact that assignment from unsigned to signed won't sign extend.
*/

#ifdef SIZE_T_SMALLER_THAN_LONG
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) )
#else
#define long_sub_size_t(x, y) ( (long)(x - y) )
#endif
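
/*
  Worked example of the failure mode described above, assuming 32-bit
  size_t and 64-bit long: with x = 0 and y = 1, (x - y) wraps to
  0xFFFFFFFF as a size_t, and widening that to long gives
  0x00000000FFFFFFFF (2^32 - 1) rather than -1, because the conversion
  from unsigned does not sign extend. The SIZE_T_SMALLER_THAN_LONG
  variant tests (x < y) first and negates the positive difference:

      long_sub_size_t(0, 1)  ==  -((long)(1 - 0))  ==  -1      correct
      (long)(0 - 1)          ==  0x00000000FFFFFFFF            wrong
*/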


#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY

#include <string.h>                    // memcpy, memset

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)

#else /* !CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif
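
/*
  Execution trace of the Duff's device copy above, assuming 4-byte
  INTERNAL_SIZE_T and MALLOC_COPY(dest, src, 24): mctmp = 24/4 = 6 words,
  which is < 8, so mcn = 0 and the switch jumps straight to `case 6',
  falling through cases 6..1 to copy exactly six words before the
  `if (mcn <= 0) break' terminates the loop. For 80 bytes (20 words),
  mcn = (20-1)/8 = 2 and mctmp = 20 % 8 = 4: the first pass copies 4
  words, then two further full 8-word passes around the for(;;) loop
  copy the remaining 16.
*/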


//----------------------------------------------------------------------------

/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The exception to all this is the special chunk `top', which doesn't
    bother using the trailing size field since there is no next
    contiguous chunk that would have to index off it. (After
    initialization, `top' is forced to always exist.)

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, and is used only if no other chunk is
       available.

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

*/

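/*
  For reference: the chunk header manipulated throughout this file is
  declared in <cyg/memalloc/dlmalloc.hxx>. A minimal sketch of the
  conventional dlmalloc layout, using the field names relied on below
  (consult the header for the authoritative definition):

      struct malloc_chunk {
          INTERNAL_SIZE_T      prev_size; // size of previous chunk (if free)
          INTERNAL_SIZE_T      size;      // chunk size + PREV_INUSE status bit
          struct malloc_chunk *fd;        // forward link (only while free)
          struct malloc_chunk *bk;        // back link    (only while free)
      };
*/
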
typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mchunkptr;

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))

#ifdef CYGNUM_MEMALLOC_ALLOCATOR_DLMALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT (1<<(CYGNUM_MEMALLOC_ALLOCATOR_DLMALLOC_ALIGNMENT))
#endif

#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGN           8
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#else
#define MALLOC_ALIGN           MALLOC_ALIGNMENT
#endif
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                \
             (sizeof(struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((cyg_uint8*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
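
/*
  Worked example, assuming 4-byte SIZE_SZ (so MALLOC_ALIGN_MASK == 7 and
  MINSIZE == 16): request2size(100) = (100 + 4 + 7) & ~7 = 104, i.e. the
  4-byte size/status header is added and the total rounded up to the
  8-byte alignment unit. request2size(10) = (10 + 4 + 7) = 21, which is
  below MINSIZE + MALLOC_ALIGN_MASK = 23, so the first arm of the macro
  clamps the result to MINSIZE = 16 instead.
*/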

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)


/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
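
/*
  Illustrative relationship between the macros above (a sketch, not code
  from this file): after a free chunk p of size s has been written out
  with

      set_head(p, s | PREV_INUSE);   // own size, previous chunk in use
      set_foot(p, s);                // chunk_at_offset(p, s)->prev_size = s

  the boundary tags are consistent in both directions:

      ASSERT(next_chunk(p) == chunk_at_offset(p, s));
      ASSERT(prev_chunk(next_chunk(p)) == p);  // footer lets free() walk back
*/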


//----------------------------------------------------------------------------

/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (bin_at(0)->fd)   /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

#ifndef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
static mbinptr av_[CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV * 2 + 2] = {
 0, 0,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};
#endif

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                          126)
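
/*
  Worked examples of bin_index above: a 64-byte chunk has (64 >> 9) == 0,
  so it lands in small bin 64 >> 3 == 8; a 600-byte chunk has
  (600 >> 9) == 1 <= 4, so it uses the second arm, bin
  56 + (600 >> 6) == 65; a 40960-byte chunk has (40960 >> 9) == 80 <= 84,
  giving bin 110 + (40960 >> 12) == 120.
*/
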
/*
  bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8
#define SMALLBIN_WIDTH_BITS   3
#define MAX_SMALLBIN        ((MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1)

#define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
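
/*
  Example, assuming 4-byte SIZE_SZ: malloc(100) pads to
  nb == request2size(100) == 104; is_small_request(104) holds and
  smallbin_index(104) == 13. A request padding to nb == 504 is NOT
  treated as small, because 504 is not below
  MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH == 504: the small-request fast
  path may also scan the next bin, which for such a size would no
  longer hold identically sized chunks.
*/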

/*
    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binblocks' is a
    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
    have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals. The bits are NOT always
    cleared as soon as all bins in a block are empty, but instead only
    when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned long)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
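
/*
  Example: with BINBLOCKWIDTH == 4, bins 36..39 all share block bit
  idx2binblock(36) == (1 << 9). Freeing a chunk into bin 37 sets that
  bit via mark_binblock(37), and the search loop in try_alloc() below
  can skip all four bins at once whenever bit 9 is clear.
*/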


//----------------------------------------------------------------------------

/*
  Debugging support
*/

#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG

/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc, in which case, please report it!)
*/

void
Cyg_Mempool_dlmalloc_Implementation::do_check_chunk( mchunkptr p )
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* Check for legal address ... */
  ASSERT((cyg_uint8 *)p >= arenabase);
  if (p != top)
    ASSERT((cyg_uint8 *)p + sz <= (cyg_uint8 *)top);
  else
    ASSERT((cyg_uint8 *)p + sz <= arenabase + arenasize);

} // Cyg_Mempool_dlmalloc_Implementation::do_check_chunk()


void
Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk(mchunkptr p)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  ASSERT(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
    ASSERT(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    ASSERT(next->prev_size == sz);
    /* ... and is fully consolidated */
    ASSERT(prev_inuse(p));
    ASSERT (next == top || inuse(next));

    /* ... and has minimally sane links */
    ASSERT(p->fd->bk == p);
    ASSERT(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    ASSERT(sz == SIZE_SZ);
} // Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk(mchunkptr p)
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  ASSERT(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    ASSERT(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    ASSERT(prev_inuse(next));
    ASSERT(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

} // Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(mchunkptr p,
                                              INTERNAL_SIZE_T s)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = long_sub_size_t(sz, s);

  do_check_inuse_chunk(p);

  /* Legal size ... */
  ASSERT((long)sz >= (long)MINSIZE);
  ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
  ASSERT(room >= 0);
  ASSERT(room < (long)MINSIZE);

  /* ... and alignment */
  ASSERT(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  ASSERT(prev_inuse(p));

} // Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk()


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif


//----------------------------------------------------------------------------

/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk =  P;                               \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)


//----------------------------------------------------------------------------

Cyg_Mempool_dlmalloc_Implementation::Cyg_Mempool_dlmalloc_Implementation(
                                            cyg_uint8 *base, cyg_int32 size,
                                            CYG_ADDRWORD /* argthru */ )
{
    arenabase = base;
    arenasize = size;

    CYG_ADDRESS front_misalign;
    cyg_int32 correction;

#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
    cyg_ucount16 i;
    av_[0] = av_[1] = 0;
    for (i=0; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; i++) {
        av_[ i*2+2 ] = av_[ i*2+3 ] = bin_at(i);
    } // for

#elif defined(CYGDBG_USE_ASSERTS)
    static int instances;
    if ( ++instances > 1 )
        CYG_FAIL( "Multiple dlmalloc instances but "
                  "CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE "
                  "not defined" );
#endif

    front_misalign = (CYG_ADDRESS)chunk2mem(base) & MALLOC_ALIGN_MASK;

    if ( front_misalign > 0 ) {
        correction = (MALLOC_ALIGNMENT) - front_misalign;
    } else {
        correction = 0;
    }

    // too small to be useful?
    if ( correction + 2*(unsigned)MALLOC_ALIGNMENT > (unsigned) size )
        // help catch errors. Don't fail now.
        arenabase = NULL;
    else {
        top = (mchunkptr)(base + correction);
        set_head(top, arenasize | PREV_INUSE);
    }
}
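
// Illustrative usage sketch (hypothetical heap and pool names, not part
// of this file): the implementation object is handed a raw region at
// construction and "sbrks" it all at once, as described at the top of
// the file. In a full eCos system this class is normally wrapped by the
// higher-level memory pool classes rather than used directly.
//
//     static cyg_uint8 heap[ 16384 ];
//     Cyg_Mempool_dlmalloc_Implementation pool( heap, sizeof(heap), 0 );
//
//     cyg_uint8 *p = pool.try_alloc( 100 );     // NULL on exhaustion
//     if ( p != NULL )
//         pool.free( p, 100 );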

//----------------------------------------------------------------------------

/* Main public routines */

/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps that succeeds is taken:

      1. The bin corresponding to the request size is scanned, and if
         a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
         enough.  This is a form of (roving) first fit, used only in
         the absence of exact fits. Runs of consecutive requests use
         the remainder of the chunk used for the previous such request
         whenever possible. This limited use of a first-fit style
         allocation strategy tends to give contiguous chunks
         coextensive lifetimes, which improves locality and can reduce
         fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
         chunk big enough to fulfill the request, and splitting off
         any remainder.  This search is strictly by best-fit; i.e.,
         the smallest (with ties going to approximately the least
         recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
         (`top') is split off. (This use of `top' is in accord with
         the best-fit search rule.  In effect, `top' is treated as
         larger (and thus less well fitting) than any other available
         chunk since it can be extended to be as large as necessary
         (up to system limitations).)

      All allocations are made from the `lowest' part of any found
      chunk. (The implementation invariant is that prev_inuse is
      always true of any allocated chunk; i.e., that each allocated
      chunk borders either a previously allocated and still in-use chunk,
      or the base of its memory arena.)

*/

cyg_uint8 *
Cyg_Mempool_dlmalloc_Implementation::try_alloc( cyg_int32 bytes )
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

  /*  Allow uninitialised (zero sized) heaps because they could exist as a
   *  quirk of the MLT setup where a dynamically sized heap is at the top of
   *  memory. */
  if (NULL==arenabase) return NULL;

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size */

  MALLOC_LOCK;

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

#if MALLOC_ALIGN != 16
    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
#endif
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = long_sub_size_t(victim_size, nb);

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
        --idx; /* adjust to rescan below after checking last remainder */
        break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
        unlink(victim, bck, fwd);
        set_inuse_bit_at_offset(victim, victim_size);
        check_malloced_chunk(victim, nb);
        MALLOC_UNLOCK;
        return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = long_sub_size_t(victim_size, nb);

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks)
  {

    /* Get to the first marked block */

    if ( (block & binblocks) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks) == 0)
      {
        idx += BINBLOCKWIDTH;
        block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
        /* Find and use first big enough chunk ... */

        for (victim = last(bin); victim != bin; victim = victim->bk)
        {
          victim_size = chunksize(victim);
          remainder_size = long_sub_size_t(victim_size, nb);

          if (remainder_size >= (long)MINSIZE) /* split */
          {
            remainder = chunk_at_offset(victim, nb);
            set_head(victim, nb | PREV_INUSE);
            unlink(victim, bck, fwd);
            link_last_remainder(remainder);
            set_head(remainder, remainder_size | PREV_INUSE);
            set_foot(remainder, remainder_size);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }

          else if (remainder_size >= 0)  /* take */
          {
            set_inuse_bit_at_offset(victim, victim_size);
            unlink(victim, bck, fwd);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }

        }

        bin = next_bin(bin);

#if MALLOC_ALIGN == 16
        if (idx < MAX_SMALLBIN)
        {
          bin = next_bin(bin);
          ++idx;
        }
#endif
      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
        if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
        {
          binblocks &= ~block;
          break;
        }
        --startidx;
        q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks && (block != 0) )
      {
        while ((block & binblocks) == 0)
        {
          idx += BINBLOCKWIDTH;
          block <<= 1;
        }
      }
      else
        break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists */
  remainder_size = long_sub_size_t(chunksize(top), nb);
  if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
  {
      //diag_printf("chunksize(top)=%ld, nb=%d, remainder=%ld\n", chunksize(top),
      //            nb, remainder_size);
      MALLOC_UNLOCK;
      CYG_MEMALLOC_FAIL(bytes);
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  MALLOC_UNLOCK;
  return chunk2mem(victim);

} // Cyg_Mempool_dlmalloc_Implementation::try_alloc()

//----------------------------------------------------------------------------

/*
  free() algorithm :

    cases:

       1. free(NULL) has no effect.

       2. Chunks are consolidated as they arrive, and
          placed in corresponding bins. (This includes the case of
          consolidating with the current `last_remainder').
*/

cyg_bool
Cyg_Mempool_dlmalloc_Implementation::free( cyg_uint8 *mem, cyg_int32 )
{
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;       /* misc temp for linking */
  mchunkptr fwd;       /* misc temp for linking */
  int       islr;      /* track whether merging with last_remainder */

  if (mem == NULL)                              /* free(NULL) has no effect */
    return false;

  MALLOC_LOCK;

  p = mem2chunk(mem);
  hd = p->size;

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)                            /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    MALLOC_UNLOCK;
    return true;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);

  MALLOC_UNLOCK;

  return true;
} // Cyg_Mempool_dlmalloc_Implementation::free()
1388
1389//----------------------------------------------------------------------------
1390
1391  // Resize an existing allocation. If poldsize is non-NULL, the previous
1392  // allocation size is placed into it; if not available, it is set to 0.
1393  // NB the previous allocation size may have been rounded up.
1394  // Occasionally the allocation can be adjusted *backwards* as well as,
1395  // or instead of, forwards, therefore the address of the resized
1396  // allocation is returned, or NULL if no resizing was possible.
1397  // Note that this differs from ::realloc() in that no attempt is
1398  // made to call malloc() if resizing in place is not possible - that is
1399  // left to higher layers. The data is copied from old to new, though.
1400  // The effects of oldmem==NULL or bytes==0 are undefined.
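// Illustrative sketch (not part of the original source): how a higher
// layer might build a conventional realloc() on top of resize_alloc().
// `pool' stands for a hypothetical Cyg_Mempool_dlmalloc_Implementation
// instance; try_alloc() and free() are the methods defined in this file,
// and memcpy() would need <string.h>.
//
//   cyg_uint8 *layer_realloc( cyg_uint8 *oldmem, cyg_int32 bytes )
//   {
//       cyg_int32 oldsize;
//       cyg_uint8 *p = pool.resize_alloc( oldmem, bytes, &oldsize );
//       if (NULL != p)
//           return p;              // resized in place (data already moved)
//       p = pool.try_alloc( bytes );
//       if (NULL == p)
//           return NULL;           // old allocation is left intact
//       memcpy( p, oldmem, oldsize < bytes ? oldsize : bytes );
//       pool.free( oldmem );       // malloc-copy-free fallback
//       return p;
//   }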
1401
1402
1403// DOCUMENTATION FROM ORIGINAL FILE:
1404// (some now irrelevant parts elided)
1405/*
1406  Realloc algorithm:
1407
1408    If the reallocation is for additional space, and the
1409    chunk can be extended, it is, else a malloc-copy-free sequence is
1410    taken.  There are several different ways that a chunk could be
1411    extended. All are tried:
1412
1413       * Extending forward into following adjacent free chunk.
1414       * Shifting backwards, joining preceding adjacent space
1415       * Both shifting backwards and extending forward.
1416
1417    If the reallocation is for less space, and the new request is for
1418    a `small' (<512 bytes) size, then the newly unused space is lopped
1419    off and freed.
1420
1421    The old unix realloc convention of allowing the last-free'd chunk
1422    to be used as an argument to realloc is no longer supported.
1423    I don't know of any programs still relying on this feature,
1424    and allowing it would also allow too many other incorrect
1425    usages of realloc to be sensible.
1426*/
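// Worked example (illustrative; the sizes are hypothetical, assuming
// SIZE_SZ == 4 and MINSIZE == 16): growing a chunk of oldsize 24 to a
// padded request nb == 40.
//
//   - Forward into a free next chunk of size 32: 24 + 32 = 56 >= 40,
//     so the chunks merge; the remainder 56 - 40 = 16 >= MINSIZE is
//     split off at the `split' label below and handed back to free().
//   - Forward into top instead demands the stricter test
//     24 + nextsize >= nb + MINSIZE, so that top is never consumed
//     entirely and always exists.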
1427
1428cyg_uint8 *
1429Cyg_Mempool_dlmalloc_Implementation::resize_alloc( cyg_uint8 *oldmem,
1430                                                   cyg_int32 bytes,
1431                                                   cyg_int32 *poldsize )
1432{
1433
1434  INTERNAL_SIZE_T nb;             /* padded request size */
1435
1436  mchunkptr oldp;                 /* chunk corresponding to oldmem */
1437  INTERNAL_SIZE_T oldsize;        /* its size */
1438
1439  mchunkptr newp;                 /* chunk to return */
1440  INTERNAL_SIZE_T newsize;        /* its size */
1441  cyg_uint8*   newmem;            /* corresponding user mem */
1442
1443  mchunkptr next;                 /* next contiguous chunk after oldp */
1444  INTERNAL_SIZE_T nextsize;       /* its size */
1445
1446  mchunkptr prev;                 /* previous contiguous chunk before oldp */
1447  INTERNAL_SIZE_T prevsize;       /* its size */
1448
1449  mchunkptr remainder;            /* holds split off extra space from newp */
1450  INTERNAL_SIZE_T remainder_size; /* its size */
1451
1452  mchunkptr bck;                  /* misc temp for linking */
1453  mchunkptr fwd;                  /* misc temp for linking */
1454
1455  MALLOC_LOCK;
1456
1457  newp    = oldp    = mem2chunk(oldmem);
1458  newsize = oldsize = chunksize(oldp);
1459
1460  if (NULL != poldsize)
1461      *poldsize = oldsize - SIZE_SZ;
1462
1463  nb = request2size(bytes);
1464
1465  check_inuse_chunk(oldp);
1466
1467  if ((long)(oldsize) < (long)(nb)) 
1468  {
1469
1470    /* Try expanding forward */
1471
1472    next = chunk_at_offset(oldp, oldsize);
1473    if (next == top || !inuse(next)) 
1474    {
1475      nextsize = chunksize(next);
1476
1477      /* Forward into top only if a remainder */
1478      if (next == top)
1479      {
1480        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1481        {
1482          newsize += nextsize;
1483          top = chunk_at_offset(oldp, nb);
1484          set_head(top, (newsize - nb) | PREV_INUSE);
1485          set_head_size(oldp, nb);
1486          MALLOC_UNLOCK;
1487          return chunk2mem(oldp);
1488        }
1489      }
1490
1491      /* Forward into next chunk */
1492      else if (((long)(nextsize + newsize) >= (long)(nb)))
1493      { 
1494        unlink(next, bck, fwd);
1495        newsize  += nextsize;
1496        goto split;
1497      }
1498    }
1499    else
1500    {
1501      next = 0;
1502      nextsize = 0;
1503    }
1504
1505    /* Try shifting backwards. */
1506
1507    if (!prev_inuse(oldp))
1508    {
1509      prev = prev_chunk(oldp);
1510      prevsize = chunksize(prev);
1511
1512      /* try forward + backward first to save a later consolidation */
1513
1514      if (next != 0)
1515      {
1516        /* into top */
1517        if (next == top)
1518        {
1519          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1520          {
1521            unlink(prev, bck, fwd);
1522            newp = prev;
1523            newsize += prevsize + nextsize;
1524            newmem = chunk2mem(newp);
1525            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1526            top = chunk_at_offset(newp, nb);
1527            set_head(top, (newsize - nb) | PREV_INUSE);
1528            set_head_size(newp, nb);
1529            MALLOC_UNLOCK;
1530            return newmem;
1531          }
1532        }
1533
1534        /* into next chunk */
1535        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1536        {
1537          unlink(next, bck, fwd);
1538          unlink(prev, bck, fwd);
1539          newp = prev;
1540          newsize += nextsize + prevsize;
1541          newmem = chunk2mem(newp);
1542          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1543          goto split;
1544        }
1545      }
1546     
1547      /* backward only */
1548      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) 
1549      {
1550        unlink(prev, bck, fwd);
1551        newp = prev;
1552        newsize += prevsize;
1553        newmem = chunk2mem(newp);
1554        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1555        goto split;
1556      }
1557    }
1558
1559    // couldn't resize the allocation in any direction, so return failure
1560    MALLOC_UNLOCK;
1561    CYG_MEMALLOC_FAIL(bytes);
1562    return NULL;
1563  }
1564
1565
1566 split:  /* split off extra room in old or expanded chunk */
1567
1568  remainder_size = long_sub_size_t(newsize, nb);
1569
1570  if (remainder_size >= (long)MINSIZE) /* split off remainder */
1571  {
1572    remainder = chunk_at_offset(newp, nb);
1573    set_head_size(newp, nb);
1574    set_head(remainder, remainder_size | PREV_INUSE);
1575    set_inuse_bit_at_offset(remainder, remainder_size);
1576    /* let free() deal with it */
1577    Cyg_Mempool_dlmalloc_Implementation::free( chunk2mem(remainder) );
1578  }
1579  else
1580  {
1581    set_head_size(newp, newsize);
1582    set_inuse_bit_at_offset(newp, newsize);
1583  }
1584
1585  check_inuse_chunk(newp);
1586  MALLOC_UNLOCK;
1587  return chunk2mem(newp);
1588
1589} // Cyg_Mempool_dlmalloc_Implementation::resize_alloc()
1590
1591//----------------------------------------------------------------------------
1592
1593// Get memory pool status
1594// flags is a bitmask of requested fields to fill in. The flags are
1595// defined in common.hxx
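// Illustrative usage (not part of the original source; `pool' is a
// hypothetical instance): requesting only the fields needed, so the bin
// walk below is only paid for when a free-space statistic was asked for.
//
//   Cyg_Mempool_Status stat;
//   pool.get_status( CYG_MEMPOOL_STAT_TOTALFREE | CYG_MEMPOOL_STAT_MAXFREE,
//                    stat );
//   // totalfree/maxfree are aligned-down, overhead-adjusted estimates:
//   // e.g. a raw maxfree of 100 with 8-byte alignment, SIZE_SZ == 4 and
//   // MINSIZE == 16 reports (100 & ~7) - 4 - 16 == 76 usable bytes.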
1596void
1597Cyg_Mempool_dlmalloc_Implementation::get_status( 
1598                                  cyg_mempool_status_flag_t flags,
1599                                  Cyg_Mempool_Status &status )
1600{
1601    if (0 != (flags&(CYG_MEMPOOL_STAT_FREEBLOCKS|CYG_MEMPOOL_STAT_TOTALFREE|
1602                     CYG_MEMPOOL_STAT_TOTALALLOCATED|CYG_MEMPOOL_STAT_MAXFREE)))
1603    {
1604        int i;
1605        mbinptr b;
1606        mchunkptr p;
1607        cyg_int32 chunksizep;
1608        cyg_int32 maxfree;
1609#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
1610        mchunkptr q;
1611#endif
1612       
1613        INTERNAL_SIZE_T avail = chunksize(top);
1614        int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; 
1615        maxfree = avail;
1616       
1617        for (i = 1; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; ++i) {
1618            b = bin_at(i);
1619            for (p = last(b); p != b; p = p->bk) {
1620#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
1621                check_free_chunk(p);
1622                for (q = next_chunk(p); 
1623                     (q < top) && inuse(q) && 
1624                         (long)(chunksize(q)) >= (long)MINSIZE; 
1625                     q = next_chunk(q))
1626                    check_inuse_chunk(q);
1627#endif
1628                chunksizep = chunksize(p);
1629                avail += chunksizep;
1630                if ( chunksizep > maxfree )
1631                    maxfree = chunksizep;
1632                navail++;
1633            }
1634        }
1635   
1636        if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) )
1637            status.totalallocated = arenasize - avail;
1638        // as quick or quicker to just set most of these, rather than
1639        // test flag first
1640        status.totalfree = (avail & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
1641        CYG_ASSERT( ((avail + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1642                    >= MINSIZE, "free mem negative!" );
1643        status.freeblocks = navail;
1644        status.maxfree = (maxfree & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
1645        //diag_printf("raw mf: %d, ret mf: %d\n", maxfree, status.maxfree);
1646        CYG_ASSERT( ((maxfree + SIZE_SZ + MALLOC_ALIGN_MASK) & 
1647                    ~MALLOC_ALIGN_MASK) >= MINSIZE, 
1648                    "max free block size negative!" );
1649    } // if
1650
1651    // as quick or quicker to just set most of these, rather than
1652    // test flag first
1653    status.arenabase = status.origbase = arenabase;
1654    status.arenasize = status.origsize = arenasize;
1655    status.maxoverhead = MINSIZE + MALLOC_ALIGNMENT;
1656
1657} // Cyg_Mempool_dlmalloc_Implementation::get_status()
1658
1659
1660//----------------------------------------------------------------------------
1661
1662// EOF dlmalloc.cxx