/*
* Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#if defined(HAVE_CONFIG_H)
# include "config.h"
#endif
#ifdef DONT_USE_MMAP /* for testing */
# undef HAVE_MMAP
#endif
#if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(AO_HAVE_abort)
# define abort() _exit(-1) /* there is no abort() in WinCE */
#endif
/*
* We round up each allocation request to the next power of two
* minus one word.
* We keep one stack of free objects for each size. Each object
* has an initial word (offset -sizeof(AO_t) from the visible pointer)
* which contains either
* The binary log of the object size in bytes (small objects)
* The object size (a multiple of CHUNK_SIZE) for large objects.
* The second case only arises if mmap-based allocation is supported.
* We align the user-visible part of each object on an ALIGNMENT
* byte boundary. That means that the actual (hidden) start of
* the object starts a word before this boundary.
*/
#ifndef LOG_MAX_SIZE
# define LOG_MAX_SIZE 16
/* We assume that 2**LOG_MAX_SIZE is a multiple of page size. */
#endif
#ifndef ALIGNMENT
# define ALIGNMENT 16
/* Assumed to be at least sizeof(AO_t). */
#endif
#ifdef USE_MMAP_FIXED
# define GC_MMAP_FLAGS (MAP_FIXED | MAP_PRIVATE)
/* Seems to yield better performance on Solaris 2, but can */
/* be unreliable if something is already mapped at the address. */
#else
# define GC_MMAP_FLAGS MAP_PRIVATE
#endif
#ifdef USE_MMAP_ANON
# if defined(CPPCHECK)
# define OPT_MAP_ANON 0x20 /* taken from linux */
# elif defined(MAP_ANONYMOUS)
# define OPT_MAP_ANON MAP_ANONYMOUS
# else
# define OPT_MAP_ANON MAP_ANON
# endif
#else
# include <unistd.h> /* for close() */
# define OPT_MAP_ANON 0
#endif
/* Nonzero once the client has opted in to mmap-based allocation.   */
/* Written only by AO_malloc_enable_mmap() below; presumably read   */
/* by the allocation paths later in this file — confirm at callers. */
static volatile AO_t mmap_enabled = 0;

/* Enable the use of mmap for (large) allocations.  The flag is set */
/* with an atomic store so concurrently running allocations observe */
/* a consistent value.                                              */
AO_API void
AO_malloc_enable_mmap(void)
{
# if defined(__sun)
AO_store_release(&mmap_enabled, 1);
/* Workaround for Sun CC */
# else
AO_store(&mmap_enabled, 1);
# endif
}
#ifndef SIZE_MAX
# include <limits.h>
#endif
#if defined(SIZE_MAX) && !defined(CPPCHECK)
# define AO_SIZE_MAX ((size_t)SIZE_MAX)
/* Extra cast to workaround some buggy SIZE_MAX definitions. */
#else
# define AO_SIZE_MAX (~(size_t)0)
#endif
/* Saturated addition of size_t values. Used to avoid value wrap */
/* around on overflow. The arguments should have no side effects. */
#define SIZET_SAT_ADD(a, b) \
(AO_EXPECT_FALSE((a) >= AO_SIZE_MAX - (b)) ? AO_SIZE_MAX : (a) + (b))
/* Allocate an object of size (incl. header) of size > CHUNK_SIZE. */
/* sz includes space for an AO_t-sized header. */
static char *
AO_malloc_large(size_t sz)
{
void *result;
/* The header will force us to waste ALIGNMENT bytes, incl. header. */
/* Round to multiple of CHUNK_SIZE. */
sz = SIZET_SAT_ADD(sz, ALIGNMENT + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1);
assert(sz > LOG_MAX_SIZE);
result = get_mmaped(sz);
if (AO_EXPECT_FALSE(NULL == result))
return NULL;
for (;;) {
char *initial_ptr = (char *)AO_load(&initial_heap_ptr);
my_chunk_ptr = (char *)(((AO_t)initial_ptr + (ALIGNMENT - 1))
& ~(ALIGNMENT - 1));
if (initial_ptr != my_chunk_ptr)
{
/* Align correctly. If this fails, someone else did it for us. */
(void)AO_compare_and_swap_acquire(&initial_heap_ptr,
(AO_t)initial_ptr, (AO_t)my_chunk_ptr);
}
if (AO_EXPECT_FALSE((AO_t)my_chunk_ptr - (AO_t)AO_initial_heap
> (size_t)(AO_INITIAL_HEAP_SIZE - CHUNK_SIZE))) {
/* We failed. The initial heap is used up. */
my_chunk_ptr = get_mmaped(CHUNK_SIZE);
# if !defined(CPPCHECK)
assert(((AO_t)my_chunk_ptr & (ALIGNMENT-1)) == 0);
# endif
break;
}
if (AO_compare_and_swap(&initial_heap_ptr, (AO_t)my_chunk_ptr,
(AO_t)(my_chunk_ptr + CHUNK_SIZE))) {
break;
}
}
return my_chunk_ptr;
}
/* Object free lists. Ith entry corresponds to objects */
/* of total size 2**i bytes.                           */
/* Sized LOG_MAX_SIZE+1 so that index LOG_MAX_SIZE     */
/* itself (objects of 2**LOG_MAX_SIZE bytes) is valid. */
static AO_stack_t AO_free_list[LOG_MAX_SIZE+1];
/* Break up the chunk, and add it to the object free list for */
/* the given size. We have exclusive access to chunk. */
static void add_chunk_as(void * chunk, unsigned log_sz)
{
size_t ofs, limit;
size_t sz = (size_t)1 << log_sz;
/* Return the position of the most significant set bit in the */
/* argument. */
/* We follow the conventions of ffs(), i.e. the least */
/* significant bit is number one. */
static unsigned msb(size_t s)
{
  /* 1-based (ffs-style) position of the most significant set bit    */
  /* for each 4-bit value; msbs[0] is 0 (no bit set).  Kept local    */
  /* to the function since nothing else needs it.                    */
  static const unsigned char msbs[16] = {
    0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4
  };
  unsigned result = 0;

  if ((s & 0xff) != s) {
    /* More than the low byte is set: fold the upper bits down,      */
    /* accumulating the amount shifted into result.                  */
#   if (__SIZEOF_SIZE_T__ == 8) && !defined(CPPCHECK)
      unsigned v = (unsigned)(s >> 32);

      if (AO_EXPECT_FALSE(v != 0))
      {
        s = v;
        result += 32;
      }
#   elif __SIZEOF_SIZE_T__ == 4
      /* No op. */
#   else
      unsigned v;
      /* The following tricky code ought to be equivalent to         */
      /* "(v = s >> 32) != 0", but it suppresses warnings on 32-bit  */
      /* architectures (where a shift by 32 would match the width).  */
#     define SIZEOF_SIZE_T_GT_4 (sizeof(size_t) > 4)
      if (SIZEOF_SIZE_T_GT_4
          && (v = (unsigned)(s >> (SIZEOF_SIZE_T_GT_4 ? 32 : 0))) != 0)
      {
        s = v;
        result += 32;
      }
#   endif /* !defined(__SIZEOF_SIZE_T__) */
    if (AO_EXPECT_FALSE((s >> 16) != 0))
    {
      s >>= 16;
      result += 16;
    }
    if ((s >> 8) != 0)
    {
      s >>= 8;
      result += 8;
    }
  }
  /* At most 4 significant bits remain; finish with the table.       */
  if (s > 15)
  {
    s >>= 4;
    result += 4;
  }
  result += msbs[s];
  return result;
}