/*
* util/alloc.c - memory allocation service.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE 16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT 48 /* for 65k threads, 2^48 rrsets per thr. */
/** setup new special type */
/** initialize a freshly malloc'ed special entry: zero the contents,
 * create its entry rwlock, and point the hash entry key at itself. */
static void
alloc_setup_special(alloc_special_type* sp)
{
	memset(sp, 0, sizeof(*sp));
	lock_rw_init(&sp->entry.lock);
	sp->entry.key = sp;
}
/** prealloc some entries in the cache. To minimize contention.
 * Result is 1 lock per alloc_max newly created entries.
 * @param alloc: the structure to fill up.
 */
static void
prealloc_setup(struct alloc_cache* alloc)
{
	int num;
	alloc_special_type* entry;
	for(num = 0; num < ALLOC_SPECIAL_MAX; num++) {
		entry = (alloc_special_type*)malloc(
			sizeof(alloc_special_type));
		if(!entry) {
			log_err("prealloc: out of memory");
			return;
		}
		alloc_setup_special(entry);
		/* prepend the new entry onto the quarantine list */
		alloc_set_special_next(entry, alloc->quar);
		alloc->quar = entry;
		alloc->num_quar++;
	}
}
void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_type* tail;
	struct regional* block, *next_block;
	if(!alloc)
		return;
	if(!alloc->super) {
		/* only the top-level cache owns its own quick lock */
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* donate the entire quarantine list to the super cache:
		 * walk to the tail so it can be spliced onto super->quar */
		tail = alloc->quar;
		while(alloc_special_next(tail))
			tail = alloc_special_next(tail);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(tail, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		alloc_clear_special_list(alloc);
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	/* free the cached regional blocks */
	for(block = alloc->reg_list; block; block = next_block) {
		next_block = (struct regional*)block->next;
		free(block);
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}
uint64_t
alloc_get_id(struct alloc_cache* alloc)
{
	uint64_t id = alloc->next_id++;
	if(id != alloc->last_id)
		return id;
	/* this thread's 64bit id range is exhausted: flush the cache so
	 * stale ids cannot clash with reused ones, then restart counting */
	log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
	fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup));
	(*alloc->cleanup)(alloc->cleanup_arg);
	/* start back at the first number, like in alloc_init */
	alloc->next_id = (uint64_t)alloc->thread_num;
	alloc->next_id <<= THRNUM_SHIFT; /* in steps for compiler */
	alloc->next_id += 1; /* portability. */
	/* and hand out a fresh, safe id */
	return alloc->next_id++;
}
alloc_special_type*
alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_type* item;
	log_assert(alloc);
	/* fastest path: pop from the thread-local quarantine list */
	if(alloc->quar) {
		item = alloc->quar;
		alloc->quar = alloc_special_next(item);
		alloc->num_quar--;
		item->id = alloc_get_id(alloc);
		return item;
	}
	/* next, try the shared (super) cache under its lock; one entry
	 * per lock keeps this simple and is about as fast as batching */
	if(alloc->super) {
		lock_quick_lock(&alloc->super->lock);
		item = alloc->super->quar;
		if(item) {
			alloc->super->quar = alloc_special_next(item);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(item) {
			item->id = alloc_get_id(alloc);
			return item;
		}
	}
	/* nothing cached anywhere: prealloc a batch, then make a fresh one */
	prealloc_setup(alloc);
	item = (alloc_special_type*)malloc(sizeof(alloc_special_type));
	if(!item) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(item);
	item->id = alloc_get_id(alloc);
	return item;
}
/** push mem and some more items to the super.
 * Called when the local quarantine list is full (num_quar at
 * ALLOC_SPECIAL_MAX); moves mem plus the first half of the local list
 * into the super cache, so one lock acquisition covers
 * ALLOC_SPECIAL_MAX/2 + 1 releases.
 * @param alloc: local cache; must have a super and a full quar list.
 * @param mem: entry being released; becomes the head of the donated list.
 */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_type* mem)
{
	int i;
	alloc_special_type *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	/* link mem in front of the local list ... */
	alloc_set_special_next(mem, alloc->quar);
	/* ... and advance p to the last element of the first half */
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	/* detach the first half: local list keeps the second half */
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;
	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}
alloc_special_clean(mem);
if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
/* push it to the super structure */
pushintosuper(alloc, mem);
return;
}