/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
/*
* This implements:
* 1. allocation of heap block headers
 * 2. a map from addresses, via the containing heap block's address,
 *    to heap block headers
 *
 * Access speed is crucial.  We implement an index structure based on
 * a two-level tree.
*/
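/* A minimal sketch of the two-level lookup, assuming HASH_TL is not   */
/* defined (the real GET_HDR/HDR macros live in gc_hdrs.h and also go  */
/* through the header cache):                                          */
/*                                                                     */
/*   word hi = (word)p >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);              */
/*   bottom_index *bi = GC_top_index[hi];             /. first level   */
/*   hdr *hhdr = bi -> index[((word)p >> LOG_HBLKSIZE)                 */
/*                           & (BOTTOM_SZ - 1)];      /. second level  */
/*                                                                     */
/* GC_all_nils serves as a shared all-nil bottom level (see            */
/* GC_init_headers below), so the fast path needs no NULL check at     */
/* the first level.                                                    */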
/* Handle a header cache miss. Returns a pointer to the */
/* header corresponding to p, if p can possibly be a valid */
/* object pointer, and 0 otherwise. */
/* GUARANTEED to return 0 for a pointer past the first page */
/* of an object unless both GC_all_interior_pointers is set */
/* and p is in fact a valid object pointer. */
/* Never returns a pointer to a free hblk. */
GC_INNER hdr *
#ifdef PRINT_BLACK_LIST
GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
#else
GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
#endif
{
hdr *hhdr;
HC_MISS();
GET_HDR(p, hhdr);
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
if (GC_all_interior_pointers) {
if (hhdr != 0) {
ptr_t current = p;
current = (ptr_t)HBLKPTR(current);
do {
current = current - HBLKSIZE*(word)hhdr;
hhdr = HDR(current);
} while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
          /* current now points near the start of the large object */
if (hhdr -> hb_flags & IGNORE_OFF_PAGE)
return 0;
if (HBLK_IS_FREE(hhdr)
|| p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
/* Pointer past the end of the block */
return 0;
}
} else {
GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
/* And return zero: */
}
GC_ASSERT(hhdr == 0 || !HBLK_IS_FREE(hhdr));
return hhdr;
        /* Pointers past the first page are probably too rare to be */
        /* worth caching, so we do not cache them; correctness      */
        /* relies on the fact that we do not.                       */
} else {
if (hhdr == 0) {
GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
}
return 0;
}
} else {
if (HBLK_IS_FREE(hhdr)) {
GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
return 0;
} else {
hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
hce -> hce_hdr = hhdr;
return hhdr;
}
}
}
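/* A hedged sketch of a typical call site: the HC_GET_HDR macro in     */
/* gc_hdrs.h probes the direct-mapped header cache first and falls     */
/* back to this function only on a mismatch, roughly:                  */
/*                                                                     */
/*   hdr_cache_entry *hce = HCE(p);                                    */
/*   if (EXPECT(HCE_VALID_FOR(hce, p), TRUE)) {                        */
/*     HC_HIT();                                                       */
/*     hhdr = hce -> hce_hdr;                                          */
/*   } else {                                                          */
/*     hhdr = HEADER_CACHE_MISS(p, hce, source);                       */
/*   }                                                                 */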
/* Routines to dynamically allocate collector data structures that will */
/* never be freed. */
GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
{
  ptr_t result = GC_scratch_free_ptr;
  size_t bytes_to_get;

  bytes = ROUNDUP_GRANULE_SIZE(bytes);
for (;;) {
GC_ASSERT((word)GC_scratch_end_ptr >= (word)result);
if (bytes <= (word)GC_scratch_end_ptr - (word)result) {
      /* The unallocated part of the scratch buffer is large enough. */
GC_scratch_free_ptr = result + bytes;
return result;
}
GC_ASSERT(GC_page_size != 0);
if (bytes >= MINHINCR * HBLKSIZE) {
bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
result = (ptr_t)GET_MEM(bytes_to_get);
if (result != NULL) {
GC_add_to_our_memory(result, bytes_to_get);
        /* Serve the large request directly from GET_MEM; do not   */
        /* update the scratch free-area pointers.                   */
# ifdef USE_SCRATCH_LAST_END_PTR
/* Update end point of last obtained area (needed only */
/* by GC_register_dynamic_libraries for some targets). */
GC_scratch_last_end_ptr = result + bytes;
# endif
}
return result;
}
bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(MINHINCR * HBLKSIZE);
/* round up for safety */
result = (ptr_t)GET_MEM(bytes_to_get);
if (EXPECT(NULL == result, FALSE)) {
WARN("Out of memory - trying to allocate requested amount"
" (%" WARN_PRIuPTR " bytes)...\n", bytes);
bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
result = (ptr_t)GET_MEM(bytes_to_get);
if (result != NULL) {
GC_add_to_our_memory(result, bytes_to_get);
# ifdef USE_SCRATCH_LAST_END_PTR
GC_scratch_last_end_ptr = result + bytes;
# endif
}
return result;
}
GC_add_to_our_memory(result, bytes_to_get);
/* TODO: some amount of unallocated space may remain unused forever */
/* Update scratch area pointers and retry. */
GC_scratch_free_ptr = result;
GC_scratch_end_ptr = GC_scratch_free_ptr + bytes_to_get;
# ifdef USE_SCRATCH_LAST_END_PTR
GC_scratch_last_end_ptr = GC_scratch_end_ptr;
# endif
}
}
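/* For example, assuming GRANULE_BYTES is 16 (typical for 64-bit       */
/* targets), a request for 20 bytes is rounded up to 32 and served     */
/* from the current scratch area if at least 32 bytes remain there;    */
/* a request of MINHINCR * HBLKSIZE bytes or more bypasses the         */
/* scratch area and goes to GET_MEM directly.                          */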
#ifdef COUNT_HDR_CACHE_HITS
/* Used for debugging/profiling (the symbols are externally visible). */
word GC_hdr_cache_hits = 0;
word GC_hdr_cache_misses = 0;
#endif
GC_INNER void GC_init_headers(void)
{
unsigned i;
GC_ASSERT(NULL == GC_all_nils);
GC_all_nils = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
if (GC_all_nils == NULL) {
GC_err_printf("Insufficient memory for GC_all_nils\n");
EXIT();
}
BZERO(GC_all_nils, sizeof(bottom_index));
for (i = 0; i < TOP_SZ; i++) {
GC_top_index[i] = GC_all_nils;
}
}
/* Make sure that there is a bottom level index block for address addr. */
/* Return FALSE on failure. */
static GC_bool get_index(word addr)
{
word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
bottom_index * r;
bottom_index * p;
bottom_index ** prev;
bottom_index *pi; /* old_p */
word i;
GC_ASSERT(I_HOLD_LOCK());
# ifdef HASH_TL
i = TL_HASH(hi);
pi = p = GC_top_index[i];
while(p != GC_all_nils) {
if (p -> key == hi) return(TRUE);
p = p -> hash_link;
}
# else
if (GC_top_index[hi] != GC_all_nils)
return TRUE;
i = hi;
# endif
r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
if (EXPECT(NULL == r, FALSE))
return FALSE;
BZERO(r, sizeof(bottom_index));
r -> key = hi;
# ifdef HASH_TL
r -> hash_link = pi;
# endif
/* Add it to the list of bottom indices */
prev = &GC_all_bottom_indices; /* pointer to p */
pi = 0; /* bottom_index preceding p */
while ((p = *prev) != 0 && p -> key < hi) {
pi = p;
prev = &(p -> asc_link);
}
r -> desc_link = pi;
if (0 == p) {
GC_all_bottom_indices_end = r;
} else {
p -> desc_link = r;
}
r -> asc_link = p;
*prev = r;
GC_top_index[i] = r;
return(TRUE);
}
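/* For illustration: after get_index has been called for keys 3, 7     */
/* and 5 (in any order), the bottom indices form the ascending chain   */
/*                                                                     */
/*   GC_all_bottom_indices -> {key 3} -> {key 5} -> {key 7}            */
/*                                                                     */
/* linked via asc_link (desc_link points the other way, and            */
/* GC_all_bottom_indices_end addresses the last element), so clients   */
/* can traverse all known blocks in address order.                     */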
/* Install a header for block h. */
/* The header is uninitialized. */
/* Returns the header or 0 on failure. */
GC_INNER struct hblkhdr * GC_install_header(struct hblk *h)
{
hdr * result;
if (!get_index((word) h)) return(0);
result = alloc_hdr();
if (result) {
SET_HDR(h, result);
# ifdef USE_MUNMAP
result -> hb_last_reclaimed = (unsigned short)GC_gc_no;
# endif
}
return(result);
}
/* Set up forwarding counts for block h of size sz */
GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
struct hblk * hbp;
for (hbp = h; (word)hbp < (word)h + sz; hbp += BOTTOM_SZ) {
if (!get_index((word)hbp))
return FALSE;
if ((word)hbp > GC_WORD_MAX - (word)BOTTOM_SZ * HBLKSIZE)
break; /* overflow of hbp+=BOTTOM_SZ is expected */
}
if (!get_index((word)h + sz - 1))
return FALSE;
    for (hbp = h + 1; (word)hbp < (word)h + sz; hbp += 1) {
      word i = HBLK_PTR_DIFF(hbp, h);

      /* The header of a non-first block of a large object is a   */
      /* small integer encoding the distance back toward the      */
      /* first block, capped at MAX_JUMP.                         */
      SET_HDR(hbp, (hdr *)(i > MAX_JUMP ? MAX_JUMP : i));
    }
    return TRUE;
}
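/* For example, for a large object occupying blocks h .. h+4, the      */
/* headers of h+1 .. h+4 are set to the forwarding counts 1 .. 4       */
/* (each capped at MAX_JUMP), so the backward scan in                  */
/* GC_header_cache_miss jumps from h+i back to h in one step whenever  */
/* i <= MAX_JUMP.                                                      */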
/* Apply fn to all allocated blocks. It is the caller's responsibility */
/* to avoid data races during the function execution (e.g. by holding  */
/* the allocation lock).                                                */
void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
word client_data)
{
signed_word j;
bottom_index * index_p;