/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
* Copyright (c) 2009-2021 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* An incomplete test for the garbage collector. */
/* Some more obscure entry points are not tested at all. */
/* This must be compiled with the same flags used to build the */
/* GC. It uses GC internals to allow more precise checking of  */
/* the results of some of the tests.                           */
# ifdef HAVE_CONFIG_H
# include "config.h"
# endif
#ifndef NTHREADS /* Number of additional threads to fork. */
# define NTHREADS 5 /* Excludes main thread, which also runs a test. */
/* In the single-threaded case, the number of times to rerun it. */
/* Not respected by PCR test. */
#endif
# if defined(mips) && defined(SYSTYPE_BSD43)
/* MIPS RISCOS 4 */
# else
# include <stdlib.h>
# endif
# include <stdio.h>
# if defined(_WIN32_WCE) && !defined(__GNUC__)
# include <winbase.h>
/* # define assert ASSERT */
# else
# include <assert.h> /* Not normally used, but handy for debugging. */
# endif
/* Call GC_INIT only on platforms on which we think we really need it, */
/* so that we can test automatic initialization on the rest. */
#if defined(TEST_EXPLICIT_GC_INIT) || defined(AIX) || defined(CYGWIN32) \
|| defined(DARWIN) || defined(HOST_ANDROID) \
|| (defined(MSWINCE) && !defined(GC_WINMAIN_REDIRECT))
# define GC_OPT_INIT GC_INIT()
#else
# define GC_OPT_INIT /* empty */
#endif
#define INIT_FIND_LEAK \
if (!GC_get_find_leak()) {} else \
GC_printf("This test program is not designed for leak detection mode\n")
/* Define AO primitives for single-threaded mode. */
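/* When the libatomic_ops primitives are unavailable, the fallbacks */
/* below emulate the acquire/release semantics by performing the    */
/* access while holding the finalizer lock.                         */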
#ifndef AO_HAVE_compiler_barrier
/* AO_t not defined. */
# define AO_t GC_word
#endif
#ifndef AO_HAVE_load_acquire
static AO_t AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
FINALIZER_LOCK();
result = *addr;
FINALIZER_UNLOCK();
return result;
}
#endif
#ifndef AO_HAVE_store_release
/* Not a macro, as the new_val argument should be evaluated before */
/* the lock is acquired.                                           */
static void AO_store_release(volatile AO_t *addr, AO_t new_val)
{
FINALIZER_LOCK();
*addr = new_val;
FINALIZER_UNLOCK();
}
#endif
#ifndef AO_HAVE_fetch_and_add1
# define AO_fetch_and_add1(p) ((*(p))++)
/* This is used only to update counters. */
#endif
/* AT_END may be defined to exercise the interior pointer test */
/* if the collector is configured with ALL_INTERIOR_POINTERS. */
/* As it stands, this test should succeed with either */
/* configuration. In the FIND_LEAK configuration, it should */
/* find lots of leaks, since we free almost nothing. */
/* The following struct emulates the vtable in gcj. */
/* This assumes the default value of MARK_DESCR_OFFSET. */
struct fake_vtable {
void * dummy; /* class pointer in real gcj. */
GC_word descr;
};
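/* For illustration only (not part of the test): a minimal sketch of */
/* how such a vtable could drive a gcj-style allocation, assuming a  */
/* plain length-based mark descriptor (GC_DS_LENGTH of gc_mark.h):   */
/*                                                                   */
/*   struct fake_vtable vt = { 0,                                    */
/*           (GC_word)(2 * sizeof(GC_word)) | GC_DS_LENGTH };        */
/*   void *obj = GC_GCJ_MALLOC(2 * sizeof(GC_word), &vt);            */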
/* To check uncollectible allocation we build lists with disguised cdr */
/* pointers, and make sure they don't go away. */
sexpr uncollectable_ints(int low, int up)
{
if (low > up) {
return(nil);
} else {
return(small_cons_uncollectable(small_cons_leaf(low),
uncollectable_ints(low+1, up)));
}
}
void check_ints(sexpr list, int low, int up)
{
if (is_nil(list)) {
GC_printf("list is nil\n");
FAIL;
}
if (SEXPR_TO_INT(car(car(list))) != low) {
GC_printf(
"List reversal produced incorrect list - collector is broken\n");
FAIL;
}
if (low == up) {
if (cdr(list) != nil) {
GC_printf("List too long - collector is broken\n");
FAIL;
}
} else {
check_ints(cdr(list), low+1, up);
}
}
void check_uncollectable_ints(sexpr list, int low, int up)
{
if (SEXPR_TO_INT(car(car(list))) != low) {
GC_printf("Uncollectable list corrupted - collector is broken\n");
FAIL;
}
if (low == up) {
if (UNCOLLECTABLE_CDR(list) != nil) {
GC_printf("Uncollectable list too long - collector is broken\n");
FAIL;
}
} else {
check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
}
}
/* Not used, but useful for debugging: */
void print_int_list(sexpr x)
{
if (is_nil(x)) {
GC_printf("NIL\n");
} else {
GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
if (!is_nil(cdr(x))) {
GC_printf(", ");
print_int_list(cdr(x));
} else {
GC_printf("\n");
}
}
}
#ifdef THREADS
# if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
DWORD __stdcall tiny_reverse_test(void * arg GC_ATTR_UNUSED)
# else
void * tiny_reverse_test(void * arg GC_ATTR_UNUSED)
# endif
{
int i;
for (i = 0; i < 5; ++i) {
check_ints(reverse(reverse(ints(1, TINY_REVERSE_UPPER_VALUE))),
1, TINY_REVERSE_UPPER_VALUE);
}
# if defined(GC_ENABLE_SUSPEND_THREAD)
/* Force collection from a thread. */
GC_gcollect();
# endif
return 0;
}
# if defined(GC_PTHREADS)
# if defined(GC_ENABLE_SUSPEND_THREAD)
# include "javaxfc.h"
# endif
void fork_a_thread(void)
{
pthread_t t;
int code;
code = pthread_create(&t, NULL, tiny_reverse_test, 0);
if (code != 0) {
GC_printf("Small thread creation failed %d\n", code);
FAIL;
}
# if defined(GC_ENABLE_SUSPEND_THREAD) && !defined(GC_DARWIN_THREADS) \
&& !defined(GC_OPENBSD_UTHREADS) && !defined(GC_WIN32_THREADS) \
&& !defined(NACL) && !defined(GC_OSF1_THREADS)
if (GC_is_thread_suspended(t)) {
GC_printf("Running thread should be not suspended\n");
FAIL;
}
/* Thread could be running or already terminated (but not joined). */
GC_suspend_thread(t);
if (!GC_is_thread_suspended(t)) {
GC_printf("Thread expected to be suspended\n");
FAIL;
}
GC_suspend_thread(t); /* should be no-op */
GC_resume_thread(t);
if (GC_is_thread_suspended(t)) {
GC_printf("Resumed thread should be not suspended\n");
FAIL;
}
GC_resume_thread(t); /* should be no-op */
# endif
if ((code = pthread_join(t, 0)) != 0) {
GC_printf("Small thread join failed %d\n", code);
FAIL;
}
}
# elif defined(GC_WIN32_THREADS)
void fork_a_thread(void)
{
DWORD thread_id;
HANDLE h;
h = CreateThread((SECURITY_ATTRIBUTES *)NULL, (word)0,
tiny_reverse_test, NULL, (DWORD)0, &thread_id);
/* Explicitly specify types of the */
/* arguments to test the prototype. */
if (h == (HANDLE)NULL) {
GC_printf("Small thread creation failed, errcode= %d\n",
(int)GetLastError());
FAIL;
}
if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
GC_printf("Small thread wait failed, errcode= %d\n",
(int)GetLastError());
FAIL;
}
}
# endif
#endif /* THREADS */
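/* Check that GC_get_kind_and_size() agrees with GC_size() for p, and */
/* that GC_GENERIC_OR_SPECIAL_MALLOC() yields an object of the same   */
/* kind as p.                                                         */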
void test_generic_malloc_or_special(void *p) {
size_t size;
int kind;
void *p2;
CHECK_OUT_OF_MEMORY(p);
kind = GC_get_kind_and_size(p, &size);
if (size != GC_size(p)) {
GC_printf("GC_get_kind_and_size returned size not matching GC_size\n");
FAIL;
}
p2 = GC_GENERIC_OR_SPECIAL_MALLOC(10, kind);
CHECK_OUT_OF_MEMORY(p2);
if (GC_get_kind_and_size(p2, NULL) != kind) {
GC_printf("GC_generic_or_special_malloc:"
" unexpected kind of returned object\n");
FAIL;
}
GC_FREE(p2);
}
/* Try to force "a" (i.e. A.aa below) to be strangely aligned. */
volatile struct A_s {
char dummy;
AO_t aa;
} A;
#define a_set(p) AO_store_release(&A.aa, (AO_t)(p))
#define a_get() (sexpr)AO_load_acquire(&A.aa)
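/* a_get() and a_set() provide acquire/release access to the shared */
/* list anchor, so a reading thread always observes fully           */
/* initialized cons cells (see the invariant comment in             */
/* reverse_test_inner).                                             */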
/*
* Repeatedly reverse lists built out of very different sized cons cells.
* Check that we didn't lose anything.
*/
void *GC_CALLBACK reverse_test_inner(void *data)
{
int i;
sexpr b;
sexpr c;
sexpr d;
sexpr e;
sexpr *f, *g, *h;
if (data == 0) {
/* This stack frame is not guaranteed to be scanned. */
return GC_call_with_gc_active(reverse_test_inner, (void*)(word)1);
}
# ifndef BIG
# if defined(MACOS) \
|| (defined(UNIX_LIKE) && defined(NO_GETCONTEXT)) /* e.g. musl */
/* Assume 128 KB stacks at least. */
# if defined(__s390x__)
# define BIG 600
# else
# define BIG 1000
# endif
# elif defined(PCR)
/* PCR default stack is 100 KB. Stack frames are up to 120 bytes. */
# define BIG 700
# elif defined(MSWINCE) || defined(RTEMS)
/* WinCE only allows 64 KB stacks. */
# define BIG 500
# elif defined(EMSCRIPTEN) || defined(OSF1)
/* Wasm reports "Maximum call stack size exceeded" error otherwise. */
/* OSF has limited stack space by default, and large frames. */
# define BIG 200
# elif defined(__MACH__) && defined(__ppc64__)
# define BIG 2500
# else
# define BIG 4500
# endif
# endif
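    /* Build the test lists: small collectable ones ("a" and b), a */
    /* BIG one (c), and two uncollectable ones (d and e).          */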
a_set(ints(1, 49));
b = ints(1, 50);
c = ints(1, BIG);
d = uncollectable_ints(1, 100);
test_generic_malloc_or_special(d);
e = uncollectable_ints(1, 1);
/* Check that realloc updates object descriptors correctly */
AO_fetch_and_add1(&collectable_count);
f = (sexpr *)checkOOM(GC_MALLOC(4 * sizeof(sexpr)));
f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
CHECK_OUT_OF_MEMORY(f);
AO_fetch_and_add1(&realloc_count);
GC_PTR_STORE_AND_DIRTY(f + 5, ints(1, 17));
AO_fetch_and_add1(&collectable_count);
g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
test_generic_malloc_or_special(g);
g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
CHECK_OUT_OF_MEMORY(g);
AO_fetch_and_add1(&realloc_count);
GC_PTR_STORE_AND_DIRTY(g + 799, ints(1, 18));
AO_fetch_and_add1(&collectable_count);
h = (sexpr *)checkOOM(GC_MALLOC(1025 * sizeof(sexpr)));
h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
CHECK_OUT_OF_MEMORY(h);
AO_fetch_and_add1(&realloc_count);
# ifdef GC_GCJ_SUPPORT
GC_PTR_STORE_AND_DIRTY(h + 1999, gcj_ints(1, 200));
for (i = 0; i < 51; ++i) {
GC_PTR_STORE_AND_DIRTY(h + 1999, gcj_reverse(h[1999]));
}
/* Leave it as the reversed list for now. */
# else
GC_PTR_STORE_AND_DIRTY(h + 1999, ints(1, 200));
# endif
/* Try to force some collections and reuse of small list elements */
for (i = 0; i < 10; i++) {
(void)ints(1, BIG);
}
/* Superficially test interior pointer recognition on stack */
c = (sexpr)((char *)c + sizeof(char *));
d = (sexpr)((char *)d + sizeof(char *));
GC_FREE((void *)e);
check_ints(b,1,50);
# ifndef EMSCRIPTEN
check_ints(a_get(),1,49);
# else
/* FIXME: gctest fails unless the check_ints(a_get(), ...) calls are skipped. */
# endif
for (i = 0; i < 50; i++) {
check_ints(b,1,50);
b = reverse(reverse(b));
}
check_ints(b,1,50);
# ifndef EMSCRIPTEN
check_ints(a_get(),1,49);
# endif
for (i = 0; i < 10 * (NTHREADS+1); i++) {
# if (defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)) \
&& (NTHREADS > 0)
if (i % 10 == 0) fork_a_thread();
# endif
/* This maintains the invariant that a always points to a list */
/* of 49 integers. Thus, this is thread safe without locks, */
/* assuming acquire/release barriers in a_get/set() and atomic */
/* pointer assignments (otherwise, e.g., check_ints() may see */
/* an uninitialized object returned by GC_MALLOC). */
a_set(reverse(reverse(a_get())));
# if !defined(AT_END) && !defined(THREADS)
/* This is not thread safe, since realloc explicitly deallocates */
/* the old object.                                               */
a_set(checkOOM(GC_REALLOC(a_get(), (i & 1) != 0 ? 500 : 8200)));
AO_fetch_and_add1(&realloc_count);
# endif
}
# ifndef EMSCRIPTEN
check_ints(a_get(),1,49);
# endif
check_ints(b,1,50);
    /* Restore c and d values. */
    c = (sexpr)((char *)c - sizeof(char *));
    d = (sexpr)((char *)d - sizeof(char *));
    check_ints(c, 1, BIG);
    check_uncollectable_ints(d, 1, 100);
    check_ints(f[5], 1, 17);
    check_ints(g[799], 1, 18);
#   ifdef GC_GCJ_SUPPORT
      /* Undo the reversal left over from above. */
      GC_PTR_STORE_AND_DIRTY(h + 1999, gcj_reverse(h[1999]));
#   endif
    check_ints(h[1999], 1, 200);
    return 0;
}

void reverse_test(void)
{
    /* Test GC_do_blocking/GC_call_with_gc_active. */
    (void)GC_do_blocking(reverse_test_inner, 0);
}
void set_print_procs(void)
{
/* Set these global variables just once to avoid TSan false positives. */
A.dummy = 17;
GC_is_valid_displacement_print_proc = fail_proc1;
GC_is_visible_print_proc = fail_proc1;
}
static void uniq(void *p, ...) {
va_list a;
void *q[100];
int n = 0, i, j;
q[n++] = p;
va_start(a,p);
for (;(q[n] = va_arg(a,void *)) != NULL;n++) ;
va_end(a);
for (i=0; i<n; i++)
for (j=0; j<i; j++)
if (q[i] == q[j]) {
GC_printf(
"Apparently failed to mark from some function arguments.\n"
"Perhaps GC_push_regs was configured incorrectly?\n"
);
FAIL;
}
}
void run_one_test(void)
{
#   ifndef DBG_HDRS_ALL
      char *x, *y;
      char **z;
#   endif
#   ifndef NO_CLOCK
      CLOCK_TYPE start_time, reverse_time;
      unsigned long time_diff;
#   endif
#   ifndef NO_TEST_HANDLE_FORK
      pid_t pid;
      int wstatus;
#   endif
    struct thr_hndl_sb_s thr_hndl_sb;

    GC_FREE(0);
# ifdef THREADS
if (!GC_thread_is_registered() && GC_is_init_called()) {
GC_printf("Current thread is not registered with GC\n");
FAIL;
}
# endif
test_tinyfl();
# ifndef DBG_HDRS_ALL
AO_fetch_and_add1(&collectable_count);
x = (char*)GC_malloc(7);
CHECK_OUT_OF_MEMORY(x);
AO_fetch_and_add1(&collectable_count);
y = (char*)GC_malloc(7);
CHECK_OUT_OF_MEMORY(y);
if (GC_size(x) != 8 && GC_size(y) != MIN_WORDS * sizeof(GC_word)) {
GC_printf("GC_size produced unexpected results\n");
FAIL;
}
AO_fetch_and_add1(&collectable_count);
x = (char*)GC_malloc(15);
CHECK_OUT_OF_MEMORY(x);
if (GC_size(x) != 16) {
GC_printf("GC_size produced unexpected results 2\n");
FAIL;
}
AO_fetch_and_add1(&collectable_count);
x = (char*)GC_malloc(0);
CHECK_OUT_OF_MEMORY(x);
if (GC_size(x) != MIN_WORDS * sizeof(GC_word)) {
GC_printf("GC_malloc(0) failed: GC_size returns %lu\n",
(unsigned long)GC_size(x));
FAIL;
}
AO_fetch_and_add1(&uncollectable_count);
x = (char*)GC_malloc_uncollectable(0);
CHECK_OUT_OF_MEMORY(x);
if (GC_size(x) != MIN_WORDS * sizeof(GC_word)) {
GC_printf("GC_malloc_uncollectable(0) failed\n");
FAIL;
}
AO_fetch_and_add1(&collectable_count);
x = (char*)GC_malloc(16);
CHECK_OUT_OF_MEMORY(x);
if (GC_base(GC_PTR_ADD(x, 13)) != x) {
GC_printf("GC_base(heap ptr) produced incorrect result\n");
FAIL;
}
if (!GC_is_heap_ptr(x)) {
GC_printf("GC_is_heap_ptr(heap_ptr) produced incorrect result\n");
FAIL;
}
if (GC_is_heap_ptr(&x)) {
GC_printf("GC_is_heap_ptr(&local_var) produced incorrect result\n");
FAIL;
}
if (GC_is_heap_ptr((void *)&fail_count) || GC_is_heap_ptr(NULL)) {
GC_printf("GC_is_heap_ptr(&global_var) produced incorrect result\n");
FAIL;
}
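    /* Exercise the debug pointer-arithmetic macros; the net */
    /* displacement is zero, so GC_base(x) must still be x.  */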
(void)GC_PRE_INCR(x, 0);
(void)GC_POST_INCR(x);
(void)GC_POST_DECR(x);
if (GC_base(x) != x) {
GC_printf("Bad INCR/DECR result\n");
FAIL;
}
y = (char *)(GC_word)fail_proc1;
# ifndef PCR
if (GC_base(y) != 0) {
GC_printf("GC_base(fn_ptr) produced incorrect result\n");
FAIL;
}
# endif
if (GC_same_obj(x+5, x) != x + 5) {
GC_printf("GC_same_obj produced incorrect result\n");
FAIL;
}
if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
GC_printf("GC_is_visible produced incorrect result\n");
FAIL;
}
z = (char**)GC_malloc(8);
CHECK_OUT_OF_MEMORY(z);
AO_fetch_and_add1(&collectable_count);
GC_PTR_STORE(z, x);
GC_end_stubborn_change(z);
if (*z != x) {
GC_printf("GC_PTR_STORE failed: %p != %p\n", (void *)(*z), (void *)x);
FAIL;
}
# if !defined(IA64) && !defined(POWERPC)
if (!TEST_FAIL_COUNT(1)) {
/* On POWERPCs function pointers point to a descriptor in the */
/* data segment, so there should have been no failures. */
/* The same applies to IA64. */
GC_printf("GC_is_visible produced wrong failure indication\n");
FAIL;
}
# endif
if (GC_is_valid_displacement(y) != y
|| GC_is_valid_displacement(x) != x
|| GC_is_valid_displacement(x + 3) != x + 3) {
GC_printf("GC_is_valid_displacement produced incorrect result\n");
FAIL;
}
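    /* Check GC_memalign() for power-of-two alignments up to 256 bytes. */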
{
size_t i;
(void)GC_malloc(17);
AO_fetch_and_add1(&collectable_count);
for (i = sizeof(GC_word); i < 512; i *= 2) {
GC_word result = (GC_word) GC_memalign(i, 17);
if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
}
}
# ifndef ALL_INTERIOR_POINTERS
# if defined(POWERPC)
if (!TEST_FAIL_COUNT(1))
# else
if (!TEST_FAIL_COUNT(GC_get_all_interior_pointers() ? 1 : 2))
# endif
{
GC_printf(
"GC_is_valid_displacement produced wrong failure indication\n");
FAIL;
}
# endif
# endif /* DBG_HDRS_ALL */
/* Test floating point alignment */
{
double *dp = GC_NEW(double);
CHECK_OUT_OF_MEMORY(dp);
AO_fetch_and_add1(&collectable_count);
*dp = 1.0;
dp = GC_NEW(double);
CHECK_OUT_OF_MEMORY(dp);
AO_fetch_and_add1(&collectable_count);
*dp = 1.0;
}
/* Test size 0 allocation a bit more */
{
size_t i;
for (i = 0; i < 10000; ++i) {
(void)checkOOM(GC_MALLOC(0));
AO_fetch_and_add1(&collectable_count);
GC_FREE(checkOOM(GC_MALLOC(0)));
(void)checkOOM(GC_MALLOC_ATOMIC(0));
AO_fetch_and_add1(&atomic_count);
GC_FREE(checkOOM(GC_MALLOC_ATOMIC(0)));
test_generic_malloc_or_special(GC_malloc_atomic(1));
AO_fetch_and_add1(&atomic_count);
GC_FREE(checkOOM(GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(1)));
GC_FREE(checkOOM(GC_MALLOC_IGNORE_OFF_PAGE(2)));
}
}
thr_hndl_sb.gc_thread_handle = GC_get_my_stackbottom(&thr_hndl_sb.sb);
# ifdef GC_GCJ_SUPPORT
GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc);
# endif
/* Make sure that fn arguments are visible to the collector. */
uniq(
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
(void *)0);
/* GC_malloc(0) must return NULL or something we can deallocate. */
GC_free(checkOOM(GC_malloc(0)));
GC_free(checkOOM(GC_malloc_atomic(0)));
GC_free(checkOOM(GC_malloc(0)));
GC_free(checkOOM(GC_malloc_atomic(0)));
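    /* Check that the collector survives fork() provided the */
    /* GC_atfork_* callbacks are invoked around it.          */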
# ifndef NO_TEST_HANDLE_FORK
GC_atfork_prepare();
pid = fork();
if (pid != 0) {
GC_atfork_parent();
if (pid == -1) {
GC_printf("Process fork failed\n");
FAIL;
}
if (print_stats)
GC_log_printf("Forked child process, pid= %ld\n", (long)pid);
if (waitpid(pid, &wstatus, 0) == -1) {
GC_printf("Wait for child process failed\n");
FAIL;
}
if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) {
GC_printf("Child process failed, pid= %ld, status= 0x%x\n",
(long)pid, wstatus);
FAIL;
}
} else {
pid_t child_pid = getpid();
GC_atfork_child();
if (print_stats)
GC_log_printf("Started a child process, pid= %ld\n",
(long)child_pid);
# ifdef PARALLEL_MARK
GC_gcollect(); /* no parallel markers */
# endif
GC_start_mark_threads();
GC_gcollect();
# ifdef THREADS
/* Skip "Premature finalization" check in the */
/* child process because there could be a chance */
/* that some other thread of the parent was */
/* executing mktree at the moment of fork. */
dropped_something = 1;
# endif
tree_test();
# if !defined(DBG_HDRS_ALL) && !defined(NO_TYPED_TEST)
typed_test();
# endif
# ifdef THREADS
if (print_stats)
GC_log_printf("Starting tiny reverse test, pid= %ld\n",
(long)child_pid);
tiny_reverse_test(0);
GC_gcollect();
# endif
if (print_stats)
GC_log_printf("Finished a child process, pid= %ld\n",
(long)child_pid);
exit(0);
}
# endif
(void)GC_call_with_alloc_lock(set_stackbottom, &thr_hndl_sb);
/* Repeated list reversal test. */
# ifndef NO_CLOCK
GET_TIME(start_time);
# endif
reverse_test();
# ifndef NO_CLOCK
if (print_stats) {
GET_TIME(reverse_time);
time_diff = MS_TIME_DIFF(reverse_time, start_time);
GC_log_printf("Finished reverse_test at time %u (%p)\n",
(unsigned) time_diff, (void *)&start_time);
}
# endif
# if !defined(DBG_HDRS_ALL) && !defined(NO_TYPED_TEST)
typed_test();
# ifndef NO_CLOCK
if (print_stats) {
CLOCK_TYPE typed_time;
        GET_TIME(typed_time);
        time_diff = MS_TIME_DIFF(typed_time, start_time);
        GC_log_printf("Finished typed_test at time %u (%p)\n",
                      (unsigned) time_diff, (void *)&start_time);
      }
#   endif
# endif /* !DBG_HDRS_ALL && !NO_TYPED_TEST */
/* Run reverse_test a second time, so we hopefully notice corruption. */
reverse_test();
# ifndef NO_CLOCK
if (print_stats) {
GET_TIME(reverse_time);
time_diff = MS_TIME_DIFF(reverse_time, start_time);
GC_log_printf("Finished second reverse_test at time %u (%p)\n",
(unsigned)time_diff, (void *)&start_time);
}
# endif
/* GC_allocate_ml and GC_need_to_lock are no longer exported, and */
/* AO_fetch_and_add1() may be unavailable to update a counter. */
(void)GC_call_with_alloc_lock(inc_int_counter, &n_tests);
# ifndef NO_CLOCK
if (print_stats)
GC_log_printf("Finished %p\n", (void *)&start_time);
# endif
}
/* Execute some tests after termination of other test threads (if any). */
void run_single_threaded_test(void) {
GC_disable();
GC_FREE(checkOOM(GC_MALLOC(100)));
GC_enable();
}
void GC_CALLBACK reachable_objs_counter(void *obj, size_t size,
void *pcounter)
{
if (0 == size) {
GC_printf("Reachable object has zero size\n");
FAIL;
}
if (GC_base(obj) != obj) {
GC_printf("Invalid reachable object base passed by enumerator: %p\n",
obj);
FAIL;
}
if (GC_size(obj) != size) {
GC_printf("Invalid reachable object size passed by enumerator: %lu\n",
(unsigned long)size);
FAIL;
}
(*(unsigned *)pcounter)++;
}
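/* A sketch of how this callback might be driven (the actual call site */
/* is in check_heap_stats, outside this excerpt); count_reachable is a */
/* hypothetical wrapper, and the "_inner" enumerator is assumed to     */
/* require the allocator lock, hence GC_call_with_alloc_lock:          */
/*                                                                     */
/*   static void *GC_CALLBACK count_reachable(void *pcount)            */
/*   {                                                                 */
/*     GC_enumerate_reachable_objects_inner(reachable_objs_counter,    */
/*                                          pcount);                   */
/*     return NULL;                                                    */
/*   }                                                                 */
/*   ...                                                               */
/*   unsigned obj_count = 0;                                           */
/*   GC_gcollect();                                                    */
/*   (void)GC_call_with_alloc_lock(count_reachable, &obj_count);       */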
int main(void)
{
# if NTHREADS > 0
pthread_t th[NTHREADS];
int i, nthreads;
# endif
pthread_attr_t attr;
int code;
# ifdef GC_IRIX_THREADS
        /* Force a larger stack to be preallocated, since the */
        /* initial one cannot always grow later.              */
*((volatile char *)&code - 1024*1024) = 0; /* Require 1 MB */
# endif /* GC_IRIX_THREADS */
# if defined(GC_HPUX_THREADS)
/* Default stack size is too small, especially with the 64 bit ABI */
/* Increase it. */
if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
GC_printf("pthread_default_stacksize_np failed\n");
}
# endif /* GC_HPUX_THREADS */
# ifdef PTW32_STATIC_LIB
pthread_win32_process_attach_np ();
pthread_win32_thread_attach_np ();
# endif
# if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY) \
&& !defined(DARWIN_DONT_PARSE_STACK) && !defined(THREAD_LOCAL_ALLOC)
/* Test with the Darwin implicit thread registration. */
GC_use_threads_discovery();
GC_printf("Using Darwin task-threads-based world stop and push\n");
# endif
GC_set_markers_count(0);
GC_COND_INIT();
if ((code = pthread_attr_init(&attr)) != 0) {
GC_printf("pthread_attr_init failed, errno= %d\n", code);
FAIL;
}
# if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS) \
|| defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS) \
|| defined(GC_OPENBSD_THREADS)
if ((code = pthread_attr_setstacksize(&attr, 1000 * 1024)) != 0) {
GC_printf("pthread_attr_setstacksize failed, errno= %d\n", code);
FAIL;
}
# endif
n_tests = 0;
enable_incremental_mode();
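    /* Exercise the incremental-collection tuning knobs and verify */
    /* that the corresponding getters return the values just set.  */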
GC_set_min_bytes_allocd(1);
if (GC_get_min_bytes_allocd() != 1)
FAIL;
GC_set_rate(10);
GC_set_max_prior_attempts(1);
if (GC_get_rate() != 10 || GC_get_max_prior_attempts() != 1)
FAIL;
GC_set_warn_proc(warn_proc);
# ifndef VERY_SMALL_CONFIG
if ((code = pthread_key_create(&fl_key, 0)) != 0) {
GC_printf("Key creation failed, errno= %d\n", code);
FAIL;
}
# endif
set_print_procs();
# if NTHREADS > 0
for (i = 0; i < NTHREADS; ++i) {
if ((code = pthread_create(th+i, &attr, thr_run_one_test, 0)) != 0) {
GC_printf("Thread %d creation failed, errno= %d\n", i, code);
if (i > 0 && EAGAIN == code)
break; /* Resource temporarily unavailable */
FAIL;
}
}
nthreads = i;
for (; i <= NTHREADS; i++)
run_one_test();
for (i = 0; i < nthreads; ++i) {
if ((code = pthread_join(th[i], 0)) != 0) {
GC_printf("Thread %d join failed, errno= %d\n", i, code);
FAIL;
}
}
# else
run_one_test();
# endif
run_single_threaded_test();
check_heap_stats();
(void)fflush(stdout);
(void)pthread_attr_destroy(&attr);