/*
 * This test allocates page sized slabs and checks that every two slabs have
 * at least one page in between them. That page is supposed to be the guard
 * page.
 */
TEST_BEGIN(test_guarded_small) {
	test_skip_if(opt_prof);

	/*
	 * NOTE(review): tsdn, npages, pages[], pages_found, ends_found,
	 * small_alloc[] and MAX_SMALL_ALLOCATIONS are not declared in this
	 * visible span -- presumably declared earlier in the file; confirm.
	 */
	/* Allocate to get sanitized pointers. */
	size_t slab_sz = PAGE;
	size_t sz = slab_sz / 8;
	unsigned n_alloc = 0;
	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
		void *ptr = malloc(sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		small_alloc[n_alloc] = ptr;
		verify_extent_guarded(tsdn, ptr);
		/* Page-aligned ptrs are candidates for underflow checks. */
		if ((uintptr_t)ptr % PAGE == 0) {
			assert_u_lt(pages_found, npages,
			    "Unexpectedly large number of page aligned allocs");
			pages[pages_found++] = (uintptr_t)ptr;
		}
		/* Ptrs ending on a page boundary: overflow-check candidates. */
		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
			ends_found++;
		}
		n_alloc++;
		if (pages_found == npages && ends_found == npages) {
			break;
		}
	}
	/* Should have found the ptrs being checked for overflow and underflow. */
	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
	expect_u_eq(ends_found, npages, "Could not find the expected pages.");

	/* Verify the pages are not continuous, i.e. separated by guards. */
	for (unsigned i = 0; i < npages - 1; i++) {
		for (unsigned j = i + 1; j < npages; j++) {
			uintptr_t ptr_diff = pages[i] > pages[j] ?
			    pages[i] - pages[j] : pages[j] - pages[i];
			expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
			    "There should be at least one page between "
			    "guarded slabs");
		}
	}

	/*
	 * Fix: the loop bound was n_alloc + 1, which freed
	 * small_alloc[n_alloc] -- a slot that was never assigned
	 * (out-of-bounds read and free of an indeterminate pointer).
	 * Only the first n_alloc entries were actually allocated.
	 */
	for (unsigned i = 0; i < n_alloc; i++) {
		free(small_alloc[i]);
	}
}
TEST_END
/* Allocate to get sanitized pointers. */
/*
 * NOTE(review): this span is the tail of a test whose opening (the
 * TEST_BEGIN line and the declarations of tsdn, nlarge and large[]) lies
 * outside the visible chunk -- presumably a guarded large-allocation test;
 * confirm against the full file.
 */
size_t large_sz = SC_LARGE_MINCLASS;
for (unsigned i = 0; i < nlarge; i++) {
void *ptr = malloc(large_sz);
/*
 * NOTE(review): verify_extent_guarded() is called before the NULL check on
 * the next line; if malloc() fails this passes a NULL pointer to the
 * verifier -- consider swapping the two statements (the small-alloc test
 * above checks for NULL first).
 */
verify_extent_guarded(tsdn, ptr);
expect_ptr_not_null(ptr, "Unexpected malloc() failure");
large[i] = (uintptr_t)ptr;
}
/* Verify the pages are not continuous, i.e. separated by guards. */
/*
 * Large allocations carry a guard page on each side, hence the required
 * gap of large_sz + 2 * PAGE between any two of them.
 */
for (unsigned i = 0; i < nlarge; i++) {
for (unsigned j = i + 1; j < nlarge; j++) {
uintptr_t ptr_diff = large[i] > large[j] ?
large[i] - large[j] : large[j] - large[i];
/*
 * NOTE(review): the concatenated message below yields a double space
 * ("between  guarded"); harmless, but worth tidying.
 */
expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
"There should be at least two pages between "
" guarded large allocations");
}
}
for (unsigned i = 0; i < nlarge; i++) {
free((void *)large[i]);
}
}
TEST_END
if (config_stats) {
expect_u64_eq(get_arena_npurge(arena_ind), 1,
"Expected purging to occur");
expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
"Expected purging to occur");
expect_u64_eq(get_arena_dirty_purged(arena_ind),
(sz1 + sz2 + 2 * add_guard_size) / PAGE,
"Expected purging to occur");
expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
"Expected purging to occur");
}
if (opt_retain) {
/*
* With retain, guarded extents are not mergable and will be
* cached in ecache_retained. They should be reused.
*/
void *new_p1 = do_mallocx(sz1, flags);
verify_extent_guarded(tsdn, p1);
expect_ptr_eq(p1, new_p1, "Expect to reuse p1");