typedef struct test_data_s test_data_t;
struct test_data_s {
/*
* Must be the first member -- we convert back and forth between the
* test_data_t and the hpa_shard_t.
*/
hpa_shard_t shard;
hpa_central_t central;
base_t *base;
edata_cache_t shard_edata_cache;
};
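
/*
 * A hedged sketch of the first-member trick described above: because shard
 * is the first member of test_data_s, a test_data_t * and an hpa_shard_t *
 * refer to the same address, so the test can hand out the shard pointer and
 * later recover the enclosing test_data_t with a cast. The helper names
 * below are illustrative only; they are not the helpers this file defines.
 */
static inline hpa_shard_t *
sketch_test_data_to_shard(test_data_t *test_data) {
	return &test_data->shard;
}

static inline test_data_t *
sketch_shard_to_test_data(hpa_shard_t *shard) {
	/* Valid only because shard is the first member of test_data_s. */
	return (test_data_t *)shard;
}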
/* Small max */
edata_t *edata;
bool deferred_work_generated = false;
edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
false, &deferred_work_generated);
expect_ptr_not_null(edata, "Allocation of small max failed");
edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
false, false, &deferred_work_generated);
expect_ptr_null(edata, "Allocation of larger than small max succeeded");
/*
* Test hpa_shard_destroy, which requires as a precondition that all its
* extents have been deallocated.
*/
for (size_t i = 0; i < nlive_edatas; i++) {
edata_t *to_free = live_edatas[i];
node_remove(&tree, to_free);
pai_dalloc(tsdn, &shard->pai, to_free,
&deferred_work_generated);
}
hpa_shard_destroy(tsdn, shard);
edata_t *allocs[NALLOCS];
/*
* Allocate a mix of ways; first half from regular alloc, second half
* from alloc_batch.
*/
for (size_t i = 0; i < NALLOCS / 2; i++) {
allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false,
/* frequent_reuse */ false, &deferred_work_generated);
expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
}
edata_list_active_t allocs_list;
edata_list_active_init(&allocs_list);
size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
&allocs_list, &deferred_work_generated);
expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
allocs[i] = edata_list_active_first(&allocs_list);
edata_list_active_remove(&allocs_list, allocs[i]);
}
/*
* Should have allocated them contiguously, despite the differing
* methods used.
*/
void *orig_base = edata_base_get(allocs[0]);
expect_contiguous(allocs, NALLOCS);
/*
* Batch dalloc the first half, individually deallocate the second half.
*/
for (size_t i = 0; i < NALLOCS / 2; i++) {
edata_list_active_append(&allocs_list, allocs[i]);
}
pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
&deferred_work_generated);
for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
pai_dalloc(tsdn, &shard->pai, allocs[i],
&deferred_work_generated);
}
/* Reallocate (individually), and ensure reuse and contiguity. */
for (size_t i = 0; i < NALLOCS; i++) {
allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
}
void *new_base = edata_base_get(allocs[0]);
expect_ptr_eq(orig_base, new_base,
"Failed to reuse the allocated memory.");
expect_contiguous(allocs, NALLOCS);
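	/*
	 * expect_contiguous is defined elsewhere in this test file; the check
	 * it performs is equivalent in spirit to the inline loop below, which
	 * assumes PAGE-sized allocations laid out back to back from the base
	 * of the first edata. Shown purely as an illustration; it is not the
	 * file's actual helper.
	 */
	for (size_t i = 0; i < NALLOCS; i++) {
		expect_ptr_eq((void *)((uintptr_t)new_base + i * PAGE),
		    edata_base_get(allocs[i]),
		    "Allocation %zu is not contiguous with the first", i);
	}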
nstime_init(&defer_curtime, 0);
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
edata_t *edatas[HUGEPAGE_PAGES];
for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
false, false, &deferred_work_generated);
expect_ptr_not_null(edatas[i], "Unexpected null edata");
}
hpa_shard_do_deferred_work(tsdn, shard);
expect_false(defer_hugify_called, "Hugified too early");
/* Hugification delay is set to 10 seconds in options. */
nstime_init2(&defer_curtime, 11, 0);
hpa_shard_do_deferred_work(tsdn, shard);
expect_true(defer_hugify_called, "Failed to hugify");
defer_hugify_called = false;
/* Purge. Recall that dirty_mult is .25. */
for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
pai_dalloc(tsdn, &shard->pai, edatas[i],
&deferred_work_generated);
}
hpa_shard_do_deferred_work(tsdn, shard);
expect_false(defer_hugify_called, "Hugified too early");
expect_true(defer_dehugify_called, "Should have dehugified");
expect_true(defer_purge_called, "Should have purged");
defer_hugify_called = false;
defer_dehugify_called = false;
defer_purge_called = false;
/*
* Refill the page. We now meet the hugification threshold; we should
* be marked for pending hugify.
*/
for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
false, false, &deferred_work_generated);
expect_ptr_not_null(edatas[i], "Unexpected null edata");
}
/*
* We would be ineligible for hugification, had we not already met the
* threshold before dipping below it.
*/
pai_dalloc(tsdn, &shard->pai, edatas[0],
&deferred_work_generated);
/* Wait for the threshold again. */
nstime_init2(&defer_curtime, 22, 0);
hpa_shard_do_deferred_work(tsdn, shard);
expect_true(defer_hugify_called, "Failed to hugify");
expect_false(defer_dehugify_called, "Unexpected dehugify");
expect_false(defer_purge_called, "Unexpected purge");
destroy_test_data(shard);
}
TEST_END
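
/*
 * test_defer_time above never reads the real clock: the shard is created
 * with custom hpa hooks whose time callbacks return defer_curtime, so
 * nstime_init2(&defer_curtime, 11, 0) is what "advances" time past the
 * 10-second hugification delay. A sketch of such stubs follows; the names
 * and exact hook signatures are illustrative, not this file's definitions.
 */
static void
sketch_defer_curtime(nstime_t *r_time, bool first_reading) {
	(void)first_reading;
	*r_time = defer_curtime;
}

static uint64_t
sketch_defer_ms_since(nstime_t *past_time) {
	return (nstime_ns(&defer_curtime) - nstime_ns(past_time))
	    / 1000 / 1000;
}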
int
main(void) {
/*
* These trigger unused-function warnings on CI runs, even if declared
* with static inline.
*/
(void)mem_tree_empty;
(void)mem_tree_last;
(void)mem_tree_search;
(void)mem_tree_nsearch;
(void)mem_tree_psearch;
(void)mem_tree_iter;
(void)mem_tree_reverse_iter;
(void)mem_tree_destroy;
return test_no_reentrancy(
test_alloc_max,
test_stress,
test_alloc_dalloc_batch,
test_defer_time);
}