nstime_t time;
nstime_copy(&time, &decay_nstime);
expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
new_pages, "Not all pages are expected to decay in decay_ms");
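
/* With zero elapsed time, no pages should be due for purging. */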
nstime_init(&time, 0);
expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages), 0,
"More than zero pages are expected to instantly decay");
nstime_copy(&time, &decay_nstime);
nstime_idivide(&time, 2);
expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
new_pages / 2, "Not half of pages decay in half the decay period");
}
TEST_END
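
/*
 * If no dirty pages are ever reported, npages_limit must remain zero no
 * matter how many decay epochs pass.
 */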
TEST_BEGIN(test_decay_empty) {
decay_t decay;
memset(&decay, 0, sizeof(decay));

nstime_t curtime;
nstime_init(&curtime, 0);

uint64_t decay_ms = 1000; /* Arbitrary valid decay time. */
uint64_t decay_ns = decay_ms * 1000 * 1000;

expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
"Failed to initialize decay");

uint64_t time_between_calls = decay_epoch_duration_ns(&decay) / 5;
int nepochs = 0;
for (uint64_t i = 0; i < decay_ns / time_between_calls * 10; i++) {
size_t dirty_pages = 0;
nstime_init(&curtime, i * time_between_calls);
bool epoch_advanced = decay_maybe_advance_epoch(&decay,
&curtime, dirty_pages);
if (epoch_advanced) {
nepochs++;
expect_zu_eq(decay_npages_limit_get(&decay), 0,
"Unexpectedly increased npages_limit");
}
}
expect_d_gt(nepochs, 0, "Epochs never advanced");
}
TEST_END
/*
 * Verify that npages_limit decays correctly as time passes.
 *
 * During the first 'nepoch_init' epochs, add new dirty pages.
 * After that, let them decay and verify that npages_limit decreases.
 * Then proceed with another 'nepoch_init' epochs and check that
 * all dirty pages are flushed out of the backlog, bringing npages_limit
 * down to zero.
 */
TEST_BEGIN(test_decay) {
const uint64_t nepoch_init = 10;
decay_t decay;
memset(&decay, 0, sizeof(decay));

nstime_t curtime;
nstime_init(&curtime, 0);

uint64_t decay_ms = 1000; /* Arbitrary valid decay time. */
uint64_t decay_ns = decay_ms * 1000 * 1000;

expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
"Failed to initialize decay");

uint64_t epoch_ns = decay_epoch_duration_ns(&decay);
nstime_t epochtime;
nstime_init(&epochtime, epoch_ns);

const size_t dirty_pages_per_epoch = 1000;
size_t dirty_pages = 0;
bool epoch_advanced = false;

/* Populate backlog with some dirty pages */
for (uint64_t i = 0; i < nepoch_init; i++) {
nstime_add(&curtime, &epochtime);
dirty_pages += dirty_pages_per_epoch;
epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
dirty_pages);
}
expect_true(epoch_advanced, "Epoch never advanced");
size_t npages_limit = decay_npages_limit_get(&decay);
expect_zu_gt(npages_limit, 0, "npages_limit is incorrectly equal "
"to zero after dirty pages have been added");
/* Keep dirty pages unchanged and verify that npages_limit decreases */
for (uint64_t i = nepoch_init; i * epoch_ns < decay_ns; ++i) {
nstime_add(&curtime, &epochtime);
epoch_advanced = decay_maybe_advance_epoch(&decay, &curtime,
dirty_pages);
if (epoch_advanced) {
size_t npages_limit_new = decay_npages_limit_get(&decay);
expect_zu_lt(npages_limit_new, npages_limit,
"npages_limit failed to decay");
npages_limit = npages_limit_new;
}
}
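
/*
 * Less than decay_ms has passed since the last dirty page was added, so
 * the limit must still be positive.
 */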
expect_zu_gt(npages_limit, 0, "npages_limit decayed to zero earlier "
"than decay_ms since last dirty page was added");
/* Completely push all dirty pages out of the backlog */
epoch_advanced = false;
for (uint64_t i = 0; i < nepoch_init; i++) {
nstime_add(&curtime, &epochtime);
epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
dirty_pages);
}
expect_true(epoch_advanced, "Epoch never advanced");
npages_limit = decay_npages_limit_get(&decay);
expect_zu_eq(npages_limit, 0, "npages_limit didn't decay to 0 after "
"decay_ms since last bump in dirty pages");
}
TEST_END
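
/*
 * Verify decay_ns_until_purge(), which reports how long purging can be
 * deferred given the current number of dirty pages and a purge threshold.
 */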
TEST_BEGIN(test_decay_ns_until_purge) {
const uint64_t nepoch_init = 10;

decay_t decay;
memset(&decay, 0, sizeof(decay));

nstime_t curtime;
nstime_init(&curtime, 0);

uint64_t decay_ms = 1000; /* Arbitrary valid decay time. */
uint64_t decay_ns = decay_ms * 1000 * 1000;

expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
"Failed to initialize decay");

nstime_t epochtime;
nstime_init(&epochtime, decay_epoch_duration_ns(&decay));

uint64_t ns_until_purge_empty = decay_ns_until_purge(&decay, 0, 0);
expect_u64_eq(ns_until_purge_empty, DECAY_UNBOUNDED_TIME_TO_PURGE,
"Failed to return unbounded wait time for zero threshold");
const size_t dirty_pages_per_epoch = 1000;
size_t dirty_pages = 0;
bool epoch_advanced = false;
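
/* Report dirty pages over several epochs so the backlog is non-empty. */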
for (uint64_t i = 0; i < nepoch_init; i++) {
nstime_add(&curtime, &epochtime);
dirty_pages += dirty_pages_per_epoch;
epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
dirty_pages);
}
expect_true(epoch_advanced, "Epoch never advanced");
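
/* Purging every dirty page should take at least the full decay interval. */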
uint64_t ns_until_purge_all = decay_ns_until_purge(&decay,
dirty_pages, dirty_pages);
expect_u64_ge(ns_until_purge_all, decay_ns,
"Incorrectly calculated time to purge all pages");
uint64_t ns_until_purge_none = decay_ns_until_purge(&decay,
dirty_pages, 0);
expect_u64_eq(ns_until_purge_none, decay_epoch_duration_ns(&decay) * 2,
"Incorrectly calculated time to purge 0 pages");