typedef struct pai_test_allocator_s pai_test_allocator_t;
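/*
 * A stub PAI ("page allocator interface") used as the SEC's fallback in these
 * tests: its hooks just count how often each operation is invoked (and hand
 * out addresses from a bump pointer), so the tests below can assert exactly
 * how many requests the SEC forwarded to the fallback.
 */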
struct pai_test_allocator_s {
pai_t pai;
bool alloc_fail;
size_t alloc_count;
size_t alloc_batch_count;
size_t dalloc_count;
size_t dalloc_batch_count;
/*
* We use a simple bump allocator as the implementation. This isn't
* *really* correct, since we may allow expansion into a subsequent
* allocation, but it's not like the SEC is really examining the
* pointers it gets back; this is mostly just helpful for debugging.
*/
uintptr_t next_ptr;
size_t expand_count;
bool expand_return_value;
size_t shrink_count;
bool shrink_return_value;
};
static void
test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
size_t max_bytes) {
sec_opts_t opts;
	opts.nshards = nshards;
opts.max_alloc = max_alloc;
opts.max_bytes = max_bytes;
/*
* Just choose reasonable defaults for these; most tests don't care so
* long as they're something reasonable.
*/
opts.bytes_after_flush = max_bytes / 2;
opts.batch_fill_extra = 4;
/*
* We end up leaking this base, but that's fine; this test is
* short-running, and SECs are arena-scoped in reality.
*/
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
	bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
	assert_false(err, "Unexpected initialization failure");
}

TEST_BEGIN(test_reuse) {
pai_test_allocator_t ta;
pai_test_allocator_init(&ta);
sec_t sec;
/*
* We can't use the "real" tsd, since we malloc within the test
* allocator hooks; we'd get lock inversion crashes. Eventually, we
* should have a way to mock tsds, but for now just don't do any
* lock-order checking.
*/
tsdn_t *tsdn = TSDN_NULL;
/*
* 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
* able to get to 33 pages in the cache before triggering a flush. We
	 * set the flush limit to twice this amount, to avoid accidentally
* triggering a flush caused by the batch-allocation down the cache fill
* pathway disrupting ordering.
*/
enum { NALLOCS = 11 };
edata_t *one_page[NALLOCS];
edata_t *two_page[NALLOCS];
bool deferred_work_generated = false;
test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
/* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
for (int i = 0; i < NALLOCS; i++) {
one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
		expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
}
expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
expect_zu_le(2 * NALLOCS, max_allocs,
"Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
/*
* Free in a different order than we allocated, to make sure free-list
* separation works correctly.
*/
for (int i = NALLOCS - 1; i >= 0; i--) {
pai_dalloc(tsdn, &sec.pai, one_page[i],
&deferred_work_generated);
}
for (int i = NALLOCS - 1; i >= 0; i--) {
pai_dalloc(tsdn, &sec.pai, two_page[i],
&deferred_work_generated);
}
expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
"Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
/*
* Check that the n'th most recent deallocated extent is returned for
* the n'th alloc request of a given size.
*/
for (int i = 0; i < NALLOCS; i++) {
edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
expect_ptr_eq(one_page[i], alloc1,
"Got unexpected allocation");
expect_ptr_eq(two_page[i], alloc2,
"Got unexpected allocation");
}
expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
"Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
}
TEST_END
TEST_BEGIN(test_auto_flush) {
pai_test_allocator_t ta;
pai_test_allocator_init(&ta);
sec_t sec;
/* See the note above -- we can't use the real tsd. */
tsdn_t *tsdn = TSDN_NULL;
	/*
	 * Ten one-page allocations against a max_bytes of NALLOCS pages means
	 * the cache fills up exactly; freeing one further page triggers a
	 * flush.  The choice of NALLOCS here is chosen to match the batch
	 * allocation default (4 extra + 1 == 5; so 10 allocations leaves the
	 * cache exactly empty, even in the presence of batch allocation on
	 * fill).  Eventually, once our allocation batching strategies become
	 * smarter, this should change.
	 */
enum { NALLOCS = 10 };
edata_t *extra_alloc;
edata_t *allocs[NALLOCS];
bool deferred_work_generated = false;
test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
/* max_bytes */ NALLOCS * PAGE);
for (int i = 0; i < NALLOCS; i++) {
allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
}
extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
/* guarded */ false, /* frequent_reuse */ false,
&deferred_work_generated);
expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
expect_zu_le(NALLOCS + 1, max_allocs,
"Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
/* Free until the SEC is full, but should not have flushed yet. */
for (int i = 0; i < NALLOCS; i++) {
pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
/*
* Free the extra allocation; this should trigger a flush. The internal
* flushing logic is allowed to get complicated; for now, we rely on our
* whitebox knowledge of the fact that the SEC flushes bins in their
* entirety when it decides to do so, and it has only one bin active
* right now.
*/
pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
"Incorrect number of allocations");
expect_zu_eq(0, ta.dalloc_count,
"Incorrect number of (non-batch) deallocations");
expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
"Incorrect number of batch deallocations");
}
TEST_END
/*
* A disable and a flush are *almost* equivalent; the only difference is what
* happens afterwards; disabling disallows all future caching as well.
*/
static void
do_disable_flush_test(bool is_disable) {
pai_test_allocator_t ta;
pai_test_allocator_init(&ta);
sec_t sec;
/* See the note above -- we can't use the real tsd. */
tsdn_t *tsdn = TSDN_NULL;
enum { NALLOCS = 11 };
edata_t *allocs[NALLOCS];
bool deferred_work_generated = false;
test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
/* max_bytes */ NALLOCS * PAGE);
for (int i = 0; i < NALLOCS; i++) {
allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
}
	/* Free all but the last alloc. */
for (int i = 0; i < NALLOCS - 1; i++) {
pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
}
size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	if (is_disable) {
		sec_disable(tsdn, &sec);
	} else {
		sec_flush(tsdn, &sec);
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
"Incorrect number of allocations");
expect_zu_eq(0, ta.dalloc_count,
"Incorrect number of (non-batch) deallocations");
expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
"Incorrect number of batch deallocations");
size_t old_dalloc_batch_count = ta.dalloc_batch_count;
	/*
	 * If we free into a disabled SEC, it should forward to the fallback.
	 * Otherwise, the SEC should cache the freed extent.
	 */
pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
&deferred_work_generated);
expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
"Incorrect number of allocations");
expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
"Incorrect number of (non-batch) deallocations");
expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
"Incorrect number of batch deallocations");
}
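
/*
 * do_disable_flush_test is parameterized on is_disable; wrap it in the two
 * TEST_BEGIN drivers the test harness expects, one per flavor.
 */
TEST_BEGIN(test_disable) {
	do_disable_flush_test(/* is_disable */ true);
}
TEST_END

TEST_BEGIN(test_flush) {
	do_disable_flush_test(/* is_disable */ false);
}
TEST_END
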
TEST_BEGIN(test_max_alloc_respected) {
pai_test_allocator_t ta;
pai_test_allocator_init(&ta);
sec_t sec;
/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	size_t max_alloc = 2 * PAGE;
	size_t attempted_alloc = 3 * PAGE;
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
	    /* max_bytes */ 1000 * PAGE);
	/*
	 * Allocations bigger than max_alloc should bypass the sec and go
	 * directly to (and be freed directly by) the fallback allocator.
	 */
	for (size_t i = 0; i < 100; i++) {
expect_zu_eq(i, ta.alloc_count,
"Incorrect number of allocations");
expect_zu_eq(i, ta.dalloc_count,
"Incorrect number of deallocations");
edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
PAGE, /* zero */ false, /* guarded */ false,
/* frequent_reuse */ false, &deferred_work_generated);
expect_ptr_not_null(edata, "Unexpected alloc failure");
expect_zu_eq(i + 1, ta.alloc_count,
"Incorrect number of allocations");
expect_zu_eq(i, ta.dalloc_count,
"Incorrect number of deallocations");
pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
}
}
TEST_END
TEST_BEGIN(test_expand_shrink_delegate) {
/*
* Expand and shrink shouldn't affect sec state; they should just
* delegate to the fallback PAI.
*/
pai_test_allocator_t ta;
pai_test_allocator_init(&ta);
sec_t sec;
/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ 1000 * PAGE);
	/* An allocation bigger than max_alloc bypasses the sec entirely. */
	edata_t *edata = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");
	bool err = pai_expand(tsdn, &sec.pai, edata, 2 * PAGE, 4 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_false(err, "Unexpected expand failure");
	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
	    &deferred_work_generated);
	expect_false(err, "Unexpected shrink failure");
	expect_zu_eq(1, ta.expand_count, "Expand should delegate");
	expect_zu_eq(1, ta.shrink_count, "Shrink should delegate");
	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
	/*
	 * The alloc and dalloc should both have gone directly to the fallback
	 * as well, since the request exceeded max_alloc.
	 */
	expect_zu_eq(1, ta.alloc_count, "");
	expect_zu_eq(1, ta.dalloc_count, "");
}
TEST_END
static void
expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
sec_stats_t stats;
/*
* Check that the stats merging accumulates rather than overwrites by
* putting some (made up) data there to begin with.
*/
stats.bytes = 123;
sec_stats_merge(tsdn, sec, &stats);
assert_zu_le(npages * PAGE + 123, stats.bytes, "");
}

TEST_BEGIN(test_stats_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	enum { FLUSH_PAGES = 10 };
	bool deferred_work_generated = false;
	edata_t *allocs[2 * FLUSH_PAGES];
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);
	edata_t *extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(extra_alloc0, "Unexpected alloc failure");
	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
	}
for (size_t i = 0; i < FLUSH_PAGES; i++) {
pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
}
pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);
/* Flush the remaining pages; stats should still work. */
for (size_t i = 0; i < FLUSH_PAGES; i++) {
pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
&deferred_work_generated);
	}
	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
	    - ta.dalloc_count - ta.dalloc_batch_count);
}
TEST_END

TEST_BEGIN(test_stats_manual_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	enum { FLUSH_PAGES = 10 };
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);
	bool deferred_work_generated = false;
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
/* zero */ false, /* guarded */ false, /* frequent_reuse */
false, &deferred_work_generated);
expect_stats_pages(tsdn, &sec, 0);
}
/* Dalloc the first half of the allocations. */
for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
expect_stats_pages(tsdn, &sec, i + 1);
}