/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
if (opool != NULL) {
/* Should have gotten reference to existing pool. */
TP_LOG(("%s: found existing unbound pool for pri %d (%s)\n",
__func__, val, opool == pool ? "match" : "NO MATCH"));
KASSERT(opool == pool);
threadpool_put(pool, val);
error = EEXIST;
} else {
TP_LOG(("%s: created unbound pool for pri %d\n",
__func__, val));
}
return error;
}
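
/*
 * threadpool_tester_put_unbound --
 *	Sysctl write handler: release the tester's cached reference to
 *	the unbound pool for the written priority, if one is held.
 */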
static int
threadpool_tester_put_unbound(SYSCTLFN_ARGS)
{
struct tester_context *ctx;
struct threadpool *pool;
struct sysctlnode node;
int error, val;
node = *rnode;
ctx = node.sysctl_data;
val = -1;
node.sysctl_data = &val;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return error;
if (! pri_is_valid(val))
return EINVAL;
mutex_enter(&ctx->ctx_mutex);
/* We only ever maintain a single reference. */
pool = ctx->ctx_unbound[pri_to_idx(val)];
ctx->ctx_unbound[pri_to_idx(val)] = NULL;
mutex_exit(&ctx->ctx_mutex);
if (pool == NULL) {
TP_LOG(("%s: no unbound pool for pri %d\n",
__func__, val));
return ENODEV;
}
threadpool_put(pool, val);
TP_LOG(("%s: released unbound pool for pri %d\n",
__func__, val));
return 0;
}
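
/*
 * threadpool_tester_run_unbound --
 *	Sysctl write handler: schedule the test job on the cached
 *	unbound pool for the written priority.
 */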
static int
threadpool_tester_run_unbound(SYSCTLFN_ARGS)
{
struct tester_context *ctx;
struct threadpool *pool;
struct sysctlnode node;
int error, val;
node = *rnode;
ctx = node.sysctl_data;
val = -1;
node.sysctl_data = &val;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return error;
if (! pri_is_valid(val))
return EINVAL;
mutex_enter(&ctx->ctx_mutex);
pool = ctx->ctx_unbound[pri_to_idx(val)];
if (pool == NULL) {
TP_LOG(("%s: no unbound pool for pri %d\n",
__func__, val));
mutex_exit(&ctx->ctx_mutex);
return ENODEV;
}
threadpool_schedule_job(pool, &ctx->ctx_job);
TP_LOG(("%s: scheduled job on unbound pool for pri %d\n",
__func__, val));
mutex_exit(&ctx->ctx_mutex);
return 0;
}
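
/*
 * threadpool_tester_get_percpu --
 *	Sysctl write handler: acquire a reference to the per-CPU pool
 *	for the written priority and cache it in the tester context.
 */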
static int
threadpool_tester_get_percpu(SYSCTLFN_ARGS)
{
struct tester_context *ctx;
struct threadpool_percpu *pcpu, *opcpu = NULL;
struct sysctlnode node;
int error, val;
node = *rnode;
ctx = node.sysctl_data;
val = -1;
node.sysctl_data = &val;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (! pri_is_valid(val))
		return EINVAL;

	error = threadpool_percpu_get(&pcpu, val);
	if (error) {
		TP_LOG(("%s: threadpool_percpu_get(..., %d) failed -> %d\n",
		    __func__, val, error));
		return error;
	}

	mutex_enter(&ctx->ctx_mutex);
	if (ctx->ctx_percpu[pri_to_idx(val)] == NULL)
		ctx->ctx_percpu[pri_to_idx(val)] = pcpu;
	else
		opcpu = ctx->ctx_percpu[pri_to_idx(val)];
	mutex_exit(&ctx->ctx_mutex);

	if (opcpu != NULL) {
/* Should have gotten reference to existing pool. */
TP_LOG(("%s: found existing unbound pool for pri %d (%s)\n",
__func__, val, opcpu == pcpu ? "match" : "NO MATCH"));
KASSERT(opcpu == pcpu);
threadpool_percpu_put(pcpu, val);
error = EEXIST;
} else {
TP_LOG(("%s: created percpu pool for pri %d\n",
__func__, val));
}
return error;
}
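
/*
 * threadpool_tester_put_percpu --
 *	Sysctl write handler: release the tester's cached reference to
 *	the per-CPU pool for the written priority, if one is held.
 */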
static int
threadpool_tester_put_percpu(SYSCTLFN_ARGS)
{
struct tester_context *ctx;
struct threadpool_percpu *pcpu;
struct sysctlnode node;
int error, val;
node = *rnode;
ctx = node.sysctl_data;
val = -1;
node.sysctl_data = &val;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return error;
if (! pri_is_valid(val))
return EINVAL;
mutex_enter(&ctx->ctx_mutex);
/* We only ever maintain a single reference. */
pcpu = ctx->ctx_percpu[pri_to_idx(val)];
ctx->ctx_percpu[pri_to_idx(val)] = NULL;
mutex_exit(&ctx->ctx_mutex);
if (pcpu == NULL) {
TP_LOG(("%s: no percpu pool for pri %d\n",
__func__, val));
return ENODEV;
}
threadpool_percpu_put(pcpu, val);
TP_LOG(("%s: released percpu pool for pri %d\n",
__func__, val));
return 0;
}
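
/*
 * threadpool_tester_run_percpu --
 *	Sysctl write handler: look up the current CPU's pool from the
 *	cached per-CPU pool for the written priority and schedule the
 *	test job on it.
 */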
static int
threadpool_tester_run_percpu(SYSCTLFN_ARGS)
{
struct tester_context *ctx;
struct threadpool_percpu *pcpu;
struct threadpool *pool;
struct sysctlnode node;
int error, val;
node = *rnode;
ctx = node.sysctl_data;
val = -1;
node.sysctl_data = &val;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return error;
if (! pri_is_valid(val))
return EINVAL;
mutex_enter(&ctx->ctx_mutex);
pcpu = ctx->ctx_percpu[pri_to_idx(val)];
if (pcpu == NULL) {
TP_LOG(("%s: no percpu pool for pri %d\n",
__func__, val));
mutex_exit(&ctx->ctx_mutex);
return ENODEV;
}
pool = threadpool_percpu_ref(pcpu);
KASSERT(pool != NULL);
threadpool_schedule_job(pool, &ctx->ctx_job);
TP_LOG(("%s: scheduled job on percpu pool for pri %d\n",
__func__, val));
mutex_exit(&ctx->ctx_mutex);
return 0;
}
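
/*
 * threadpool_tester_test_value --
 *	Sysctl handler: read or update the tester's shared value under
 *	the context mutex.
 */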
static int
threadpool_tester_test_value(SYSCTLFN_ARGS)
{
struct tester_context *ctx;
struct sysctlnode node;
unsigned int val;
	int error;

	node = *rnode;
	ctx = node.sysctl_data;

	mutex_enter(&ctx->ctx_mutex);
	val = ctx->ctx_value;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error == 0 && newp != NULL)
		ctx->ctx_value = val;
	mutex_exit(&ctx->ctx_mutex);

	return error;
}
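
/*
 * threadpool_tester_fini --
 *	Module teardown: drop any pool references still cached in the
 *	tester context, cancelling the test job as needed.
 */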
static int
threadpool_tester_fini(void)
{
pri_t pri;
mutex_enter(&tester_ctx.ctx_mutex);
for (pri = PRI_NONE/*-1*/; pri < PRI_COUNT; pri++) {
struct threadpool *pool =
tester_ctx.ctx_unbound[pri_to_idx(pri)];
struct threadpool_percpu *pcpu =
tester_ctx.ctx_percpu[pri_to_idx(pri)];
/*
* threadpool_cancel_job() may be called on a pool
* other than what the job is scheduled on. This is
* safe; see comment in threadpool_cancel_job_async().
*/