/*-
* Copyright (c) 2009-2016 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This material is based upon work partially supported by The
* NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NPF main: dynamic load/initialisation and unload routines.
*/
/*
* Module and device structures.
*/
#ifndef _MODULE
/*
 * Modular kernels load drivers too early, and we need percpu to be
 * initialised first, so we make this a misc-class module; a better way
 * would be to have separate early-boot and late-boot driver classes.
 */
MODULE(MODULE_CLASS_MISC, npf, "bpf");
#else
/* This module autoloads via /dev/npf, so it needs to be a driver. */
MODULE(MODULE_CLASS_DRIVER, npf, "bpf");
#endif
/* Upper bound on the size of data transferred by a single ioctl (4 MB). */
#define NPF_IOCTL_DATA_LIMIT (4 * 1024 * 1024)

/* pfil(9) hook registration; the bool selects init-time vs later behaviour. */
static int npf_pfil_register(bool);
static void npf_pfil_unregister(bool);

/* Entry points for the /dev/npf character device. */
static int npf_dev_open(dev_t, int, int, lwp_t *);
static int npf_dev_close(dev_t, int, int, lwp_t *);
static int npf_dev_ioctl(dev_t, u_long, void *, int, lwp_t *);
static int npf_dev_poll(dev_t, int, lwp_t *);
static int npf_dev_read(dev_t, struct uio *, int);
/* Attach /dev/npf device. */
error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
if (error) {
/* It will call devsw_detach(), which is safe. */
(void)npf_fini();
}
#endif
return error;
}
/*
 * Module interface.
 *
 * npf_modcmd: dispatch module(9) commands for load, unload and
 * auto-unload of the npf module.
 */
static int
npf_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
		return npf_init();
	case MODULE_CMD_FINI:
		return npf_fini();
	case MODULE_CMD_AUTOUNLOAD:
		/*
		 * XXX npf_autounload_p() is insufficient here.  At least one
		 * XXX other path leads to unloading while something tries
		 * XXX later on to continue (perhaps closing of an open fd).
		 * XXX For now, just disable autounload.
		 */
		return EBUSY;
	default:
		return ENOTTY;
	}
	/* NOTREACHED: every case above returns. */
}
/*
 * npfattach: pseudo-device attach routine (autoconf glue).
 * Intentionally a no-op: device setup is performed by the module
 * init path instead.
 */
void
npfattach(int nunits)
{
	/* Void. */
}
/*
 * npf_dev_open: open the /dev/npf control device.
 * Access is restricted to the super-user via kauth(9).
 */
static int
npf_dev_open(dev_t dev, int flag, int mode, lwp_t *l)
{
	const int ret = kauth_authorize_network(l->l_cred,
	    KAUTH_NETWORK_FIREWALL, KAUTH_REQ_NETWORK_FIREWALL_FW,
	    NULL, NULL, NULL);

	return ret ? EPERM : 0;
}
/*
 * npf_dev_close: close the /dev/npf control device.
 * Nothing to do: npf_dev_open associates no state with the open.
 */
static int
npf_dev_close(dev_t dev, int flag, int mode, lwp_t *l)
{
	return 0;
}
/*
 * npf_stats_export: copy out the full set of statistics counters to the
 * user buffer whose address is passed indirectly via the ioctl data.
 */
static int
npf_stats_export(npf_t *npf, void *data)
{
	uint64_t *const uaddr = *(uint64_t **)data;
	uint64_t *stats;
	int error;

	stats = kmem_alloc(NPF_STATS_SIZE, KM_SLEEP);
	npfk_stats(npf, stats);		/* will zero the buffer */
	error = copyout(stats, uaddr, NPF_STATS_SIZE);
	kmem_free(stats, NPF_STATS_SIZE);
	return error;
}
/*
* npfctl_switch: enable or disable packet inspection.
*/
static int
npfctl_switch(void *data)
{
const bool onoff = *(int *)data ? true : false;
int error;
static int
npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
npf_t *npf = npf_getkernctx();
nvlist_t *req, *resp;
int error;
/* Available only for super-user. */
if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
return EPERM;
}
switch (cmd) {
case IOC_NPF_VERSION:
*(int *)data = NPF_VERSION;
return 0;
case IOC_NPF_SWITCH:
return npfctl_switch(data);
case IOC_NPF_TABLE:
return npfctl_table(npf, data);
case IOC_NPF_STATS:
return npf_stats_export(npf, data);
case IOC_NPF_LOAD:
case IOC_NPF_SAVE:
case IOC_NPF_RULE:
case IOC_NPF_CONN_LOOKUP:
case IOC_NPF_TABLE_REPLACE:
/* nvlist_ref_t argument, handled below */
break;
default:
return EINVAL;
}
error = nvlist_copyin(data, &req, NPF_IOCTL_DATA_LIMIT);
if (__predict_false(error)) {
#ifdef __NetBSD__
/* Until the version bump. */
if (cmd != IOC_NPF_SAVE) {
return error;
}
req = nvlist_create(0);
#else
return error;
#endif
}
resp = nvlist_create(0);
switch (cmd) {
case SIOCSIFADDR:
case SIOCAIFADDR:
case SIOCDIFADDR:
#ifdef INET6
case SIOCSIFADDR_IN6:
case SIOCAIFADDR_IN6:
case SIOCDIFADDR_IN6:
#endif
KASSERT(ifa != NULL);
break;
default:
return;
}
npf_ifaddr_sync(npf, ifa->ifa_ifp);
}
/*
* npf_pfil_register: register pfil(9) hooks.
*/
static int
npf_pfil_register(bool init)
{
npf_t *npf = npf_getkernctx();
int error = 0;
SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
/* Init: interface re-config and attach/detach hook. */
if (!npf_ph_if) {
npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
if (!npf_ph_if) {
error = ENOENT;
goto out;
}
/* Check if pfil hooks are not already registered. */
if (pfil_registered) {
error = EEXIST;
goto out;
}
/* Capture points of the activity in the IP layer. */
npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
if (!npf_ph_inet && !npf_ph_inet6) {
error = ENOENT;
goto out;
}
/* Packet IN/OUT handlers for IP layer. */
if (npf_ph_inet) {
error = pfil_add_hook(npfos_packet_handler, npf,
PFIL_ALL, npf_ph_inet);
KASSERT(error == 0);
}
if (npf_ph_inet6) {
error = pfil_add_hook(npfos_packet_handler, npf,
PFIL_ALL, npf_ph_inet6);
KASSERT(error == 0);
}
/*
* It is necessary to re-sync all/any interface address tables,
* since we did not listen for any changes.
*/
npf_ifaddr_syncall(npf);
pfil_registered = true;
out:
SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
/*
* Epoch-Based Reclamation (EBR) wrappers: in NetBSD, we rely on the
* passive serialization mechanism (see pserialize(9) manual page),
* which provides sufficient guarantees for NPF.
*/