/*
* daemon/remote.c - remote control for the unbound daemon.
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains the remote control functionality for the daemon.
* The remote control can be performed using either the commandline
* unbound-control tool, or a TLS capable web browser.
* The channel is secured using TLSv1, and certificates.
* Both the server and the client(control tool) have their own keys.
*/
#include "config.h"
#ifdef HAVE_OPENSSL_ERR_H
#include <openssl/err.h>
#endif
#ifdef HAVE_OPENSSL_DH_H
#include <openssl/dh.h>
#endif
#ifdef HAVE_OPENSSL_BN_H
#include <openssl/bn.h>
#endif
#ifdef HAVE_STDATOMIC_H
#include <stdatomic.h>
#endif
/* just for portability */
#ifdef SQ
#undef SQ
#endif
/** what to put on statistics lines between var and value, ": " or "=" */
#define SQ "="
/** Acceptable lengths of str lines */
#define MAX_CMD_STRLINE 1024
#define MAX_STDIN_STRLINE 2048
/** What number of loop iterations is too much for ipc retries */
#define IPC_LOOP_MAX 200
/** Timeout in msec for ipc socket poll. */
#define IPC_NOTIFICATION_WAIT 200
/**
* Add and open a new control port
* @param ip: ip str
* @param nr: port nr
* @param list: list head
* @param noproto_is_err: if lack of protocol support is an error.
* @param cfg: config with username for chown of unix-sockets.
* @return false on failure.
*/
static int
add_open(const char* ip, int nr, struct listen_port** list, int noproto_is_err,
struct config_file* cfg)
{
struct addrinfo hints;
struct addrinfo* res;
struct listen_port* n;
int noproto = 0;
int fd, r;
char port[15];
snprintf(port, sizeof(port), "%d", nr);
port[sizeof(port)-1]=0;
memset(&hints, 0, sizeof(hints));
log_assert(ip);
if(ip[0] == '/') {
/* This looks like a local socket */
fd = create_local_accept_sock(ip, &noproto, cfg->use_systemd);
/*
* Change socket ownership and permissions so users other
* than root can access it provided they are in the same
* group as the user we run as.
*/
if(fd != -1) {
#ifdef HAVE_CHOWN
chmod(ip, (mode_t)(S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP));
if (cfg->username && cfg->username[0] &&
cfg_uid != (uid_t)-1) {
if(chown(ip, cfg_uid, cfg_gid) == -1)
verbose(VERB_QUERY, "cannot chown %u.%u %s: %s",
(unsigned)cfg_uid, (unsigned)cfg_gid,
ip, strerror(errno));
}
#else
(void)cfg;
#endif
}
} else {
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE | AI_NUMERICHOST;
if((r = getaddrinfo(ip, port, &hints, &res)) != 0 || !res) {
#ifdef USE_WINSOCK
if(!noproto_is_err && r == EAI_NONAME) {
/* tried to lookup the address as name */
return 1; /* return success, but do nothing */
}
#endif /* USE_WINSOCK */
log_err("control interface %s:%s getaddrinfo: %s %s",
ip?ip:"default", port, gai_strerror(r),
#ifdef EAI_SYSTEM
r==EAI_SYSTEM?(char*)strerror(errno):""
#else
""
#endif
);
return 0;
}
/* perform the first nonblocking read already, for windows,
* so it can return wouldblock. could be faster too. */
(void)remote_control_callback(n->c, n, NETEVENT_NOERROR, NULL);
return 0;
}
/** unlink the state with the given comm_point from the list, if present */
static void
state_list_remove_elem(struct rc_state** list, struct comm_point* c)
{
	struct rc_state** cur;
	for(cur = list; *cur; cur = &(*cur)->next) {
		if((*cur)->c == c) {
			/* splice the matching element out of the list */
			*cur = (*cur)->next;
			return;
		}
	}
}
/** decrease active count and remove commpoint from busy list
 * Tears down the control connection state: TLS shutdown, comm point
 * delete and free of the state structure. */
static void
clean_point(struct daemon_remote* rc, struct rc_state* s)
{
	if(!s->rc) {
		/* the state has been picked up and moved away,
		 * only the state structure itself has to be freed */
		free(s);
		return;
	}
	state_list_remove_elem(&rc->busy_list, s->c);
	rc->active --;
	if(s->ssl) {
		/* orderly TLS shutdown before releasing the SSL object */
		SSL_shutdown(s->ssl);
		SSL_free(s->ssl);
	}
	comm_point_delete(s->c);
	free(s);
}
/** print text over the ssl connection, with vprintf-style arguments */
static int
ssl_print_vmsg(RES* ssl, const char* format, va_list args)
{
	char buffer[65535];
	/* format into a bounded local buffer, then write it out */
	vsnprintf(buffer, sizeof(buffer), format, args);
	return ssl_print_text(ssl, buffer);
}
/** printf style printing to the ssl connection; returns print status */
int ssl_printf(RES* ssl, const char* format, ...)
{
	int result;
	va_list ap;
	va_start(ap, format);
	result = ssl_print_vmsg(ssl, format, ap);
	va_end(ap);
	return result;
}
/** skip whitespace, return new pointer into string */
static char*
skipwhite(char* str)
{
	/* the terminating \0 is not a space, so the loop stops there */
	for(; isspace((unsigned char)*str); str++)
		;
	return str;
}
/** send the OK reply to the control client; print errors are ignored */
static void send_ok(RES* ssl)
{
	(void)ssl_printf(ssl, "ok\n");
}
/** tell other processes to execute the command
 * @param rc: the remote control state with the worker.
 * @param ssl: the control connection, for error printouts.
 * @param cmd: the command string, distributed including its \0. */
static void
distribute_cmd(struct daemon_remote* rc, RES* ssl, char* cmd)
{
	int i;
	if(!cmd || !ssl)
		return;
	/* skip i=0 which is me */
	for(i=1; i<rc->worker->daemon->num; i++) {
		/* first signal the worker that a remote cmd follows,
		 * then write the command string on its command tube */
		worker_send_cmd(rc->worker->daemon->workers[i],
			worker_cmd_remote);
		if(!tube_write_msg(rc->worker->daemon->workers[i]->cmd,
			(uint8_t*)cmd, strlen(cmd)+1, 0)) {
			(void)ssl_printf(ssl, "error could not distribute cmd\n");
			return;
		}
	}
}
/** do the stop command: flag the worker to exit and leave the event loop */
static void
do_stop(RES* ssl, struct worker* worker)
{
	worker->need_to_exit = 1;
	comm_base_exit(worker->base);
	send_ok(ssl);
}
/** do the reload command
 * @param ssl: the control connection to answer on.
 * @param worker: the worker that handles the command.
 * @param reuse_cache: if nonzero, the cache is kept over the reload. */
static void
do_reload(RES* ssl, struct worker* worker, int reuse_cache)
{
	worker->reuse_cache = reuse_cache;
	/* need_to_exit stays 0, so the daemon restarts instead of quits */
	worker->need_to_exit = 0;
	comm_base_exit(worker->base);
	send_ok(ssl);
}
/**
 * Local info for deletion functions; passed as the callback argument
 * for cache traversal, and accumulates removal counts.
 */
struct del_info {
	/** worker */
	struct worker* worker;
	/** name to delete */
	uint8_t* name;
	/** length of name */
	size_t len;
	/** labels in name */
	int labs;
	/** time to invalidate to; matching entries get their TTL set
	 * to this (in the past), so they count as expired */
	time_t expired;
	/** number of rrsets removed */
	size_t num_rrsets;
	/** number of msgs removed */
	size_t num_msgs;
	/** number of key entries removed */
	size_t num_keys;
	/** length of addr */
	socklen_t addrlen;
	/** socket address for host deletion */
	struct sockaddr_storage addr;
	/** if cachedb information should be flushed too */
	int remcachedb;
};
/** callback to delete negative and servfail rrsets */
static void
negative_del_rrset(struct lruhash_entry* e, void* arg)
{
	/* entry is locked by the traversal */
	struct del_info* inf = (struct del_info*)arg;
	struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)e->key;
	struct packed_rrset_data* d = (struct packed_rrset_data*)e->data;
	/* delete the parentside negative cache rrsets,
	 * these are nameserver rrsets that failed lookup, rdata empty */
	if((k->rk.flags & PACKED_RRSET_PARENT_SIDE) && d->count == 1 &&
		d->rrsig_count == 0 && d->rr_len[0] == 0 &&
		d->ttl > inf->expired) {
		/* not removed here; setting the TTL in the past makes
		 * the entry expired so it is not used */
		d->ttl = inf->expired;
		inf->num_rrsets++;
	}
}
/** callback to delete negative and servfail messages */
static void
negative_del_msg(struct lruhash_entry* e, void* arg)
{
	/* entry is locked by the traversal */
	struct del_info* inf = (struct del_info*)arg;
	struct reply_info* d = (struct reply_info*)e->data;
	/* rcode not NOERROR: NXDOMAIN, SERVFAIL, ..: an nxdomain or error
	 * or NOERROR rcode with ANCOUNT==0: a NODATA answer */
	if((FLAGS_GET_RCODE(d->flags) != 0 || d->an_numrrsets == 0) &&
		d->ttl > inf->expired) {
		/* expire all the TTL variants of the message */
		d->ttl = inf->expired;
		d->prefetch_ttl = inf->expired;
		d->serve_expired_ttl = inf->expired;
		inf->num_msgs++;
#ifdef USE_CACHEDB
		/* also remove it from the external cachedb if requested */
		if(inf->remcachedb && inf->worker->env.cachedb_enabled)
			cachedb_msg_remove_qinfo(&inf->worker->env,
				&((struct msgreply_entry*)e->key)->key);
#endif
	}
}
/** callback to delete negative key entries */
static void
negative_del_kcache(struct lruhash_entry* e, void* arg)
{
	/* entry is locked by the traversal */
	struct del_info* inf = (struct del_info*)arg;
	struct key_entry_data* data = (struct key_entry_data*)e->data;
	/* could be bad because of lookup failure on the DS, DNSKEY, which
	 * was nxdomain or servfail, and thus a result of negative lookups */
	if(!data->isbad)
		return;
	if(data->ttl <= inf->expired)
		return;
	/* expire the entry by moving its TTL into the past */
	data->ttl = inf->expired;
	inf->num_keys++;
}
/** remove all negative(NODATA,NXDOMAIN), and servfail messages from cache */
static void
do_flush_negative(RES* ssl, struct worker* worker, char* arg)
{
struct del_info inf;
int pc = 0; /* '+c' option */
if(!parse_remcachedb(ssl, &arg, &pc))
return;
/* what we do is to set them all expired */
inf.worker = worker;
inf.expired = *worker->env.now;
inf.expired -= 3; /* handle 3 seconds skew between threads */
inf.num_rrsets = 0;
inf.num_msgs = 0;
inf.num_keys = 0;
inf.remcachedb = pc;
slabhash_traverse(&worker->env.rrset_cache->table, 1,
&negative_del_rrset, &inf);
/** print root forwards
 * @param ssl: the control connection to print to.
 * @param fwds: forward storage; the lookup read-locks it when a
 *	delegpt is returned, and it is unlocked here.
 * @param root: the root name, in wireformat.
 * @return 0 on print failure. */
static int
print_root_fwds(RES* ssl, struct iter_forwards* fwds, uint8_t* root)
{
	struct delegpt* dp;
	int nolock = 0;
	/* nolock==0: the lookup takes the lock for us on success */
	dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN, nolock);
	if(!dp) {
		return ssl_printf(ssl, "off (using root hints)\n");
	}
	/* if dp is returned it must be the root */
	log_assert(query_dname_compare(dp->name, root)==0);
	if(!ssl_print_name_dp(ssl, NULL, root, LDNS_RR_CLASS_IN, dp)) {
		lock_rw_unlock(&fwds->lock);
		return 0;
	}
	lock_rw_unlock(&fwds->lock);
	return 1;
}
/** parse args into delegpt
 * The args are a space separated list of server addresses and/or
 * hostnames (as accepted by authextstrtoaddr/authextstrtodname).
 * @param ssl: the control connection, for error printouts.
 * @param args: the string to parse; it is modified (spaces become \0).
 * @param nm: name for the delegation point, in wireformat.
 * @return malloced delegpt, or NULL on error (message already printed). */
static struct delegpt*
parse_delegpt(RES* ssl, char* args, uint8_t* nm)
{
	/* parse args and add in */
	char* p = args;
	char* todo;
	struct delegpt* dp = delegpt_create_mlc(nm);
	struct sockaddr_storage addr;
	socklen_t addrlen;
	char* auth_name;
	if(!dp) {
		(void)ssl_printf(ssl, "error out of memory\n");
		return NULL;
	}
	while(p) {
		todo = p;
		p = strchr(p, ' '); /* find next spot, if any */
		if(p) {
			*p++ = 0; /* end this spot */
			p = skipwhite(p); /* position at next spot */
		}
		/* parse address */
		if(!authextstrtoaddr(todo, &addr, &addrlen, &auth_name)) {
			/* not an address; try it as a hostname */
			uint8_t* dname= NULL;
			int port;
			dname = authextstrtodname(todo, &port, &auth_name);
			if(!dname) {
				(void)ssl_printf(ssl, "error cannot parse"
					" '%s'\n", todo);
				delegpt_free_mlc(dp);
				return NULL;
			}
#if ! defined(HAVE_SSL_SET1_HOST) && ! defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
			if(auth_name)
				log_err("no name verification functionality in "
					"ssl library, ignored name for %s", todo);
#endif
			/* delegpt takes ownership of dname on success */
			if(!delegpt_add_ns_mlc(dp, dname, 0, auth_name, port)) {
				(void)ssl_printf(ssl, "error out of memory\n");
				free(dname);
				delegpt_free_mlc(dp);
				return NULL;
			}
		} else {
#if ! defined(HAVE_SSL_SET1_HOST) && ! defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
			if(auth_name)
				log_err("no name verification functionality in "
					"ssl library, ignored name for %s", todo);
#endif
			/* add address */
			if(!delegpt_add_addr_mlc(dp, &addr, addrlen, 0, 0,
				auth_name, -1)) {
				(void)ssl_printf(ssl, "error out of memory\n");
				delegpt_free_mlc(dp);
				return NULL;
			}
		}
	}
	dp->has_parent_side_NS = 1;
	return dp;
}
/** do the forward command
 * With no args, print the current root forwarders. With "off" remove
 * the root forward. Otherwise set the root forward to the given servers.
 * @param ssl: the control connection.
 * @param worker: the worker that handles the command.
 * @param args: NULL/empty, "off", or a list of servers. */
static void
do_forward(RES* ssl, struct worker* worker, char* args)
{
	struct iter_forwards* fwd = worker->env.fwds;
	uint8_t* root = (uint8_t*)"\000";
	int nolock = 0;
	if(!fwd) {
		(void)ssl_printf(ssl, "error: structure not allocated\n");
		return;
	}
	if(args == NULL || args[0] == 0) {
		(void)print_root_fwds(ssl, fwd, root);
		return;
	}
	/* set root forwards for this thread. since we are in remote control
	 * the actual mesh is not running, so we can freely edit it. */
	/* delete all the existing queries first */
	mesh_delete_all(worker->env.mesh);
	if(strcmp(args, "off") == 0) {
		forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root, nolock);
	} else {
		struct delegpt* dp;
		if(!(dp = parse_delegpt(ssl, args, root)))
			return;
		if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
			(void)ssl_printf(ssl, "error out of memory\n");
			return;
		}
	}
	send_ok(ssl);
}
/** get age for the mesh state
 * Prints the age of the oldest waiting reply into buf, as
 * seconds.microseconds, or "-" if no reply is waiting.
 * @param m: the mesh state.
 * @param buf: output buffer.
 * @param len: size of buf.
 * @param env: module env with the current time. */
static void
get_mesh_age(struct mesh_state* m, char* buf, size_t len,
	struct module_env* env)
{
	if(m->reply_list) {
		struct timeval d;
		struct mesh_reply* r = m->reply_list;
		/* last reply is the oldest */
		while(r && r->next)
			r = r->next;
		timeval_subtract(&d, env->now_tv, &r->start_time);
		snprintf(buf, len, ARG_LL "d.%6.6d",
			(long long)d.tv_sec, (int)d.tv_usec);
	} else {
		snprintf(buf, len, "-");
	}
}
/** get status of a mesh state
 * Writes a human readable status line for the mesh state into buf;
 * long lists are truncated to fit len.
 * @param mesh: the mesh area, for the module names.
 * @param m: the mesh state to describe.
 * @param buf: output buffer.
 * @param len: size of buf. */
static void
get_mesh_status(struct mesh_area* mesh, struct mesh_state* m,
	char* buf, size_t len)
{
	enum module_ext_state s = m->s.ext_state[m->s.curmod];
	const char *modname = mesh->mods.mod[m->s.curmod]->name;
	size_t l;
	if(strcmp(modname, "iterator") == 0 && s == module_wait_reply &&
		m->s.minfo[m->s.curmod]) {
		/* break into iterator to find out who its waiting for */
		struct iter_qstate* qstate = (struct iter_qstate*)
			m->s.minfo[m->s.curmod];
		struct outbound_list* ol = &qstate->outlist;
		struct outbound_entry* e;
		snprintf(buf, len, "%s wait for", modname);
		l = strlen(buf);
		buf += l; len -= l;
		if(ol->first == NULL)
			snprintf(buf, len, " (empty_list)");
		/* append the address of each outstanding query target,
		 * advancing buf past what was printed each time */
		for(e = ol->first; e; e = e->next) {
			snprintf(buf, len, " ");
			l = strlen(buf);
			buf += l; len -= l;
			addr_to_str(&e->qsent->addr, e->qsent->addrlen,
				buf, len);
			l = strlen(buf);
			buf += l; len -= l;
		}
	} else if(s == module_wait_subquery) {
		/* look in subs from mesh state to see what */
		char nm[LDNS_MAX_DOMAINLEN];
		struct mesh_state_ref* sub;
		snprintf(buf, len, "%s wants", modname);
		l = strlen(buf);
		buf += l; len -= l;
		if(m->sub_set.count == 0)
			snprintf(buf, len, " (empty_list)");
		RBTREE_FOR(sub, struct mesh_state_ref*, &m->sub_set) {
			/* t and c are malloced strings, freed below */
			char* t = sldns_wire2str_type(sub->s->s.qinfo.qtype);
			char* c = sldns_wire2str_class(sub->s->s.qinfo.qclass);
			dname_str(sub->s->s.qinfo.qname, nm);
			snprintf(buf, len, " %s %s %s", (t?t:"TYPE??"),
				(c?c:"CLASS??"), nm);
			l = strlen(buf);
			buf += l; len -= l;
			free(t);
			free(c);
		}
	} else {
		snprintf(buf, len, "%s is %s", modname, strextstate(s));
	}
}
/** do the dump_requestlist command
 * Prints a header and one line per query in this worker's mesh.
 * @param ssl: the control connection to print to.
 * @param worker: the worker whose mesh is dumped. */
static void
do_dump_requestlist(RES* ssl, struct worker* worker)
{
	struct mesh_area* mesh;
	struct mesh_state* m;
	int num = 0;
	char buf[LDNS_MAX_DOMAINLEN];
	char timebuf[32];
	char statbuf[10240];
	if(!ssl_printf(ssl, "thread #%d\n", worker->thread_num))
		return;
	if(!ssl_printf(ssl, "# type cl name seconds module status\n"))
		return;
	/* show worker mesh contents */
	mesh = worker->env.mesh;
	if(!mesh) return;
	RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
		/* t and c are malloced strings, freed on every path */
		char* t = sldns_wire2str_type(m->s.qinfo.qtype);
		char* c = sldns_wire2str_class(m->s.qinfo.qclass);
		dname_str(m->s.qinfo.qname, buf);
		get_mesh_age(m, timebuf, sizeof(timebuf), &worker->env);
		get_mesh_status(mesh, m, statbuf, sizeof(statbuf));
		if(!ssl_printf(ssl, "%3d %4s %2s %s %s %s\n",
			num, (t?t:"TYPE??"), (c?c:"CLASS??"), buf, timebuf,
			statbuf)) {
			free(t);
			free(c);
			return;
		}
		num++;
		free(t);
		free(c);
	}
}
/** structure for argument data for dump infra host */
struct infra_arg {
	/** the infra cache being dumped */
	struct infra_cache* infra;
	/** the SSL connection to print to */
	RES* ssl;
	/** the time now */
	time_t now;
	/** ssl failure? stop writing and skip the rest. If the tcp
	 * connection is broken, and writes fail, we then stop writing. */
	int ssl_failed;
};
/** do the view_list_local_zones command */
static void
do_view_list_local_zones(RES* ssl, struct worker* worker, char* arg)
{
	/* 0 = acquire a read lock on the found view */
	struct view* found = views_find_view(worker->env.views, arg, 0);
	if(!found) {
		ssl_printf(ssl,"no view with name: %s\n", arg);
		return;
	}
	if(found->local_zones)
		do_list_local_zones(ssl, found->local_zones);
	lock_rw_unlock(&found->lock);
}
/** do the view_list_local_data command */
static void
do_view_list_local_data(RES* ssl, struct worker* worker, char* arg)
{
	/* 0 = acquire a read lock on the found view */
	struct view* found = views_find_view(worker->env.views, arg, 0);
	if(!found) {
		ssl_printf(ssl,"no view with name: %s\n", arg);
		return;
	}
	if(found->local_zones)
		do_list_local_data(ssl, worker, found->local_zones);
	lock_rw_unlock(&found->lock);
}
/** struct for user arg ratelimit list */
struct ratelimit_list_arg {
	/** the infra cache */
	struct infra_cache* infra;
	/** the SSL to print to */
	RES* ssl;
	/** if nonzero print all entries, not only the rate limited ones */
	int all;
	/** current time */
	time_t now;
	/** if backoff is enabled */
	int backoff;
};
/** the ip ratelimit list arg uses the same structure layout */
#define ip_ratelimit_list_arg ratelimit_list_arg
/** list items in the ratelimit table */
static void
rate_list(struct lruhash_entry* e, void* arg)
{
	struct ratelimit_list_arg* larg = (struct ratelimit_list_arg*)arg;
	struct rate_key* key = (struct rate_key*)e->key;
	struct rate_data* data = (struct rate_data*)e->data;
	char dname[LDNS_MAX_DOMAINLEN];
	int limit = infra_find_ratelimit(larg->infra, key->name,
		key->namelen);
	int peak = infra_rate_max(data, larg->now, larg->backoff);
	/* unless listing everything, skip entries below their limit */
	if(!larg->all && peak < limit)
		return;
	dname_str(key->name, dname);
	ssl_printf(larg->ssl, "%s %d limit %d\n", dname, peak, limit);
}
/** list items in the ip_ratelimit table */
static void
ip_rate_list(struct lruhash_entry* e, void* arg)
{
	char ip[128];
	struct ip_ratelimit_list_arg* larg =
		(struct ip_ratelimit_list_arg*)arg;
	struct ip_rate_key* key = (struct ip_rate_key*)e->key;
	struct ip_rate_data* data = (struct ip_rate_data*)e->data;
	int limit = infra_ip_ratelimit;
	int peak = infra_rate_max(data, larg->now, larg->backoff);
	/* unless listing everything, skip entries below their limit */
	if(!larg->all && peak < limit)
		return;
	addr_to_str(&key->addr, key->addrlen, ip, sizeof(ip));
	ssl_printf(larg->ssl, "%s %d limit %d\n", ip, peak, limit);
}
/** check for name with end-of-string, space or tab after it */
static int
cmdcmp(char* p, const char* cmd, size_t len)
{
	if(strncmp(p, cmd, len) != 0)
		return 0;
	/* the command word must be terminated by EOS, space or tab */
	return p[len]==0 || p[len]==' ' || p[len]=='\t';
}
/* read the command line */
if(!ssl_read_line(res, buf, sizeof(buf))) {
return;
}
snprintf(pre, sizeof(pre), "UBCT%d ", UNBOUND_CONTROL_VERSION);
if(strcmp(magic, pre) != 0) {
verbose(VERB_QUERY, "control connection had bad "
"version %s, cmd: %s", magic, buf);
ssl_printf(res, "error version mismatch\n");
return;
}
verbose(VERB_DETAIL, "control cmd: %s", buf);
/* figure out what to do */
execute_cmd(rc, s, res, buf, rc->worker);
}
/** handle SSL_do_handshake changes to the file descriptor to wait for later
 * @param rc: the remote control state.
 * @param s: the connection state.
 * @param c: the comm point of the connection.
 * @param r: the return value of SSL_do_handshake.
 * @param r2: the SSL_get_error result for r.
 * @return 0 (always, as the callback result). */
static int
remote_handshake_later(struct daemon_remote* rc, struct rc_state* s,
	struct comm_point* c, int r, int r2)
{
	if(r2 == SSL_ERROR_WANT_READ) {
		if(s->shake_state == rc_hs_read) {
			/* try again later */
			return 0;
		}
		/* switch to waiting for read readiness */
		s->shake_state = rc_hs_read;
		comm_point_listen_for_rw(c, 1, 0);
		return 0;
	} else if(r2 == SSL_ERROR_WANT_WRITE) {
		if(s->shake_state == rc_hs_write) {
			/* try again later */
			return 0;
		}
		/* switch to waiting for write readiness */
		s->shake_state = rc_hs_write;
		comm_point_listen_for_rw(c, 0, 1);
		return 0;
	} else {
		/* a real handshake failure; log it and drop the connection */
		if(r == 0)
			log_err("remote control connection closed prematurely");
		log_addr(VERB_OPS, "failed connection from",
			&s->c->repinfo.remote_addr, s->c->repinfo.remote_addrlen);
		log_crypto_err_io("remote control failed ssl", r2);
		clean_point(rc, s);
	}
	return 0;
}
/** callback for remote control connection events; performs the TLS
 * handshake and client certificate check, then handles the request.
 * Always returns 0. */
int remote_control_callback(struct comm_point* c, void* arg, int err,
	struct comm_reply* ATTR_UNUSED(rep))
{
	RES res;
	struct rc_state* s = (struct rc_state*)arg;
	struct daemon_remote* rc = s->rc;
	int r;
	if(err != NETEVENT_NOERROR) {
		if(err==NETEVENT_TIMEOUT)
			log_err("remote control timed out");
		clean_point(rc, s);
		return 0;
	}
	if(s->ssl) {
		/* (continue to) setup the SSL connection */
		ERR_clear_error();
		r = SSL_do_handshake(s->ssl);
		if(r != 1) {
			/* not done yet: wait for readiness, or fail */
			int r2 = SSL_get_error(s->ssl, r);
			return remote_handshake_later(rc, s, c, r, r2);
		}
		s->shake_state = rc_none;
	}
	/* once handshake has completed, check authentication */
	if (!rc->use_cert) {
		verbose(VERB_ALGO, "unauthenticated remote control connection");
	} else if(SSL_get_verify_result(s->ssl) == X509_V_OK) {
#ifdef HAVE_SSL_GET1_PEER_CERTIFICATE
		X509* x = SSL_get1_peer_certificate(s->ssl);
#else
		X509* x = SSL_get_peer_certificate(s->ssl);
#endif
		/* verification passed, but a certificate must actually
		 * have been presented */
		if(!x) {
			verbose(VERB_DETAIL, "remote control connection "
				"provided no client certificate");
			clean_point(rc, s);
			return 0;
		}
		verbose(VERB_ALGO, "remote control connection authenticated");
		X509_free(x);
	} else {
		verbose(VERB_DETAIL, "remote control connection failed to "
			"authenticate with client certificate");
		clean_point(rc, s);
		return 0;
	}
	/* if OK start to actually handle the request */
	res.ssl = s->ssl;
	res.fd = c->fd;
	handle_req(rc, s, &res);
	verbose(VERB_ALGO, "remote control operation completed");
	clean_point(rc, s);
	return 0;
}
/**
* This routine polls a socket for readiness.
* @param fd: file descriptor, -1 uses no fd for a timer only.
* @param timeout: time in msec to wait. 0 means nonblocking test,
* -1 waits blocking for events.
* @param pollin: check for input event.
* @param pollout: check for output event.
* @param event: output variable, set to true if the event happens.
* It is false if there was an error or timeout.
* @return false is system call failure, also logged.
*/
static int
sock_poll_timeout(int fd, int timeout, int pollin, int pollout, int* event)
{
	int loopcount = 0;
	/* Loop if the system call returns an errno to do so, like EINTR. */
	log_assert(pollin || pollout);
	while(1) {
		struct pollfd p, *fds;
		int nfds, ret;
		if(++loopcount > IPC_LOOP_MAX) {
			/* guard against spinning forever on EINTR/EAGAIN */
			log_err("sock_poll_timeout: loop");
			if(event)
				*event = 0;
			return 0;
		}
		if(fd == -1) {
			/* no fd: the poll is used as a timer only */
			fds = NULL;
			nfds = 0;
		} else {
			fds = &p;
			nfds = 1;
			memset(&p, 0, sizeof(p));
			p.fd = fd;
#ifndef USE_WINSOCK
			/* error and hangup events; POSIX reports these
			 * in revents regardless, requesting is harmless */
			p.events = POLLERR
				| POLLHUP
				;
#endif
			if(pollin)
				p.events |= POLLIN;
			if(pollout)
				p.events |= POLLOUT;
		}
#ifndef USE_WINSOCK
		ret = poll(fds, nfds, timeout);
#else
		if(fds == NULL) {
			/* WSAPoll needs an fd array; sleep for the timer */
			Sleep(timeout);
			ret = 0;
		} else {
			ret = WSAPoll(fds, nfds, timeout);
		}
#endif
		if(ret == -1) {
#ifndef USE_WINSOCK
			if(
				errno == EINTR || errno == EAGAIN
# ifdef EWOULDBLOCK
				|| errno == EWOULDBLOCK
# endif
			) continue; /* Try again. */
#endif
			/* For WSAPoll we only get errors here:
			 * o WSAENETDOWN
			 * o WSAEFAULT
			 * o WSAEINVAL
			 * o WSAENOBUFS
			 */
			log_err("poll: %s", sock_strerror(errno));
			if(event)
				*event = 0;
			return 0;
		} else if(ret == 0) {
			/* Timeout */
			if(event)
				*event = 0;
			return 1;
		}
		break;
	}
	if(event)
		*event = 1;
	return 1;
}
/** fast reload convert fast reload notification status to string */
static const char*
fr_notification_to_string(enum fast_reload_notification status)
{
switch(status) {
case fast_reload_notification_none:
return "none";
case fast_reload_notification_done:
return "done";
case fast_reload_notification_done_error:
return "done_error";
case fast_reload_notification_exit:
return "exit";
case fast_reload_notification_exited:
return "exited";
case fast_reload_notification_printout:
return "printout";
case fast_reload_notification_reload_stop:
return "reload_stop";
case fast_reload_notification_reload_ack:
return "reload_ack";
case fast_reload_notification_reload_nopause_poll:
return "reload_nopause_poll";
case fast_reload_notification_reload_start:
return "reload_start";
default:
break;
}
return "unknown";
}
#ifndef THREADS_DISABLED
/** fast reload, poll for notification incoming. True if quit */
static int
fr_poll_for_quit(struct fast_reload_thread* fr)
{
int inevent, loopexit = 0, bcount = 0;
uint32_t cmd;
ssize_t ret;
/** fast reload thread. Send notification from the fast reload thread.
 * Blocks until the value is sent, but stays responsive to incoming
 * quit commands while waiting.
 * @param fr: the fast reload thread structure.
 * @param status: the notification value to send. */
static void
fr_send_notification(struct fast_reload_thread* fr,
	enum fast_reload_notification status)
{
	int outevent, loopexit = 0, bcount = 0;
	uint32_t cmd;
	ssize_t ret;
	verbose(VERB_ALGO, "fast reload: send notification %s",
		fr_notification_to_string(status));
	/* Make a blocking attempt to send. But meanwhile stay responsive,
	 * once in a while for quit commands. In case the server has to quit. */
	/* see if there is incoming quit signals */
	if(fr_poll_for_quit(fr))
		return;
	cmd = status;
	while(1) {
		if(++loopexit > IPC_LOOP_MAX) {
			log_err("fast reload: could not send notification");
			return;
		}
		/* wait for socket to become writable */
		if(!sock_poll_timeout(fr->commpair[1], IPC_NOTIFICATION_WAIT,
			0, 1, &outevent)) {
			log_err("fast reload: poll failed");
			return;
		}
		if(fr_poll_for_quit(fr))
			return;
		if(!outevent)
			continue;
		/* send the remaining bytes of cmd; bcount counts the
		 * bytes already sent in earlier partial sends */
		ret = send(fr->commpair[1], ((char*)&cmd)+bcount,
			sizeof(cmd)-bcount, 0);
		if(ret == -1) {
			if(
#ifndef USE_WINSOCK
				errno == EINTR || errno == EAGAIN
# ifdef EWOULDBLOCK
				|| errno == EWOULDBLOCK
# endif
#else
				WSAGetLastError() == WSAEINTR ||
				WSAGetLastError() == WSAEINPROGRESS ||
				WSAGetLastError() == WSAEWOULDBLOCK
#endif
				)
				continue; /* Try again. */
			log_err("fast reload send notification: send: %s",
				sock_strerror(errno));
			return;
		} else if(ret+(ssize_t)bcount != sizeof(cmd)) {
			/* partial send; account for it and continue */
			bcount += ret;
			if((size_t)bcount < sizeof(cmd))
				continue;
		}
		break;
	}
}
/** fast reload thread queue up text string for output
 * Appends a copy of the string to the fr_output list, under its lock.
 * @param fr: the fast reload thread structure.
 * @param msg: the text; it is copied.
 * @return false on allocation failure. */
static int
fr_output_text(struct fast_reload_thread* fr, const char* msg)
{
	char* item = strdup(msg);
	if(!item) {
		log_err("fast reload output text: strdup out of memory");
		return 0;
	}
	lock_basic_lock(&fr->fr_output_lock);
	if(!cfg_strlist_append(fr->fr_output, item)) {
		lock_basic_unlock(&fr->fr_output_lock);
		/* The item is freed by cfg_strlist_append on failure. */
		log_err("fast reload output text: append out of memory");
		return 0;
	}
	lock_basic_unlock(&fr->fr_output_lock);
	return 1;
}
/** fast reload thread output vmsg function */
static int
fr_output_vmsg(struct fast_reload_thread* fr, const char* format, va_list args)
{
	char line[1024];
	/* format into a bounded local buffer, then queue it for output */
	vsnprintf(line, sizeof(line), format, args);
	return fr_output_text(fr, line);
}
/** fast reload thread printout function, with printf arguments */
static int fr_output_printf(struct fast_reload_thread* fr,
const char* format, ...) ATTR_FORMAT(printf, 2, 3);
/** fast reload thread printout function, prints to list and signals
 * the remote control thread to move that to get written to the socket
 * of the remote control connection.
 * @param fr: the fast reload thread structure.
 * @param format: printf style format string.
 * @return false on failure (out of memory). */
static int
fr_output_printf(struct fast_reload_thread* fr, const char* format, ...)
{
	va_list args;
	int ret;
	va_start(args, format);
	ret = fr_output_vmsg(fr, format, args);
	va_end(args);
	return ret;
}
/**
 * Structure with constructed elements for use during fast reload.
 * At the start it contains the tree items for the new config.
 * After the tree items are swapped into the server, the old elements
 * are kept in here. They can then be deleted, with fr_construct_clear.
 */
struct fast_reload_construct {
	/** construct for views */
	struct views* views;
	/** construct for auth zones */
	struct auth_zones* auth_zones;
	/** construct for forwards */
	struct iter_forwards* fwds;
	/** construct for stubs */
	struct iter_hints* hints;
	/** construct for respip_set */
	struct respip_set* respip_set;
	/** construct for access control */
	struct acl_list* acl;
	/** construct for access control interface */
	struct acl_list* acl_interface;
	/** construct for tcp connection limit */
	struct tcl_list* tcl;
	/** construct for local zones */
	struct local_zones* local_zones;
	/** if there is response ip configuration in use */
	int use_response_ip;
	/** if there is an rpz zone */
	int use_rpz;
	/** construct for edns strings */
	struct edns_strings* edns_strings;
	/** construct for trust anchors */
	struct val_anchors* anchors;
	/** construct for nsec3 key size */
	size_t* nsec3_keysize;
	/** construct for nsec3 max iter */
	size_t* nsec3_maxiter;
	/** construct for nsec3 keyiter count */
	int nsec3_keyiter_count;
	/** construct for target fetch policy */
	int* target_fetch_policy;
	/** construct for max dependency depth */
	int max_dependency_depth;
	/** construct for donotquery addresses */
	struct iter_donotq* donotq;
	/** construct for private addresses and domains */
	struct iter_priv* priv;
	/** construct whitelist for capsforid names */
	struct rbtree_type* caps_white;
	/** construct for nat64 */
	struct iter_nat64 nat64;
	/** construct for wait_limits_netblock */
	struct rbtree_type wait_limits_netblock;
	/** construct for wait_limits_cookie_netblock */
	struct rbtree_type wait_limits_cookie_netblock;
	/** construct for domain limits */
	struct rbtree_type domain_limits;
	/** storage for the old configuration elements. The outer struct
	 * is allocated with malloc here, the items are from config. */
	struct config_file* oldcfg;
};
/** fast reload thread, read config */
static int
fr_read_config(struct fast_reload_thread* fr, struct config_file** newcfg)
{
/* Create new config structure. */
*newcfg = config_create();
if(!*newcfg) {
if(!fr_output_printf(fr, "config_create failed: out of memory\n"))
return 0;
fr_send_notification(fr, fast_reload_notification_printout);
return 0;
}
if(fr_poll_for_quit(fr))
return 1;
/** Check if two taglists are equal: same length and same names in order. */
static int
taglist_equal(char** tagname_a, int num_tags_a, char** tagname_b,
	int num_tags_b)
{
	int idx = 0;
	if(num_tags_a != num_tags_b)
		return 0;
	while(idx < num_tags_a) {
		/* names must match pairwise, in the same order */
		if(strcmp(tagname_a[idx], tagname_b[idx]) != 0)
			return 0;
		idx++;
	}
	return 1;
}
/** Check the change from a to b is only new entries at the end. */
static int
taglist_change_at_end(char** tagname_a, int num_tags_a, char** tagname_b,
	int num_tags_b)
{
	/* reject negative counts, and b must be strictly longer than a */
	if(num_tags_a < 0 || num_tags_b < 0 || num_tags_a >= num_tags_b)
		return 0;
	/* So, b is longer than a. The shared prefix of length num_tags_a
	 * must be identical in both lists. */
	return taglist_equal(tagname_a, num_tags_a, tagname_b, num_tags_a);
}
/** fast reload thread, check tag defines.
 * @param fr: the fast reload thread structure.
 * @param newcfg: the new config to compare the tag names with.
 * @return false on printout failure (out of memory). */
static int
fr_check_tag_defines(struct fast_reload_thread* fr, struct config_file* newcfg)
{
	/* The tags are kept in a bitlist for items. Some of them are stored
	 * in query info. If the tags change, then the old values are
	 * inaccurate. The solution is to then flush the query list.
	 * Unless the change only involves adding new tags at the end, that
	 * needs no changes. */
	if(!taglist_equal(fr->worker->daemon->cfg->tagname,
		fr->worker->daemon->cfg->num_tags, newcfg->tagname,
		newcfg->num_tags) &&
		!taglist_change_at_end(fr->worker->daemon->cfg->tagname,
		fr->worker->daemon->cfg->num_tags, newcfg->tagname,
		newcfg->num_tags)) {
		/* The tags have changed too much, the define-tag config. */
		if(fr->fr_drop_mesh)
			return 1; /* already dropping queries */
		fr->fr_drop_mesh = 1;
		fr->worker->daemon->fast_reload_drop_mesh = fr->fr_drop_mesh;
		if(!fr_output_printf(fr, "tags have changed, with "
			"'define-tag', and the queries have to be dropped "
			"for consistency, setting '+d'\n"))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
	}
	return 1;
}
/** fast reload thread, check if config item has changed, if not add to
 * the explanatory string. If cmp is true, desc is appended to str,
 * space separated; it is skipped when it does not fit in len. */
static void
fr_check_changed_cfg(int cmp, const char* desc, char* str, size_t len)
{
	size_t used, need;
	if(!cmp)
		return;
	used = strlen(str);
	need = strlen(desc);
	if(used == 0) {
		/* first item, no separating space needed */
		snprintf(str, len, "%s", desc);
		return;
	}
	/* need room for the space, the description and the \0 */
	if(len - used < need + 2)
		return;
	snprintf(str + used, len - used, " %s", desc);
}
if(changed_str[0] != 0) {
/* The new config changes some items that do not work with
* fast reload. */
if(!fr_output_printf(fr, "The config changes items that are "
"not compatible with fast_reload, perhaps do reload "
"or restart: %s", changed_str) ||
!fr_output_printf(fr, "\n"))
return 0;
fr_send_notification(fr, fast_reload_notification_printout);
return 0;
}
return 1;
}
/** fast reload thread, check nopause config items
 * @param fr: the fast reload thread structure.
 * @param newcfg: the new config to check.
 * @return false to stop the fast reload (printout already queued). */
static int
fr_check_nopause_cfg(struct fast_reload_thread* fr, struct config_file* newcfg)
{
	char changed_str[1024];
	struct config_file* cfg = fr->worker->env.cfg;
	if(!fr->fr_nopause)
		return 1; /* The nopause is not enabled, so no problem. */
	changed_str[0]=0;
	/* NOTE(review): nothing fills changed_str between here and the
	 * test below in this view, so the branch cannot trigger; confirm
	 * against the full source whether per-item checks belong here. */
	if(changed_str[0] != 0) {
		/* The new config changes some items that need a pause,
		 * to be able to update the variables. */
		if(!fr_output_printf(fr, "The config changes items that need "
			"the fast_reload +p option, for nopause, "
			"disabled to be reloaded: %s", changed_str) ||
			!fr_output_printf(fr, "\n"))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
		return 0;
	}
	return 1;
}
/** fast reload thread, clear construct information, deletes items
 * @param ct: the construct to clear; may be NULL. */
static void
fr_construct_clear(struct fast_reload_construct* ct)
{
	if(!ct)
		return;
	auth_zones_delete(ct->auth_zones);
	forwards_delete(ct->fwds);
	hints_delete(ct->hints);
	respip_set_delete(ct->respip_set);
	local_zones_delete(ct->local_zones);
	acl_list_delete(ct->acl);
	acl_list_delete(ct->acl_interface);
	tcl_list_delete(ct->tcl);
	edns_strings_delete(ct->edns_strings);
	anchors_delete(ct->anchors);
	views_delete(ct->views);
	free(ct->nsec3_keysize);
	free(ct->nsec3_maxiter);
	free(ct->target_fetch_policy);
	donotq_delete(ct->donotq);
	priv_delete(ct->priv);
	caps_white_delete(ct->caps_white);
	wait_limits_free(&ct->wait_limits_netblock);
	wait_limits_free(&ct->wait_limits_cookie_netblock);
	domain_limits_free(&ct->domain_limits);
	/* Delete the log identity here so that the global value is not
	 * reset by config_delete. */
	if(ct->oldcfg && ct->oldcfg->log_identity) {
		free(ct->oldcfg->log_identity);
		ct->oldcfg->log_identity = NULL;
	}
	config_delete(ct->oldcfg);
}
/** get memory for strlist.
 * @param p: head of the list, may be NULL.
 * @return bytes used by the list nodes and their strings. */
static size_t
getmem_config_strlist(struct config_strlist* p)
{
	size_t total = 0;
	struct config_strlist* it = p;
	while(it) {
		total += sizeof(*it) + getmem_str(it->str);
		it = it->next;
	}
	return total;
}
/** get memory for str2list.
 * @param p: head of the list, may be NULL.
 * @return bytes used by the list nodes and both strings per node. */
static size_t
getmem_config_str2list(struct config_str2list* p)
{
	size_t total = 0;
	struct config_str2list* it = p;
	while(it) {
		total += sizeof(*it);
		total += getmem_str(it->str);
		total += getmem_str(it->str2);
		it = it->next;
	}
	return total;
}
/** get memory for str3list */
static size_t
getmem_config_str3list(struct config_str3list* p)
{
size_t m = 0;
struct config_str3list* s;
for(s = p; s; s = s->next)
m += sizeof(*s) + getmem_str(s->str) + getmem_str(s->str2)
+ getmem_str(s->str3);
return m;
}
/** get memory for strbytelist */
static size_t
getmem_config_strbytelist(struct config_strbytelist* p)
{
size_t m = 0;
struct config_strbytelist* s;
for(s = p; s; s = s->next)
m += sizeof(*s) + getmem_str(s->str) + (s->str2?s->str2len:0);
return m;
}
/** get memory used by ifs array.
 * @param numifs: number of entries in the array.
 * @param ifs: array of interface strings.
 * @return bytes used by the pointer array and its strings. */
static size_t
getmem_ifs(int numifs, char** ifs)
{
	int idx;
	size_t total = numifs * sizeof(char*);
	for(idx = 0; idx < numifs; idx++)
		total += getmem_str(ifs[idx]);
	return total;
}
/** get memory for config_stub */
static size_t
getmem_config_stub(struct config_stub* p)
{
size_t m = 0;
struct config_stub* s;
for(s = p; s; s = s->next)
m += sizeof(*s) + getmem_str(s->name)
+ getmem_config_strlist(s->hosts)
+ getmem_config_strlist(s->addrs);
return m;
}
/** get memory for config_auth */
static size_t
getmem_config_auth(struct config_auth* p)
{
size_t m = 0;
struct config_auth* s;
for(s = p; s; s = s->next)
m += sizeof(*s) + getmem_str(s->name)
+ getmem_config_strlist(s->masters)
+ getmem_config_strlist(s->urls)
+ getmem_config_strlist(s->allow_notify)
+ getmem_str(s->zonefile)
+ s->rpz_taglistlen
+ getmem_str(s->rpz_action_override)
+ getmem_str(s->rpz_log_name)
+ getmem_str(s->rpz_cname);
return m;
}
/** get memory for config_view */
static size_t
getmem_config_view(struct config_view* p)
{
size_t m = 0;
struct config_view* s;
for(s = p; s; s = s->next)
m += sizeof(*s) + getmem_str(s->name)
+ getmem_config_str2list(s->local_zones)
+ getmem_config_strlist(s->local_data)
+ getmem_config_strlist(s->local_zones_nodefault)
#ifdef USE_IPSET
+ getmem_config_strlist(s->local_zones_ipset)
#endif
+ getmem_config_str2list(s->respip_actions)
+ getmem_config_str2list(s->respip_data);
return m;
}
/** get memory used by config_file item, estimate.
 * Sums the size of the struct and of all allocated strings, string lists
 * and substructures it owns. Fields listed under an #ifdef only count
 * when that feature is compiled in.
 * @param cfg: the config to measure; not modified.
 * @return estimated number of bytes in use. */
static size_t
config_file_getmem(struct config_file* cfg)
{
	size_t m = 0;
	m += sizeof(*cfg);
	/* network, TLS and HTTP settings */
	m += getmem_config_strlist(cfg->proxy_protocol_port);
	m += getmem_str(cfg->ssl_service_key);
	m += getmem_str(cfg->ssl_service_pem);
	m += getmem_str(cfg->tls_cert_bundle);
	m += getmem_config_strlist(cfg->tls_additional_port);
	m += getmem_config_strlist(cfg->tls_session_ticket_keys.first);
	m += getmem_str(cfg->tls_ciphers);
	m += getmem_str(cfg->tls_ciphersuites);
	m += getmem_str(cfg->http_endpoint);
	/* preallocated array of available outgoing ports, if present */
	m += (cfg->outgoing_avail_ports?65536*sizeof(int):0);
	m += getmem_str(cfg->target_fetch_policy);
	m += getmem_str(cfg->if_automatic_ports);
	/* listen and outgoing interface string arrays */
	m += getmem_ifs(cfg->num_ifs, cfg->ifs);
	m += getmem_ifs(cfg->num_out_ifs, cfg->out_ifs);
	/* zone configuration: hints, stubs, forwards, auth zones, views */
	m += getmem_config_strlist(cfg->root_hints);
	m += getmem_config_stub(cfg->stubs);
	m += getmem_config_stub(cfg->forwards);
	m += getmem_config_auth(cfg->auths);
	m += getmem_config_view(cfg->views);
	m += getmem_config_strlist(cfg->donotqueryaddrs);
#ifdef CLIENT_SUBNET
	m += getmem_config_strlist(cfg->client_subnet);
	m += getmem_config_strlist(cfg->client_subnet_zone);
#endif
	/* access control and connection limit lists */
	m += getmem_config_str2list(cfg->acls);
	m += getmem_config_str2list(cfg->tcp_connection_limits);
	m += getmem_config_strlist(cfg->caps_whitelist);
	m += getmem_config_strlist(cfg->private_address);
	m += getmem_config_strlist(cfg->private_domain);
	/* daemon identity, paths and logging strings */
	m += getmem_str(cfg->chrootdir);
	m += getmem_str(cfg->username);
	m += getmem_str(cfg->directory);
	m += getmem_str(cfg->logfile);
	m += getmem_str(cfg->pidfile);
	m += getmem_str(cfg->log_identity);
	m += getmem_str(cfg->identity);
	m += getmem_str(cfg->version);
	m += getmem_str(cfg->http_user_agent);
	m += getmem_str(cfg->nsid_cfg_str);
	m += (cfg->nsid?cfg->nsid_len:0);
	m += getmem_str(cfg->module_conf);
	/* trust anchor and DNSSEC related lists */
	m += getmem_config_strlist(cfg->trust_anchor_file_list);
	m += getmem_config_strlist(cfg->trust_anchor_list);
	m += getmem_config_strlist(cfg->auto_trust_anchor_file_list);
	m += getmem_config_strlist(cfg->trusted_keys_file_list);
	m += getmem_config_strlist(cfg->domain_insecure);
	m += getmem_str(cfg->val_nsec3_key_iterations);
	/* local zones, local data, tags and per-interface settings */
	m += getmem_config_str2list(cfg->local_zones);
	m += getmem_config_strlist(cfg->local_zones_nodefault);
#ifdef USE_IPSET
	m += getmem_config_strlist(cfg->local_zones_ipset);
#endif
	m += getmem_config_strlist(cfg->local_data);
	m += getmem_config_str3list(cfg->local_zone_overrides);
	m += getmem_config_strbytelist(cfg->local_zone_tags);
	m += getmem_config_strbytelist(cfg->acl_tags);
	m += getmem_config_str3list(cfg->acl_tag_actions);
	m += getmem_config_str3list(cfg->acl_tag_datas);
	m += getmem_config_str2list(cfg->acl_view);
	m += getmem_config_str2list(cfg->interface_actions);
	m += getmem_config_strbytelist(cfg->interface_tags);
	m += getmem_config_str3list(cfg->interface_tag_actions);
	m += getmem_config_str3list(cfg->interface_tag_datas);
	m += getmem_config_str2list(cfg->interface_view);
	m += getmem_config_strbytelist(cfg->respip_tags);
	m += getmem_config_str2list(cfg->respip_actions);
	m += getmem_config_str2list(cfg->respip_data);
	m += getmem_ifs(cfg->num_tags, cfg->tagname);
	/* remote control settings */
	m += getmem_config_strlist(cfg->control_ifs.first);
	m += getmem_str(cfg->server_key_file);
	m += getmem_str(cfg->server_cert_file);
	m += getmem_str(cfg->control_key_file);
	m += getmem_str(cfg->control_cert_file);
	/* scripting modules */
	m += getmem_config_strlist(cfg->python_script);
	m += getmem_config_strlist(cfg->dynlib_file);
	/* dns64 and nat64 */
	m += getmem_str(cfg->dns64_prefix);
	m += getmem_config_strlist(cfg->dns64_ignore_aaaa);
	m += getmem_str(cfg->nat64_prefix);
	/* dnstap settings */
	m += getmem_str(cfg->dnstap_socket_path);
	m += getmem_str(cfg->dnstap_ip);
	m += getmem_str(cfg->dnstap_tls_server_name);
	m += getmem_str(cfg->dnstap_tls_cert_bundle);
	m += getmem_str(cfg->dnstap_tls_client_key_file);
	m += getmem_str(cfg->dnstap_tls_client_cert_file);
	m += getmem_str(cfg->dnstap_identity);
	m += getmem_str(cfg->dnstap_version);
	/* ratelimit and edns client string lists */
	m += getmem_config_str2list(cfg->ratelimit_for_domain);
	m += getmem_config_str2list(cfg->ratelimit_below_domain);
	m += getmem_config_str2list(cfg->edns_client_strings);
	/* dnscrypt settings */
	m += getmem_str(cfg->dnscrypt_provider);
	m += getmem_config_strlist(cfg->dnscrypt_secret_key);
	m += getmem_config_strlist(cfg->dnscrypt_provider_cert);
	m += getmem_config_strlist(cfg->dnscrypt_provider_cert_rotated);
#ifdef USE_IPSECMOD
	m += getmem_config_strlist(cfg->ipsecmod_whitelist);
	m += getmem_str(cfg->ipsecmod_hook);
#endif
#ifdef USE_CACHEDB
	m += getmem_str(cfg->cachedb_backend);
	m += getmem_str(cfg->cachedb_secret);
#ifdef USE_REDIS
	m += getmem_str(cfg->redis_server_host);
	m += getmem_str(cfg->redis_replica_server_host);
	m += getmem_str(cfg->redis_server_path);
	m += getmem_str(cfg->redis_replica_server_path);
	m += getmem_str(cfg->redis_server_password);
	m += getmem_str(cfg->redis_replica_server_password);
#endif
#endif
#ifdef USE_IPSET
	m += getmem_str(cfg->ipset_name_v4);
	m += getmem_str(cfg->ipset_name_v6);
#endif
	return m;
}
/** fast reload thread, print memory used by construct of items.
 * Sums the memory of all newly constructed structures and the new config,
 * prints the total to the control channel.
 * @param fr: the fast reload thread state.
 * @param newcfg: the newly read config file contents.
 * @param ct: the construct with the newly built items.
 * @return 1 on success (also when quitting early), 0 on output failure. */
static int
fr_printmem(struct fast_reload_thread* fr,
	struct config_file* newcfg, struct fast_reload_construct* ct)
{
	size_t mem = 0;
	if(fr_poll_for_quit(fr))
		return 1;
	mem += views_get_mem(ct->views);
	mem += respip_set_get_mem(ct->respip_set);
	mem += auth_zones_get_mem(ct->auth_zones);
	mem += forwards_get_mem(ct->fwds);
	mem += hints_get_mem(ct->hints);
	mem += local_zones_get_mem(ct->local_zones);
	mem += acl_list_get_mem(ct->acl);
	mem += acl_list_get_mem(ct->acl_interface);
	mem += tcl_list_get_mem(ct->tcl);
	mem += edns_strings_get_mem(ct->edns_strings);
	mem += anchors_get_mem(ct->anchors);
	mem += sizeof(*ct->oldcfg);
	mem += config_file_getmem(newcfg);
	/* print as unsigned long; the previous (int) cast truncated the
	 * size_t total and printed a negative value above INT_MAX. */
	if(!fr_output_printf(fr, "memory use %lu bytes\n", (unsigned long)mem))
		return 0;
	fr_send_notification(fr, fast_reload_notification_printout);
	return 1;
}
/** fast reload thread, setup the acl_interface for the ports that
 * the server has.
 * @param acl_interface: the interface acl list to initialise and fill.
 * @param daemon: provides the listening port structures.
 * @return 1 on success, 0 on failure. */
static int
ct_acl_interface_setup_ports(struct acl_list* acl_interface,
	struct daemon* daemon)
{
	size_t idx;
	/* start from a clean acl_interface */
	acl_interface_init(acl_interface);
	if(!setup_acl_for_ports(acl_interface, daemon->ports[0]))
		return 0;
	if(!daemon->reuseport)
		return 1;
	/* with reuseport, every thread has its own port list */
	for(idx=1; idx<daemon->num_ports; idx++) {
		if(!setup_acl_for_ports(acl_interface, daemon->ports[idx]))
			return 0;
	}
	return 1;
}
/** fast reload, add new change to list of auth zones */
static int
fr_add_auth_zone_change(struct fast_reload_thread* fr, struct auth_zone* old_z,
struct auth_zone* new_z, int is_deleted, int is_added, int is_changed)
{
struct fast_reload_auth_change* item;
item = calloc(1, sizeof(*item));
if(!item) {
log_err("malloc failure in add auth zone change");
return 0;
}
item->old_z = old_z;
item->new_z = new_z;
item->is_deleted = is_deleted;
item->is_added = is_added;
item->is_changed = is_changed;
/** See if list of auth masters is equal */
static int
xfr_masterlist_equal(struct auth_master* list1, struct auth_master* list2)
{
struct auth_master* p1 = list1, *p2 = list2;
while(p1 && p2) {
if(!xfr_auth_master_equal(p1, p2))
return 0;
p1 = p1->next;
p2 = p2->next;
}
if(!p1 && !p2)
return 1;
return 0;
}
/** See if the list of masters has changed between two auth_xfers.
 * @param xfr1: first xfer, may be NULL.
 * @param xfr2: second xfer, may be NULL.
 * @return 1 if both NULL or both have equal probe and transfer master
 *	lists, else 0. */
static int
xfr_masters_equal(struct auth_xfer* xfr1, struct auth_xfer* xfr2)
{
	if(!xfr1 || !xfr2) {
		/* equal only when both are absent */
		return xfr1 == xfr2;
	}
	return xfr_masterlist_equal(xfr1->task_probe->masters,
			xfr2->task_probe->masters)
		&& xfr_masterlist_equal(xfr1->task_transfer->masters,
			xfr2->task_transfer->masters);
}
/** Check what has changed in auth zones, like added and deleted zones */
static int
auth_zones_check_changes(struct fast_reload_thread* fr,
struct fast_reload_construct* ct)
{
/* Check every zone in turn. */
struct auth_zone* new_z, *old_z;
struct module_env* env = &fr->worker->env;
fr->old_auth_zones = ct->auth_zones;
/* Nobody is using the new ct version yet.
* Also the ct lock is picked up before the env lock for auth_zones. */
lock_rw_rdlock(&ct->auth_zones->lock);
/* Find deleted zones by looping over the current list and looking
* up in the new tree. */
lock_rw_rdlock(&env->auth_zones->lock);
RBTREE_FOR(old_z, struct auth_zone*, &env->auth_zones->ztree) {
new_z = auth_zone_find(ct->auth_zones, old_z->name,
old_z->namelen, old_z->dclass);
if(!new_z) {
/* The zone has been removed. */
if(!fr_add_auth_zone_change(fr, old_z, NULL, 1, 0,
0)) {
lock_rw_unlock(&env->auth_zones->lock);
lock_rw_unlock(&ct->auth_zones->lock);
return 0;
}
}
}
lock_rw_unlock(&env->auth_zones->lock);
/* Find added zones by looping over new list and lookup in current. */
RBTREE_FOR(new_z, struct auth_zone*, &ct->auth_zones->ztree) {
lock_rw_rdlock(&env->auth_zones->lock);
old_z = auth_zone_find(env->auth_zones, new_z->name,
new_z->namelen, new_z->dclass);
if(!old_z) {
/* The zone has been added. */
lock_rw_unlock(&env->auth_zones->lock);
if(!fr_add_auth_zone_change(fr, NULL, new_z, 0, 1,
0)) {
lock_rw_unlock(&ct->auth_zones->lock);
return 0;
}
} else {
uint32_t old_serial = 0, new_serial = 0;
int have_old = 0, have_new = 0;
struct auth_xfer* old_xfr, *new_xfr;
lock_rw_rdlock(&new_z->lock);
lock_rw_rdlock(&old_z->lock);
new_xfr = auth_xfer_find(ct->auth_zones, new_z->name,
new_z->namelen, new_z->dclass);
old_xfr = auth_xfer_find(env->auth_zones, old_z->name,
old_z->namelen, old_z->dclass);
if(new_xfr) {
lock_basic_lock(&new_xfr->lock);
}
if(old_xfr) {
lock_basic_lock(&old_xfr->lock);
}
lock_rw_unlock(&env->auth_zones->lock);
/* Change in the auth zone can be detected. */
/* A change in serial number means that auth_xfer
* has to be updated. */
have_old = (auth_zone_get_serial(old_z,
&old_serial)!=0);
have_new = (auth_zone_get_serial(new_z,
&new_serial)!=0);
if(have_old != have_new || old_serial != new_serial
|| !xfr_masters_equal(old_xfr, new_xfr)) {
/* The zone has been changed. */
if(!fr_add_auth_zone_change(fr, old_z, new_z,
0, 0, 1)) {
lock_rw_unlock(&old_z->lock);
lock_rw_unlock(&new_z->lock);
lock_rw_unlock(&ct->auth_zones->lock);
if(new_xfr) {
lock_basic_unlock(&new_xfr->lock);
}
if(old_xfr) {
lock_basic_unlock(&old_xfr->lock);
}
return 0;
}
}
/** fast reload thread, construct from config the new items */
static int
fr_construct_from_config(struct fast_reload_thread* fr,
struct config_file* newcfg, struct fast_reload_construct* ct)
{
int have_view_respip_cfg = 0;
/* The xtree is not swapped. This contains the auth_xfer elements
* that contain tasks in progress, like zone transfers.
* The unchanged zones can keep their tasks in the tree, and thus
* the xfer elements can continue to be their callbacks. */
}
#if defined(ATOMIC_POINTER_LOCK_FREE) && defined(HAVE_LINK_ATOMIC_STORE)
/** Fast reload thread, if atomics are available, copy the config items
 * one by one with atomic store operations.
 * For each listed item: the current value is saved into oldcfg (so it can
 * be freed later), the new value is atomically stored into the live cfg,
 * and the newcfg slot is zeroed so the new value is not double-freed when
 * newcfg is deleted.
 * @param oldcfg: receives the previous values, for later deletion.
 * @param cfg: the live config that the worker threads read.
 * @param newcfg: the freshly parsed config; listed items are moved out. */
static void
fr_atomic_copy_cfg(struct config_file* oldcfg, struct config_file* cfg,
	struct config_file* newcfg)
{
/* One macro per field type; each saves old, atomically stores new, and
 * zeroes the source slot in newcfg. */
#define COPY_VAR_int(var) oldcfg->var = cfg->var; atomic_store((_Atomic int*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_ptr(var) oldcfg->var = cfg->var; atomic_store((void* _Atomic*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_unsigned_int(var) oldcfg->var = cfg->var; atomic_store((_Atomic unsigned*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_size_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic size_t*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_uint8_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic uint8_t*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_uint16_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic uint16_t*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_uint32_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic uint32_t*)&cfg->var, newcfg->var); newcfg->var = 0;
#define COPY_VAR_int32_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic int32_t*)&cfg->var, newcfg->var); newcfg->var = 0;
	/* If config file items are missing from this list, they are
	 * not updated by fast-reload +p. */
	/* For missing items, the oldcfg item is not updated, still NULL,
	 * and the cfg stays the same. The newcfg item is untouched.
	 * The newcfg item is then deleted later. */
	/* Items that need synchronisation are omitted from the list.
	 * Use fast-reload without +p to update them together. */
	COPY_VAR_int(verbosity);
	COPY_VAR_int(stat_interval);
	COPY_VAR_int(stat_cumulative);
	COPY_VAR_int(stat_extended);
	COPY_VAR_int(stat_inhibit_zero);
	COPY_VAR_int(num_threads);
	COPY_VAR_int(port);
	COPY_VAR_int(do_ip4);
	COPY_VAR_int(do_ip6);
	COPY_VAR_int(do_nat64);
	COPY_VAR_int(prefer_ip4);
	COPY_VAR_int(prefer_ip6);
	COPY_VAR_int(do_udp);
	COPY_VAR_int(do_tcp);
	COPY_VAR_size_t(max_reuse_tcp_queries);
	COPY_VAR_int(tcp_reuse_timeout);
	COPY_VAR_int(tcp_auth_query_timeout);
	COPY_VAR_int(tcp_upstream);
	COPY_VAR_int(udp_upstream_without_downstream);
	COPY_VAR_int(tcp_mss);
	COPY_VAR_int(outgoing_tcp_mss);
	COPY_VAR_int(tcp_idle_timeout);
	COPY_VAR_int(do_tcp_keepalive);
	COPY_VAR_int(tcp_keepalive_timeout);
	COPY_VAR_int(sock_queue_timeout);
	COPY_VAR_ptr(proxy_protocol_port);
	COPY_VAR_ptr(ssl_service_key);
	COPY_VAR_ptr(ssl_service_pem);
	COPY_VAR_int(ssl_port);
	COPY_VAR_int(ssl_upstream);
	COPY_VAR_ptr(tls_cert_bundle);
	COPY_VAR_int(tls_win_cert);
	COPY_VAR_ptr(tls_additional_port);
	/* The first is used to walk through the list but last is
	 * only used during config read. */
	COPY_VAR_ptr(tls_session_ticket_keys.first);
	COPY_VAR_ptr(tls_session_ticket_keys.last);
	COPY_VAR_ptr(tls_ciphers);
	COPY_VAR_ptr(tls_ciphersuites);
	COPY_VAR_int(tls_use_sni);
	COPY_VAR_int(https_port);
	COPY_VAR_ptr(http_endpoint);
	COPY_VAR_uint32_t(http_max_streams);
	COPY_VAR_size_t(http_query_buffer_size);
	COPY_VAR_size_t(http_response_buffer_size);
	COPY_VAR_int(http_nodelay);
	COPY_VAR_int(http_notls_downstream);
	COPY_VAR_int(outgoing_num_ports);
	COPY_VAR_size_t(outgoing_num_tcp);
	COPY_VAR_size_t(incoming_num_tcp);
	COPY_VAR_ptr(outgoing_avail_ports);
	COPY_VAR_size_t(edns_buffer_size);
	COPY_VAR_size_t(stream_wait_size);
	COPY_VAR_size_t(msg_buffer_size);
	COPY_VAR_size_t(msg_cache_size);
	COPY_VAR_size_t(msg_cache_slabs);
	COPY_VAR_size_t(num_queries_per_thread);
	COPY_VAR_size_t(jostle_time);
	COPY_VAR_size_t(rrset_cache_size);
	COPY_VAR_size_t(rrset_cache_slabs);
	COPY_VAR_int(host_ttl);
	COPY_VAR_size_t(infra_cache_slabs);
	COPY_VAR_size_t(infra_cache_numhosts);
	COPY_VAR_int(infra_cache_min_rtt);
	COPY_VAR_int(infra_cache_max_rtt);
	COPY_VAR_int(infra_keep_probing);
	COPY_VAR_int(delay_close);
	COPY_VAR_int(udp_connect);
	COPY_VAR_ptr(target_fetch_policy);
	COPY_VAR_int(fast_server_permil);
	COPY_VAR_size_t(fast_server_num);
	COPY_VAR_int(if_automatic);
	COPY_VAR_ptr(if_automatic_ports);
	COPY_VAR_size_t(so_rcvbuf);
	COPY_VAR_size_t(so_sndbuf);
	COPY_VAR_int(so_reuseport);
	COPY_VAR_int(ip_transparent);
	COPY_VAR_int(ip_freebind);
	COPY_VAR_int(ip_dscp);
	/* Not copied because the length and items could then not match.
	   num_ifs, ifs, num_out_ifs, out_ifs
	*/
	COPY_VAR_ptr(root_hints);
	COPY_VAR_ptr(stubs);
	COPY_VAR_ptr(forwards);
	COPY_VAR_ptr(auths);
	COPY_VAR_ptr(views);
	COPY_VAR_ptr(donotqueryaddrs);
#ifdef CLIENT_SUBNET
	COPY_VAR_ptr(client_subnet);
	COPY_VAR_ptr(client_subnet_zone);
	COPY_VAR_uint16_t(client_subnet_opcode);
	COPY_VAR_int(client_subnet_always_forward);
	COPY_VAR_uint8_t(max_client_subnet_ipv4);
	COPY_VAR_uint8_t(max_client_subnet_ipv6);
	COPY_VAR_uint8_t(min_client_subnet_ipv4);
	COPY_VAR_uint8_t(min_client_subnet_ipv6);
	COPY_VAR_uint32_t(max_ecs_tree_size_ipv4);
	COPY_VAR_uint32_t(max_ecs_tree_size_ipv6);
#endif
	COPY_VAR_ptr(acls);
	COPY_VAR_int(donotquery_localhost);
	COPY_VAR_ptr(tcp_connection_limits);
	COPY_VAR_int(harden_short_bufsize);
	COPY_VAR_int(harden_large_queries);
	COPY_VAR_int(harden_glue);
	COPY_VAR_int(harden_dnssec_stripped);
	COPY_VAR_int(harden_below_nxdomain);
	COPY_VAR_int(harden_referral_path);
	COPY_VAR_int(harden_algo_downgrade);
	COPY_VAR_int(harden_unknown_additional);
	COPY_VAR_int(use_caps_bits_for_id);
	COPY_VAR_ptr(caps_whitelist);
	COPY_VAR_ptr(private_address);
	COPY_VAR_ptr(private_domain);
	COPY_VAR_size_t(unwanted_threshold);
	COPY_VAR_int(max_ttl);
	COPY_VAR_int(min_ttl);
	COPY_VAR_int(max_negative_ttl);
	COPY_VAR_int(min_negative_ttl);
	COPY_VAR_int(prefetch);
	COPY_VAR_int(prefetch_key);
	COPY_VAR_int(deny_any);
	COPY_VAR_ptr(chrootdir);
	COPY_VAR_ptr(username);
	COPY_VAR_ptr(directory);
	COPY_VAR_ptr(logfile);
	COPY_VAR_ptr(pidfile);
	COPY_VAR_int(use_syslog);
	COPY_VAR_int(log_time_ascii);
	COPY_VAR_int(log_queries);
	COPY_VAR_int(log_replies);
	COPY_VAR_int(log_tag_queryreply);
	COPY_VAR_int(log_local_actions);
	COPY_VAR_int(log_servfail);
	COPY_VAR_ptr(log_identity);
	COPY_VAR_int(log_destaddr);
	COPY_VAR_int(hide_identity);
	COPY_VAR_int(hide_version);
	COPY_VAR_int(hide_trustanchor);
	COPY_VAR_int(hide_http_user_agent);
	COPY_VAR_ptr(identity);
	COPY_VAR_ptr(version);
	COPY_VAR_ptr(http_user_agent);
	COPY_VAR_ptr(nsid_cfg_str);
	/* Not copied because the length and items could then not match.
	   nsid;
	   nsid_len;
	*/
	COPY_VAR_ptr(module_conf);
	COPY_VAR_ptr(trust_anchor_file_list);
	COPY_VAR_ptr(trust_anchor_list);
	COPY_VAR_ptr(auto_trust_anchor_file_list);
	COPY_VAR_ptr(trusted_keys_file_list);
	COPY_VAR_ptr(domain_insecure);
	COPY_VAR_int(trust_anchor_signaling);
	COPY_VAR_int(root_key_sentinel);
	COPY_VAR_int32_t(val_date_override);
	COPY_VAR_int32_t(val_sig_skew_min);
	COPY_VAR_int32_t(val_sig_skew_max);
	COPY_VAR_int32_t(val_max_restart);
	COPY_VAR_int(bogus_ttl);
	COPY_VAR_int(val_clean_additional);
	COPY_VAR_int(val_log_level);
	COPY_VAR_int(val_log_squelch);
	COPY_VAR_int(val_permissive_mode);
	COPY_VAR_int(aggressive_nsec);
	COPY_VAR_int(ignore_cd);
	COPY_VAR_int(disable_edns_do);
	COPY_VAR_int(serve_expired);
	COPY_VAR_int(serve_expired_ttl);
	COPY_VAR_int(serve_expired_ttl_reset);
	COPY_VAR_int(serve_expired_reply_ttl);
	COPY_VAR_int(serve_expired_client_timeout);
	COPY_VAR_int(ede_serve_expired);
	COPY_VAR_int(dns_error_reporting);
	COPY_VAR_int(serve_original_ttl);
	COPY_VAR_ptr(val_nsec3_key_iterations);
	COPY_VAR_int(zonemd_permissive_mode);
	COPY_VAR_unsigned_int(add_holddown);
	COPY_VAR_unsigned_int(del_holddown);
	COPY_VAR_unsigned_int(keep_missing);
	COPY_VAR_int(permit_small_holddown);
	COPY_VAR_size_t(key_cache_size);
	COPY_VAR_size_t(key_cache_slabs);
	COPY_VAR_size_t(neg_cache_size);
	COPY_VAR_ptr(local_zones);
	COPY_VAR_ptr(local_zones_nodefault);
#ifdef USE_IPSET
	COPY_VAR_ptr(local_zones_ipset);
#endif
	COPY_VAR_int(local_zones_disable_default);
	COPY_VAR_ptr(local_data);
	COPY_VAR_ptr(local_zone_overrides);
	COPY_VAR_int(unblock_lan_zones);
	COPY_VAR_int(insecure_lan_zones);
	/* These reference tags
	COPY_VAR_ptr(local_zone_tags);
	COPY_VAR_ptr(acl_tags);
	COPY_VAR_ptr(acl_tag_actions);
	COPY_VAR_ptr(acl_tag_datas);
	*/
	COPY_VAR_ptr(acl_view);
	COPY_VAR_ptr(interface_actions);
	/* These reference tags
	COPY_VAR_ptr(interface_tags);
	COPY_VAR_ptr(interface_tag_actions);
	COPY_VAR_ptr(interface_tag_datas);
	*/
	COPY_VAR_ptr(interface_view);
	/* This references tags
	COPY_VAR_ptr(respip_tags);
	*/
	COPY_VAR_ptr(respip_actions);
	COPY_VAR_ptr(respip_data);
	/* Not copied because the length and items could then not match.
	 * also the respip module keeps a pointer to the array in its state.
	   tagname, num_tags
	*/
	COPY_VAR_int(remote_control_enable);
	/* The first is used to walk through the list but last is
	 * only used during config read. */
	COPY_VAR_ptr(control_ifs.first);
	COPY_VAR_ptr(control_ifs.last);
	COPY_VAR_int(control_use_cert);
	COPY_VAR_int(control_port);
	COPY_VAR_ptr(server_key_file);
	COPY_VAR_ptr(server_cert_file);
	COPY_VAR_ptr(control_key_file);
	COPY_VAR_ptr(control_cert_file);
	COPY_VAR_ptr(python_script);
	COPY_VAR_ptr(dynlib_file);
	COPY_VAR_int(use_systemd);
	COPY_VAR_int(do_daemonize);
	COPY_VAR_int(minimal_responses);
	COPY_VAR_int(rrset_roundrobin);
	COPY_VAR_int(unknown_server_time_limit);
	COPY_VAR_int(discard_timeout);
	COPY_VAR_int(wait_limit);
	COPY_VAR_int(wait_limit_cookie);
	COPY_VAR_ptr(wait_limit_netblock);
	COPY_VAR_ptr(wait_limit_cookie_netblock);
	COPY_VAR_size_t(max_udp_size);
	COPY_VAR_ptr(dns64_prefix);
	COPY_VAR_int(dns64_synthall);
	COPY_VAR_ptr(dns64_ignore_aaaa);
	COPY_VAR_ptr(nat64_prefix);
	COPY_VAR_int(dnstap);
	COPY_VAR_int(dnstap_bidirectional);
	COPY_VAR_ptr(dnstap_socket_path);
	COPY_VAR_ptr(dnstap_ip);
	COPY_VAR_int(dnstap_tls);
	COPY_VAR_ptr(dnstap_tls_server_name);
	COPY_VAR_ptr(dnstap_tls_cert_bundle);
	COPY_VAR_ptr(dnstap_tls_client_key_file);
	COPY_VAR_ptr(dnstap_tls_client_cert_file);
	COPY_VAR_int(dnstap_send_identity);
	COPY_VAR_int(dnstap_send_version);
	COPY_VAR_ptr(dnstap_identity);
	COPY_VAR_ptr(dnstap_version);
	COPY_VAR_int(dnstap_sample_rate);
	COPY_VAR_int(dnstap_log_resolver_query_messages);
	COPY_VAR_int(dnstap_log_resolver_response_messages);
	COPY_VAR_int(dnstap_log_client_query_messages);
	COPY_VAR_int(dnstap_log_client_response_messages);
	COPY_VAR_int(dnstap_log_forwarder_query_messages);
	COPY_VAR_int(dnstap_log_forwarder_response_messages);
	COPY_VAR_int(disable_dnssec_lame_check);
	COPY_VAR_int(ip_ratelimit);
	COPY_VAR_int(ip_ratelimit_cookie);
	COPY_VAR_size_t(ip_ratelimit_slabs);
	COPY_VAR_size_t(ip_ratelimit_size);
	COPY_VAR_int(ip_ratelimit_factor);
	COPY_VAR_int(ip_ratelimit_backoff);
	COPY_VAR_int(ratelimit);
	COPY_VAR_size_t(ratelimit_slabs);
	COPY_VAR_size_t(ratelimit_size);
	COPY_VAR_ptr(ratelimit_for_domain);
	COPY_VAR_ptr(ratelimit_below_domain);
	COPY_VAR_int(ratelimit_factor);
	COPY_VAR_int(ratelimit_backoff);
	COPY_VAR_int(outbound_msg_retry);
	COPY_VAR_int(max_sent_count);
	COPY_VAR_int(max_query_restarts);
	COPY_VAR_int(qname_minimisation);
	COPY_VAR_int(qname_minimisation_strict);
	COPY_VAR_int(shm_enable);
	COPY_VAR_int(shm_key);
	COPY_VAR_ptr(edns_client_strings);
	COPY_VAR_uint16_t(edns_client_string_opcode);
	COPY_VAR_int(dnscrypt);
	COPY_VAR_int(dnscrypt_port);
	COPY_VAR_ptr(dnscrypt_provider);
	COPY_VAR_ptr(dnscrypt_secret_key);
	COPY_VAR_ptr(dnscrypt_provider_cert);
	COPY_VAR_ptr(dnscrypt_provider_cert_rotated);
	COPY_VAR_size_t(dnscrypt_shared_secret_cache_size);
	COPY_VAR_size_t(dnscrypt_shared_secret_cache_slabs);
	COPY_VAR_size_t(dnscrypt_nonce_cache_size);
	COPY_VAR_size_t(dnscrypt_nonce_cache_slabs);
	COPY_VAR_int(pad_responses);
	COPY_VAR_size_t(pad_responses_block_size);
	COPY_VAR_int(pad_queries);
	COPY_VAR_size_t(pad_queries_block_size);
#ifdef USE_IPSECMOD
	COPY_VAR_int(ipsecmod_enabled);
	COPY_VAR_ptr(ipsecmod_whitelist);
	COPY_VAR_ptr(ipsecmod_hook);
	COPY_VAR_int(ipsecmod_ignore_bogus);
	COPY_VAR_int(ipsecmod_max_ttl);
	COPY_VAR_int(ipsecmod_strict);
#endif
#ifdef USE_CACHEDB
	COPY_VAR_ptr(cachedb_backend);
	COPY_VAR_ptr(cachedb_secret);
	COPY_VAR_int(cachedb_no_store);
	COPY_VAR_int(cachedb_check_when_serve_expired);
#ifdef USE_REDIS
	COPY_VAR_ptr(redis_server_host);
	COPY_VAR_ptr(redis_replica_server_host);
	COPY_VAR_int(redis_server_port);
	COPY_VAR_int(redis_replica_server_port);
	COPY_VAR_ptr(redis_server_path);
	COPY_VAR_ptr(redis_replica_server_path);
	COPY_VAR_ptr(redis_server_password);
	COPY_VAR_ptr(redis_replica_server_password);
	COPY_VAR_int(redis_timeout);
	COPY_VAR_int(redis_replica_timeout);
	COPY_VAR_int(redis_command_timeout);
	COPY_VAR_int(redis_replica_command_timeout);
	COPY_VAR_int(redis_connect_timeout);
	COPY_VAR_int(redis_replica_connect_timeout);
	COPY_VAR_int(redis_expire_records);
	COPY_VAR_int(redis_logical_db);
	COPY_VAR_int(redis_replica_logical_db);
#endif
#endif
	COPY_VAR_int(do_answer_cookie);
	/* Not copied because the length and content could then not match.
	   cookie_secret[40], cookie_secret_len
	*/
#ifdef USE_IPSET
	COPY_VAR_ptr(ipset_name_v4);
	COPY_VAR_ptr(ipset_name_v6);
#endif
	COPY_VAR_int(ede);
}
#endif /* ATOMIC_POINTER_LOCK_FREE && HAVE_LINK_ATOMIC_STORE */
/** fast reload thread, adjust the iterator env */
static void
fr_adjust_iter_env(struct module_env* env, struct fast_reload_construct* ct)
{
int m;
struct iter_env* iter_env = NULL;
/* There is no comparison here to see if no options changed and thus
* no swap is needed, the trees with addresses and domains can be
* large and that would take too long. Instead the trees are
* swapped in. */
/* Because the iterator env is not locked, the update cannot happen
* when fr nopause is used. Without it the fast reload pauses the
* other threads, so they are not currently using the structure. */
m = modstack_find(env->modstack, "iterator");
if(m != -1) iter_env = (struct iter_env*)env->modinfo[m];
if(iter_env) {
/* Swap the data so that the delete happens afterwards. */
int* oldtargetfetchpolicy = iter_env->target_fetch_policy;
int oldmaxdependencydepth = iter_env->max_dependency_depth;
struct iter_donotq* olddonotq = iter_env->donotq;
struct iter_priv* oldpriv = iter_env->priv;
struct rbtree_type* oldcapswhite = iter_env->caps_white;
struct iter_nat64 oldnat64 = iter_env->nat64;
/* Because the validator env is not locked, the update cannot happen
* when fr nopause is used. Without it the fast reload pauses the
* other threads, so they are not currently using the structure. */
m = modstack_find(env->modstack, "validator");
if(m != -1) val_env = (struct val_env*)env->modinfo[m];
if(val_env) {
/* Swap the arrays so that the delete happens afterwards. */
size_t* oldkeysize = val_env->nsec3_keysize;
size_t* oldmaxiter = val_env->nsec3_maxiter;
val_env->nsec3_keysize = NULL;
val_env->nsec3_maxiter = NULL;
val_env_apply_cfg(val_env, env->cfg, ct->nsec3_keysize,
ct->nsec3_maxiter, ct->nsec3_keyiter_count);
ct->nsec3_keysize = oldkeysize;
ct->nsec3_maxiter = oldmaxiter;
if(env->neg_cache) {
lock_basic_lock(&env->neg_cache->lock);
env->neg_cache->nsec3_max_iter = val_env->
nsec3_maxiter[val_env->nsec3_keyiter_count-1];
lock_basic_unlock(&env->neg_cache->lock);
}
}
}
/* The size of the infra cache and ip rates is changed
* in fr_adjust_cache. */
infra->host_ttl = cfg->host_ttl;
infra->infra_keep_probing = cfg->infra_keep_probing;
infra_dp_ratelimit = cfg->ratelimit;
infra_ip_ratelimit = cfg->ip_ratelimit;
infra_ip_ratelimit_cookie = cfg->ip_ratelimit_cookie;
infra->wait_limits_netblock = ct->wait_limits_netblock;
infra->wait_limits_cookie_netblock = ct->wait_limits_cookie_netblock;
infra->domain_limits = ct->domain_limits;
/** fast reload thread, reload config with putting the new config items
* in place and swapping out the old items. */
/* Apply the freshly constructed configuration (ct) to the live daemon:
 * swap the datastructure tree roots into env/daemon, adjust module and
 * cache state, then coordinate with the worker threads. Runs on the fast
 * reload thread while the workers are paused (or, with fr_nopause, polled
 * one at a time afterwards). Returns nonzero; see NOTE(review) below about
 * the corrupted tail of this function. */
static int
fr_reload_config(struct fast_reload_thread* fr, struct config_file* newcfg,
struct fast_reload_construct* ct)
{
struct daemon* daemon = fr->worker->daemon;
struct module_env* env = daemon->env;
/* These are constructed in the fr_construct_from_config routine. */
log_assert(ct->oldcfg);
log_assert(ct->fwds);
log_assert(ct->hints);
if(env->cfg->log_identity || ct->oldcfg->log_identity) {
/* pick up new log_identity string to use for log output. */
log_ident_set_or_default(env->cfg->log_identity);
}
/* the newcfg elements are in env->cfg, so should not be freed here. */
#if defined(ATOMIC_POINTER_LOCK_FREE) && defined(HAVE_LINK_ATOMIC_STORE)
/* if used, the routine that copies the config has zeroed items. */
if(!fr->fr_nopause)
#endif
memset(newcfg, 0, sizeof(*newcfg));
/* Quickly swap the tree roots themselves with the already allocated
* elements. This is a quick swap operation on the pointer.
* The other threads are stopped and locks are held, so that a
* consistent view of the configuration, before, and after, exists
* towards the state machine for query resolution. */
forwards_swap_tree(env->fwds, ct->fwds);
hints_swap_tree(env->hints, ct->hints);
views_swap_tree(env->views, ct->views);
acl_list_swap_tree(daemon->acl, ct->acl);
acl_list_swap_tree(daemon->acl_interface, ct->acl_interface);
tcl_list_swap_tree(daemon->tcl, ct->tcl);
local_zones_swap_tree(daemon->local_zones, ct->local_zones);
respip_set_swap_tree(env->respip_set, ct->respip_set);
daemon->use_response_ip = ct->use_response_ip;
daemon->use_rpz = ct->use_rpz;
auth_zones_swap(env->auth_zones, ct->auth_zones);
edns_strings_swap_tree(env->edns_strings, ct->edns_strings);
anchors_swap_tree(env->anchors, ct->anchors);
#ifdef USE_CACHEDB
/* Re-evaluate whether the cachedb module is active under the new
 * module configuration. */
daemon->env->cachedb_enabled = cachedb_is_enabled(&daemon->mods,
daemon->env);
#endif
#ifdef USE_DNSTAP
if(env->cfg->dnstap) {
/* With workers paused the full dnstap config can be applied;
 * in nopause mode only the log settings are safe to change. */
if(!fr->fr_nopause)
dt_apply_cfg(daemon->dtenv, env->cfg);
else dt_apply_logcfg(daemon->dtenv, env->cfg);
}
#endif
fr_adjust_cache(env, ct->oldcfg);
if(!fr->fr_nopause) {
fr_adjust_iter_env(env, ct);
fr_adjust_val_env(env, ct, ct->oldcfg);
fr_adjust_infra(env, ct);
}
/* Set globals with new config. */
config_apply(env->cfg);
if(fr_poll_for_quit(fr)) {
config_delete(newcfg);
/* NOTE(review): `ct` is already a pointer parameter; `&ct`
 * passes a struct fast_reload_construct**. Verify the
 * prototype of fr_construct_clear -- this looks like it
 * should be fr_construct_clear(ct). */
fr_construct_clear(&ct);
return 1;
}
if(fr->fr_nopause) {
/* Poll every thread, with a no-work poll item over the
* command pipe. This makes the worker thread surely move
* to deal with that event, and thus the thread is no longer
* holding, eg. a string item from the old config struct.
* And then the old config struct can safely be deleted.
* Only needed when nopause is used, because without that
* the worker threads are already waiting on a command pipe
* item. This nopause command pipe item does not take work,
* it returns immediately, so it does not delay the workers.
* They can be polled one at a time. But its processing causes
* the worker to have released data items from old config.
* This also makes sure the threads are not holding locks on
* individual items in the local_zones, views, respip_set. */
fr_send_notification(fr,
fast_reload_notification_reload_nopause_poll);
fr_poll_for_ack(fr);
}
/* NOTE(review): everything below looks spliced in from the thread
 * main routine: it references `fast_reload_thread`, which is not
 * declared in this function (the parameter is `fr`), uses labels
 * `done:`/`done_error:` and returns NULL from an int function.
 * Compare with the upstream source; this tail appears corrupted. */
if(!fr_output_printf(fast_reload_thread, "ok\n"))
goto done_error;
fr_send_notification(fast_reload_thread,
fast_reload_notification_printout);
verbose(VERB_ALGO, "stop fast reload thread");
/* If this is not an exit due to quit earlier, send regular done. */
if(!fast_reload_thread->need_to_quit)
fr_send_notification(fast_reload_thread,
fast_reload_notification_done);
/* If during the fast_reload_notification_done send,
* fast_reload_notification_exit was received, ack it. If the
* thread is exiting due to quit received earlier, also ack it.*/
done:
if(fast_reload_thread->need_to_quit)
fr_send_notification(fast_reload_thread,
fast_reload_notification_exited);
return NULL;
done_error:
verbose(VERB_ALGO, "stop fast reload thread with done_error");
fr_send_notification(fast_reload_thread,
fast_reload_notification_done_error);
return NULL;
}
#endif /* !THREADS_DISABLED */
/** fast reload thread. setup the thread info */
static int
fast_reload_thread_setup(struct worker* worker, int fr_verb, int fr_nopause,
int fr_drop_mesh)
{
struct fast_reload_thread* fr;
int numworkers = worker->daemon->num;
worker->daemon->fast_reload_thread = (struct fast_reload_thread*)
calloc(1, sizeof(*worker->daemon->fast_reload_thread));
if(!worker->daemon->fast_reload_thread)
return 0;
fr = worker->daemon->fast_reload_thread;
fr->fr_verb = fr_verb;
fr->fr_nopause = fr_nopause;
fr->fr_drop_mesh = fr_drop_mesh;
worker->daemon->fast_reload_drop_mesh = fr->fr_drop_mesh;
/* The thread id printed in logs, numworker+1 is the dnstap thread.
* This is numworkers+2. */
fr->threadnum = numworkers+2;
fr->commpair[0] = -1;
fr->commpair[1] = -1;
fr->commreload[0] = -1;
fr->commreload[1] = -1;
if(!create_socketpair(fr->commpair, worker->daemon->rand)) {
free(fr);
worker->daemon->fast_reload_thread = NULL;
return 0;
}
fr->worker = worker;
fr->fr_output = (struct config_strlist_head*)calloc(1,
sizeof(*fr->fr_output));
if(!fr->fr_output) {
sock_close(fr->commpair[0]);
sock_close(fr->commpair[1]);
free(fr);
worker->daemon->fast_reload_thread = NULL;
return 0;
}
if(!create_socketpair(fr->commreload, worker->daemon->rand)) {
sock_close(fr->commpair[0]);
sock_close(fr->commpair[1]);
free(fr->fr_output);
free(fr);
worker->daemon->fast_reload_thread = NULL;
return 0;
}
lock_basic_init(&fr->fr_output_lock);
lock_protect(&fr->fr_output_lock, fr->fr_output,
sizeof(*fr->fr_output));
return 1;
}
/** Fast reload: free every element of the auth zone change list.
 * @param auth_zone_change_list: head of the singly linked list, may be
 *	NULL. Only the list elements are freed; the zones they reference
 *	are owned elsewhere. */
static void
fr_auth_change_list_delete(
	struct fast_reload_auth_change* auth_zone_change_list)
{
	struct fast_reload_auth_change* cur = auth_zone_change_list;
	while(cur != NULL) {
		struct fast_reload_auth_change* follow = cur->next;
		free(cur);
		cur = follow;
	}
}
/** Fast reload thread: tear down and free the thread info struct.
 * Removes and frees the service event, closes the ipc sockets, flushes
 * or hands off the printq, and releases the output list and lock.
 * @param fr: the thread info, may be NULL. */
static void
fast_reload_thread_desetup(struct fast_reload_thread* fr)
{
	if(!fr)
		return;
	if(fr->service_event) {
		if(fr->service_event_is_added) {
			ub_event_del(fr->service_event);
			fr->service_event_is_added = 0;
		}
		ub_event_free(fr->service_event);
	}
	sock_close(fr->commpair[0]);
	sock_close(fr->commpair[1]);
	sock_close(fr->commreload[0]);
	sock_close(fr->commreload[1]);
	if(fr->printq) {
		/* Move any output produced by the reload thread onto the
		 * printq for the remote client. */
		fr_main_perform_printout(fr);
		if(fr_printq_empty(fr->printq)) {
			/* Nothing left to write on the fd; drop it now. */
			fr_printq_delete(fr->printq);
		} else {
			/* Keep the printq around to printout the remaining
			 * text to the remote client. Until it is done, it
			 * sits on a list, that is in the daemon struct.
			 * The event can then spool the remaining text to the
			 * remote client and eventually delete itself from the
			 * callback. */
			fr_printq_list_insert(fr->printq,
				fr->worker->daemon);
			fr->printq = NULL;
		}
	}
	lock_basic_destroy(&fr->fr_output_lock);
	if(fr->fr_output) {
		config_delstrlist(fr->fr_output->first);
		free(fr->fr_output);
	}
	fr_auth_change_list_delete(fr->auth_zone_change_list);
	free(fr);
}
/**
* Fast reload thread, send a command to the thread. Blocking on timeout.
* It handles received input from the thread, if any is received.
* @param fr: fast reload thread info.
* @param status: the notification command to send over commpair[0].
* @param check_cmds: if true, also handle commands arriving from the
*	thread while waiting for the socket to become writable.
* @param blocking: if true, poll without timeout for writability;
*	otherwise wait at most IPC_NOTIFICATION_WAIT per iteration.
*/
static void
fr_send_cmd_to(struct fast_reload_thread* fr,
enum fast_reload_notification status, int check_cmds, int blocking)
{
int outevent, loopexit = 0, bcount = 0;
uint32_t cmd;
ssize_t ret;
verbose(VERB_ALGO, "send notification to fast reload thread: %s",
fr_notification_to_string(status));
cmd = status;
/* Loop until the full 4-byte command has been written, the loop
 * bound IPC_LOOP_MAX guards against livelock. */
while(1) {
if(++loopexit > IPC_LOOP_MAX) {
log_err("send notification to fast reload: could not send notification: loop");
return;
}
if(check_cmds)
fr_check_cmd_from_thread(fr);
/* wait for socket to become writable */
if(!sock_poll_timeout(fr->commpair[0],
(blocking?-1:IPC_NOTIFICATION_WAIT),
0, 1, &outevent)) {
log_err("send notification to fast reload: poll failed");
return;
}
if(!outevent)
continue;
/* keep static analyzer happy; send(-1,..) */
log_assert(fr->commpair[0] >= 0);
/* bcount bytes already written; send the remainder. */
ret = send(fr->commpair[0], ((char*)&cmd)+bcount,
sizeof(cmd)-bcount, 0);
if(ret == -1) {
/* Transient conditions: interrupted or would block;
 * retry the poll+send. */
if(
#ifndef USE_WINSOCK
errno == EINTR || errno == EAGAIN
#  ifdef EWOULDBLOCK
|| errno == EWOULDBLOCK
#  endif
#else
WSAGetLastError() == WSAEINTR ||
WSAGetLastError() == WSAEINPROGRESS ||
WSAGetLastError() == WSAEWOULDBLOCK
#endif
)
continue; /* Try again. */
log_err("send notification to fast reload: send: %s",
sock_strerror(errno));
return;
} else if(ret+(ssize_t)bcount != sizeof(cmd)) {
/* Partial write: account for the bytes sent and
 * keep looping until the command is complete. */
bcount += ret;
if((size_t)bcount < sizeof(cmd))
continue;
}
break;
}
}
/** Fast reload: the main thread handles that the fast reload thread has
 * exited. Joins with the thread, tears down its state and clears the
 * daemon's reference to it.
 * @param fr: fast reload thread info. */
static void
fr_main_perform_done(struct fast_reload_thread* fr)
{
	struct worker* owner = fr->worker;
	verbose(VERB_ALGO, "join with fastreload thread");
	ub_thread_join(fr->tid);
	verbose(VERB_ALGO, "joined with fastreload thread");
	/* Free the thread info; fr is invalid after this call. */
	fast_reload_thread_desetup(fr);
	owner->daemon->fast_reload_thread = NULL;
}
/** Fast reload, the remote control thread handles that the fast reload thread
* has output to be printed, on the linked list that is locked. */
static void
fr_main_perform_printout(struct fast_reload_thread* fr)
{
struct config_strlist_head out;
/* Fetch the list of items to be printed */
lock_basic_lock(&fr->fr_output_lock);
out.first = fr->fr_output->first;
out.last = fr->fr_output->last;
fr->fr_output->first = NULL;
fr->fr_output->last = NULL;
lock_basic_unlock(&fr->fr_output_lock);
if(!fr->printq || !fr->printq->client_cp) {
/* There is no output socket, delete it. */
config_delstrlist(out.first);
return;
}
/* Put them on the output list, not locked because the list
* producer and consumer are both owned by the remote control thread,
* it moves the items to the list for printing in the event callback
* for the client_cp. */
cfg_strlist_append_listhead(fr->printq->to_print, &out);
/* Set the client_cp to output if not already */
if(!fr->printq->client_cp->event_added)
comm_point_listen_for_rw(fr->printq->client_cp, 0, 1);
}
/** fast reload, receive ack from workers that they are waiting, run
* by the mainthr after sending them reload_stop.
* Blocks until one byte has been received from each of the other
* worker threads over the commreload socketpair. */
static void
fr_read_ack_from_workers(struct fast_reload_thread* fr)
{
struct daemon* daemon = fr->worker->daemon;
/* Every worker sends one byte, wait for num-1 bytes. */
int count=0, total=daemon->num-1;
while(count < total) {
uint8_t r;
ssize_t ret;
ret = recv(fr->commreload[0], (void*)&r, 1, 0);
if(ret == -1) {
/* Retry on interrupt / would-block conditions. */
if(
#ifndef USE_WINSOCK
errno == EINTR || errno == EAGAIN
#  ifdef EWOULDBLOCK
|| errno == EWOULDBLOCK
#  endif
#else
WSAGetLastError() == WSAEINTR ||
WSAGetLastError() == WSAEINPROGRESS ||
WSAGetLastError() == WSAEWOULDBLOCK
#endif
)
continue; /* Try again */
log_err("worker reload ack: recv failed: %s",
sock_strerror(errno));
return;
}
/* NOTE(review): a 0-byte return (peer closed the socketpair)
 * also reaches here and is counted as an ack, logging `r`
 * uninitialized. Presumably the pair never closes during a
 * reload -- confirm, or handle ret==0 explicitly. */
count++;
verbose(VERB_ALGO, "worker reload ack from (uint8_t)%d",
(int)r);
}
}
/** fast reload, poll for reload_start in mainthr waiting on a notification
* from the fast reload thread. */
static void
fr_poll_for_reload_start(struct fast_reload_thread* fr)
{
int loopexit = 0, bcount = 0;
uint32_t cmd;
ssize_t ret;
/* Is there data? */
if(!sock_poll_timeout(fr->commpair[0], -1, 1, 0, NULL)) {
log_err("fr_poll_for_reload_start: poll failed");
return;
}
/* NOTE(review): this function is truncated. The locals loopexit,
 * bcount, cmd and ret are set up for a recv loop that is missing
 * here, and the function's closing brace is absent, so the
 * tcl_remove_old definition below ends up nested inside it.
 * Compare with the upstream source to restore the missing body. */
/**
* Remove the old tcl_addr entries from the open connections.
* They are only incremented when an accept is performed on a tcp comm point.
* @param front: listening comm ports of the worker.
*/
static void
tcl_remove_old(struct listen_dnsport* front)
{
struct listen_list* l;
l = front->cps;
while(l) {
if(l->com->type == comm_tcp_accept) {
int i;
for(i=0; i<l->com->max_tcp_count; i++) {
if(l->com->tcp_handlers[i]->tcl_addr) {
/* Because the increment of the
* connection limit was in the old
* tcl list, the new list does not
* need a decrement. With NULL it is
* not decremented when the connection
* is done, and also there is no
* reference to the old connection
* limit structure. */
l->com->tcp_handlers[i]->tcl_addr =
NULL;
}
}
}
l = l->next;
}
}
/** Fast reload, worker picks up deleted auth zone.
* Stops any zonemd lookup this worker was doing for the old zone and
* disowns zone transfer tasks held by this worker. */
static void
fr_worker_auth_del(struct worker* worker, struct fast_reload_auth_change* item,
int for_change)
{
/* NOTE(review): `released` is set but never read in the visible
 * body, and `for_change` is unused -- likely more code is missing
 * from this function, see the notes below. */
int released = 0; /* Did this routine release callbacks. */
struct auth_xfer* xfr = NULL;
lock_rw_wrlock(&item->old_z->lock);
if(item->old_z->zonemd_callback_env &&
item->old_z->zonemd_callback_env->worker == worker){
/* This worker was performing a zonemd lookup,
* stop the lookup and remove that entry. */
auth_zone_zonemd_stop_lookup(item->old_z, worker->env.mesh);
item->old_z->zonemd_callback_env = NULL;
}
lock_rw_unlock(&item->old_z->lock);
/* Take the locks needed to inspect the xfr element for the zone. */
fr_pickup_auth_locks(worker, item->old_z, item->old_z, NULL, &xfr,
NULL);
lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
lock_rw_unlock(&worker->env.auth_zones->lock);
lock_rw_unlock(&item->old_z->lock);
if(xfr) {
/* Release callbacks on the xfr, if this worker holds them. */
if(xfr->task_nextprobe->worker == worker ||
xfr->task_probe->worker == worker ||
xfr->task_transfer->worker == worker) {
released = 1;
xfr_disown_tasks(xfr, worker);
}
lock_basic_unlock(&xfr->lock);
}
/* NOTE(review): the lines below are corrupted. `probe_masters`,
 * `transfer_masters` and `loadxfr` are not declared in this
 * function, and `xfr` is dereferenced outside the if(xfr) guard
 * above (NULL dereference if no xfr was found, and its lock has
 * already been released). This master-list swap appears spliced
 * in from another routine; compare with the upstream source. */
/* The lists can be swapped in, the other xfr struct will be deleted
* afterwards. */
probe_masters = xfr->task_probe->masters;
transfer_masters = xfr->task_transfer->masters;
xfr->task_probe->masters = loadxfr->task_probe->masters;
xfr->task_transfer->masters = loadxfr->task_transfer->masters;
loadxfr->task_probe->masters = probe_masters;
loadxfr->task_transfer->masters = transfer_masters;
}
/** Fast reload, worker picks up added auth zone.
* Creates the xfr element if the new zone is a slave zone and starts
* zone transfers and lookups for it. */
static void
fr_worker_auth_add(struct worker* worker, struct fast_reload_auth_change* item,
int for_change)
{
struct auth_xfer* xfr = NULL, *loadxfr = NULL;
/* Start zone transfers and lookups. */
fr_pickup_auth_locks(worker, item->new_z, NULL, item->new_z, &xfr,
&loadxfr);
if(xfr == NULL && item->new_z->zone_is_slave) {
/* The xfr item needs to be created. The auth zones lock
* is held to make this possible. */
xfr = auth_xfer_create(worker->env.auth_zones, item->new_z);
/* NOTE(review): auth_xfer_create's result is used without a
 * NULL check, and loadxfr may be NULL here -- confirm both
 * against the callee contracts. */
auth_xfr_pickup_config(loadxfr, xfr);
/* Serial information is copied into the xfr struct. */
if(!xfr_find_soa(item->new_z, xfr)) {
xfr->serial = 0;
}
} else if(for_change && xfr) {
/* Changed zone: refresh the serial from the new zone data. */
if(!xfr_find_soa(item->new_z, xfr)) {
xfr->serial = 0;
}
}
/* Release locks in reverse order of pickup. */
lock_rw_unlock(&item->new_z->lock);
lock_rw_unlock(&worker->env.auth_zones->lock);
lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
if(loadxfr) {
lock_basic_unlock(&loadxfr->lock);
}
if(xfr) {
auth_xfer_pickup_initial_zone(xfr, &worker->env);
if(for_change) {
xfr->task_probe->only_lookup = 0;
}
lock_basic_unlock(&xfr->lock);
}
/* NOTE(review): the function's closing brace is missing here; the
 * next function definition follows immediately. The file appears
 * corrupted at this point; compare with the upstream source. */
/** Fast reload, worker picks up changed auth zone.
* Stops old transfers/probes for the old zone contents and picks up the
* new config, creating or deleting the xfr element as needed. */
static void
fr_worker_auth_cha(struct worker* worker, struct fast_reload_auth_change* item)
{
int todelete = 0;
struct auth_xfer* loadxfr = NULL, *xfr = NULL;
/* Since the zone has been changed, by rereading it from zone file,
* existing transfers and probes are likely for the old version.
* Stop them, and start new ones if needed. */
fr_worker_auth_del(worker, item, 1);
if(worker->thread_num != 0)
return;
/* The old callbacks are stopped, tasks have been disowned. The
* new config contents can be picked up. SOA information is picked
* up in the auth_add routine, as it has the new_z ready. */
/* NOTE(review): loadxfr and xfr are initialized to NULL above and
 * never assigned in the visible body -- the fr_pickup_auth_locks
 * call that should fill them (and take the locks released below)
 * appears to be missing. As written, all branches below are dead
 * and the unlock calls release locks never visibly taken. Compare
 * with the upstream source. */
/* The xfr is not there any more if the zone is not set to have
* zone transfers. Or the xfr needs to be created if it is set to
* have zone transfers. */
if(loadxfr && xfr) {
/* Copy the config from loadxfr to the xfr in current use. */
auth_xfr_pickup_config(loadxfr, xfr);
} else if(!loadxfr && xfr) {
/* Delete the xfr. */
(void)rbtree_delete(&worker->env.auth_zones->xtree,
&xfr->node);
todelete = 1;
item->new_z->zone_is_slave = 0;
} else if(loadxfr && !xfr) {
/* Create the xfr. */
xfr = auth_xfer_create(worker->env.auth_zones, item->new_z);
auth_xfr_pickup_config(loadxfr, xfr);
item->new_z->zone_is_slave = 1;
}
lock_rw_unlock(&item->new_z->lock);
lock_rw_unlock(&item->old_z->lock);
lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
lock_rw_unlock(&worker->env.auth_zones->lock);
if(loadxfr) {
lock_basic_unlock(&loadxfr->lock);
}
if(xfr) {
lock_basic_unlock(&xfr->lock);
}
if(todelete) {
auth_xfer_delete(xfr);
}
/* NOTE(review): the function's closing brace is missing here; the
 * next definition follows immediately. File corrupted at this
 * point; compare with the upstream source. */
/** A worker picks up the changes from a completed fast reload and stores
 * them in its worker-specific structures: mesh state, tcp connection
 * limit references, auth zone changes and cachedb/outside-network
 * settings. Called from the reload synchronization points. */
void
fast_reload_worker_pickup_changes(struct worker* worker)
{
/* The pickup of changes is called when the fast reload has
* a syncronized moment, and all the threads are paused and the
* reload has been applied. Then the worker can pick up the new
* changes and store them in worker-specific structs.
* The pickup is also called when there is no pause, and then
* it is called after the reload has completed, and the worker
* get a signal to release old information, it can then pick
* up the new information. But in the mean time, the reload has
* swapped in trees, and the worker has been running with the
* older information for some time. */
fr_worker_pickup_mesh(worker);
/* If the tcp connection limit has changed, the open connections
* need to remove their reference for the old tcp limits counters. */
if(worker->daemon->fast_reload_tcl_has_changes)
tcl_remove_old(worker->front);
/* If there are zonemd lookups, but the zone was deleted, the
* lookups should be cancelled. */
/* NOTE(review): fast_reload_thread is dereferenced unconditionally
 * here; presumably it is always non-NULL when this routine runs
 * (during a reload) -- confirm against the callers. */
fr_worker_pickup_auth_changes(worker,
worker->daemon->fast_reload_thread->auth_zone_change_list);
#ifdef USE_CACHEDB
worker->env.cachedb_enabled = worker->daemon->env->cachedb_enabled;
#endif
fr_worker_pickup_outside_network(worker);
}
/** fast reload thread, handle reload_stop notification, send reload stop
* to other threads over IPC and collect their ack. When that is done,
* ack to the caller, the fast reload thread, and wait for it to send start.
* This runs on the main (remote servicing) worker thread and implements
* the pause/resume protocol around the configuration swap. */
static void
fr_main_perform_reload_stop(struct fast_reload_thread* fr)
{
struct daemon* daemon = fr->worker->daemon;
int i;
/* Send reload_stop to other threads. */
for(i=0; i<daemon->num; i++) {
if(i == fr->worker->thread_num)
continue; /* Do not send to ourselves. */
worker_send_cmd(daemon->workers[i], worker_cmd_reload_stop);
}
/* Wait for the other threads to ack. */
fr_read_ack_from_workers(fr);
/* Send ack to fast reload thread. */
fr_send_cmd_to(fr, fast_reload_notification_reload_ack, 0, 1);
/* Wait for reload_start from fast reload thread to resume.
 * During this wait the reload thread swaps in the new config. */
fr_poll_for_reload_start(fr);
/* Send reload_start to other threads */
for(i=0; i<daemon->num; i++) {
if(i == fr->worker->thread_num)
continue; /* Do not send to ourselves. */
worker_send_cmd(daemon->workers[i], worker_cmd_reload_start);
}
/* Pick up changes for this worker. */
if(fr->worker->daemon->fast_reload_drop_mesh) {
verbose(VERB_ALGO, "worker: drop mesh queries after reload");
mesh_delete_all(fr->worker->env.mesh);
}
fast_reload_worker_pickup_changes(fr->worker);
/* Wait for the other threads to ack. */
fr_read_ack_from_workers(fr);
/* Send ack to fast reload thread. */
fr_send_cmd_to(fr, fast_reload_notification_reload_ack, 0, 1);
verbose(VERB_ALGO, "worker resume after reload");
}
/** Fast reload: the main thread performs the nopause poll. Every other
 * worker thread is briefly polled over the command pipe ipc; the command
 * takes no time for the worker and it can return immediately. After all
 * acks are in, this worker picks up its own changes and acknowledges to
 * the fast reload thread.
 * @param fr: fast reload thread info. */
static void
fr_main_perform_reload_nopause_poll(struct fast_reload_thread* fr)
{
	struct daemon* daemon = fr->worker->daemon;
	int tnum;
	/* Ask every other worker to handle a no-work poll command; they
	 * can respond one at a time. */
	for(tnum = 0; tnum < daemon->num; tnum++) {
		if(tnum == fr->worker->thread_num)
			continue; /* Do not send to ourselves. */
		worker_send_cmd(daemon->workers[tnum],
			worker_cmd_reload_poll);
	}
	/* Collect one ack per polled worker. */
	fr_read_ack_from_workers(fr);
	/* This worker picks up the reload changes itself. */
	fast_reload_worker_pickup_changes(fr->worker);
	/* Report completion to the fast reload thread. */
	fr_send_cmd_to(fr, fast_reload_notification_reload_ack, 0, 1);
}
/** Fast reload: dispatch a command received from the fast reload thread
 * to the matching handler on the main thread.
 * @param fr: fast reload thread info.
 * @param status: the received notification. */
static void
fr_main_perform_cmd(struct fast_reload_thread* fr,
	enum fast_reload_notification status)
{
	verbose(VERB_ALGO, "main perform fast reload status: %s",
		fr_notification_to_string(status));
	switch(status) {
	case fast_reload_notification_printout:
		fr_main_perform_printout(fr);
		break;
	/* All three completion notifications join with the thread. */
	case fast_reload_notification_done:
	case fast_reload_notification_done_error:
	case fast_reload_notification_exited:
		fr_main_perform_done(fr);
		break;
	case fast_reload_notification_reload_stop:
		fr_main_perform_reload_stop(fr);
		break;
	case fast_reload_notification_reload_nopause_poll:
		fr_main_perform_reload_nopause_poll(fr);
		break;
	default:
		log_err("main received unknown status from fast reload: %d %s",
			(int)status, fr_notification_to_string(status));
		break;
	}
}
/** Fast reload, poll for and handle cmd from fast reload thread.
* Loops handling commands until there is no readable data on the ipc
* socket or the thread struct has been deleted (thread exited). */
static void
fr_check_cmd_from_thread(struct fast_reload_thread* fr)
{
int inevent = 0;
struct worker* worker = fr->worker;
/* Stop in case the thread has exited, or there is no read event. */
while(worker->daemon->fast_reload_thread) {
if(!sock_poll_timeout(fr->commpair[0], 0, 1, 0, &inevent)) {
log_err("check for cmd from fast reload thread: "
"poll failed");
#ifdef USE_WINSOCK
/* Re-arm the winsock event so further readiness is seen. */
if(worker->daemon->fast_reload_thread)
ub_winsock_tcp_wouldblock(worker->daemon->
fast_reload_thread->service_event,
UB_EV_READ);
#endif
return;
}
if(!inevent) {
#ifdef USE_WINSOCK
if(worker->daemon->fast_reload_thread)
ub_winsock_tcp_wouldblock(worker->daemon->
fast_reload_thread->service_event,
UB_EV_READ);
#endif
return;
}
fr_main_handle_cmd(fr);
}
}
/* NOTE(review): the lines below are a stray fragment -- they sit outside
 * any function, reference identifiers (`fast_reload_thread`, `worker`)
 * not in scope, and look like the tail of fast_reload_service_cb.
 * File corrupted here; compare with the upstream source. */
/* Read and handle the command */
fr_main_handle_cmd(fast_reload_thread);
if(worker->daemon->fast_reload_thread != NULL) {
/* If not exited, see if there are more pending statuses
* from the fast reload thread. */
fr_check_cmd_from_thread(fast_reload_thread);
}
}
#ifdef HAVE_SSL
/** fast reload, send client item over SSL. Returns number of bytes
* printed, 0 on wait later, or -1 on failure.
* Writes the remainder of printq->client_item (from client_byte_count
* up to client_len) to the remote control client. */
static int
fr_client_send_item_ssl(struct fast_reload_printq* printq)
{
int r;
ERR_clear_error();
r = SSL_write(printq->remote.ssl,
printq->client_item+printq->client_byte_count,
printq->client_len - printq->client_byte_count);
if(r <= 0) {
int want = SSL_get_error(printq->remote.ssl, r);
if(want == SSL_ERROR_ZERO_RETURN) {
/* Peer closed the TLS connection. */
log_err("fast_reload print to remote client: "
"SSL_write says connection closed.");
return -1;
} else if(want == SSL_ERROR_WANT_READ) {
/* wait for read condition; renegotiation needs a read
 * before the write can proceed. */
printq->client_cp->ssl_shake_state = comm_ssl_shake_hs_read;
comm_point_listen_for_rw(printq->client_cp, 1, 0);
return 0;
} else if(want == SSL_ERROR_WANT_WRITE) {
#ifdef USE_WINSOCK
ub_winsock_tcp_wouldblock(comm_point_internal(printq->client_cp), UB_EV_WRITE);
#endif
return 0; /* write more later */
} else if(want == SSL_ERROR_SYSCALL) {
#ifdef EPIPE
if(errno == EPIPE && verbosity < 2) {
/* silence 'broken pipe' */
return -1;
}
#endif
if(errno != 0)
log_err("fast_reload print to remote client: "
"SSL_write syscall: %s",
sock_strerror(errno));
return -1;
}
/* Other SSL errors: log the crypto error stack. */
log_crypto_err_io("fast_reload print to remote client: "
"could not SSL_write", want);
return -1;
}
return r;
}
#endif /* HAVE_SSL */
/** fast reload, send current client item. false on failure or wait later.
* Dispatches to the SSL or plain-fd writer and updates the partial-write
* byte count. Returns 1 when the whole item has been written. */
static int
fr_client_send_item(struct fast_reload_printq* printq)
{
int r;
/* Note the brace structure across the ifdef: with HAVE_SSL this is
 * an if/else on remote.ssl; without it only the fd path compiles. */
#ifdef HAVE_SSL
if(printq->remote.ssl) {
r = fr_client_send_item_ssl(printq);
} else {
#endif
r = fr_client_send_item_fd(printq);
#ifdef HAVE_SSL
}
#endif
if(r == 0) {
/* Wait for later. */
return 0;
} else if(r == -1) {
/* It failed, close comm point and stop sending. */
fr_printq_remove(printq);
return 0;
}
/* Account for the bytes written so far. */
printq->client_byte_count += r;
if(printq->client_byte_count < printq->client_len)
return 0; /* Print more later. */
return 1;
}
/** Fast reload: pop the next text item off the to_print queue and make
 * it the current client item. Resets the byte counters; when the queue
 * is empty the client item becomes NULL.
 * @param printq: the print queue for the remote client. */
static void
fr_client_pickup_next_item(struct fast_reload_printq* printq)
{
	struct config_strlist* head = printq->to_print->first;
	printq->client_len = 0;
	printq->client_byte_count = 0;
	if(!head) {
		/* Queue is empty; nothing to print. */
		printq->client_item = NULL;
		return;
	}
	/* Unlink the head element from the queue. */
	printq->to_print->first = head->next;
	if(head->next == NULL)
		printq->to_print->last = NULL;
	head->next = NULL;
	/* Take ownership of the string and discard the list element. */
	printq->client_item = head->str;
	head->str = NULL;
	free(head);
	/* The len is the number of bytes to print out, and thus excludes
	 * the terminator zero. */
	if(printq->client_item)
		printq->client_len = (int)strlen(printq->client_item);
}
/** Comm point callback for the remote control client during fast reload:
 * spools queued printout text to the client, handling TLS read/write
 * conditions, and removes the printq on error or when all text is sent. */
int fast_reload_client_callback(struct comm_point* ATTR_UNUSED(c), void* arg,
int err, struct comm_reply* ATTR_UNUSED(rep))
{
struct fast_reload_printq* printq = (struct fast_reload_printq*)arg;
if(!printq->client_cp) {
fr_printq_remove(printq);
return 0; /* the output is closed and deleted */
}
if(err != NETEVENT_NOERROR) {
verbose(VERB_ALGO, "fast reload client: error, close it");
fr_printq_remove(printq);
return 0;
}
#ifdef HAVE_SSL
if(printq->client_cp->ssl_shake_state == comm_ssl_shake_hs_read) {
/* read condition satisfied back to writing */
comm_point_listen_for_rw(printq->client_cp, 0, 1);
printq->client_cp->ssl_shake_state = comm_ssl_shake_none;
}
#endif /* HAVE_SSL */
/* Pickup an item if there are none */
if(!printq->client_item) {
fr_client_pickup_next_item(printq);
}
if(!printq->client_item) {
if(printq->in_list) {
/* Nothing more to print, it can be removed. */
fr_printq_remove(printq);
return 0;
}
/* Done with printing for now. */
comm_point_stop_listening(printq->client_cp);
return 0;
}
/* Try to print out a number of items, if they can print in full. */
while(printq->client_item) {
/* Send current item, if any. */
if(printq->client_item && printq->client_len != 0 &&
printq->client_byte_count < printq->client_len) {
/* Partial or blocked write: come back on next event. */
if(!fr_client_send_item(printq))
return 0;
}
/* The current item is done. */
if(printq->client_item) {
free(printq->client_item);
printq->client_item = NULL;
printq->client_len = 0;
printq->client_byte_count = 0;
}
if(!printq->to_print->first) {
if(printq->in_list) {
/* Nothing more to print, it can be removed. */
fr_printq_remove(printq);
return 0;
}
/* Done with printing for now. */
comm_point_stop_listening(printq->client_cp);
return 0;
}
fr_client_pickup_next_item(printq);
}
/* NOTE(review): the function's closing brace (and final return for
 * the int return type) is missing here; the next definition follows
 * immediately. File corrupted at this point; compare with the
 * upstream source. */
/** Fast reload printq: return true (1) when the queue holds no entries
 * and there is no partially printed current item, else false (0). */
static int
fr_printq_empty(struct fast_reload_printq* printq)
{
	return (printq->to_print->first == NULL &&
		printq->client_item == NULL) ? 1 : 0;
}
/** Fast reload printq: delete every printq on the list.
 * @param list: head of the printq list, may be NULL. */
void
fast_reload_printq_list_delete(struct fast_reload_printq* list)
{
	struct fast_reload_printq* cur, *after;
	for(cur = list; cur; cur = after) {
		/* Save the successor before the element is freed. */
		after = cur->next;
		fr_printq_delete(cur);
	}
}
/** Fast reload printq: unlink the item from the doubly linked printq
 * list that is headed in the daemon struct, and clear its in_list flag.
 * @param printq: the item to unlink; must currently be on the list. */
static void
fr_printq_list_remove(struct fast_reload_printq* printq)
{
	struct daemon* daemon = printq->worker->daemon;
	if(printq->prev)
		printq->prev->next = printq->next;
	else	daemon->fast_reload_printq_list = printq->next;
	if(printq->next)
		printq->next->prev = printq->prev;
	printq->in_list = 0;
}
/** fast reload printq, remove the printq when no longer needed,
* like the stream is closed. */
static void
fr_printq_remove(struct fast_reload_printq* printq)
{
if(!printq)
return;
if(printq->worker->daemon->fast_reload_thread &&
printq->worker->daemon->fast_reload_thread->printq == printq)
printq->worker->daemon->fast_reload_thread->printq = NULL;
if(printq->in_list)
fr_printq_list_remove(printq);
fr_printq_delete(printq);
}
/** fast reload thread, send stop command to the thread, from the main thread.
* check_cmds=1 so pending output from the thread is handled while
* waiting; blocking=0 so the send polls with the notification timeout. */
static void
fr_send_stop(struct fast_reload_thread* fr)
{
fr_send_cmd_to(fr, fast_reload_notification_exit, 1, 0);
}
/** Start the fast reload: set up the thread info, the nonblocking ipc
 * service event on the remote servicing thread, and hand the remote
 * control client connection over to a printq for reload output. */
void
fast_reload_thread_start(RES* ssl, struct worker* worker, struct rc_state* s,
int fr_verb, int fr_nopause, int fr_drop_mesh)
{
if(worker->daemon->fast_reload_thread) {
log_err("fast reload thread already running");
return;
}
if(!fast_reload_thread_setup(worker, fr_verb, fr_nopause,
fr_drop_mesh)) {
if(!ssl_printf(ssl, "error could not setup thread\n"))
return;
return;
}
worker->daemon->fast_reload_thread->started = 1;
#ifndef THREADS_DISABLED
/* Setup command listener in remote servicing thread */
/* The listener has to be nonblocking, so the the remote servicing
* thread can continue to service DNS queries, the fast reload
* thread is going to read the config from disk and apply it. */
/* The commpair[1] element can stay blocking, it is used by the
* fast reload thread to communicate back. The thread needs to wait
* at these times, when it has to check briefly it can use poll. */
fd_set_nonblock(worker->daemon->fast_reload_thread->commpair[0]);
worker->daemon->fast_reload_thread->service_event = ub_event_new(
comm_base_internal(worker->base),
worker->daemon->fast_reload_thread->commpair[0],
UB_EV_READ | UB_EV_PERSIST, fast_reload_service_cb,
worker->daemon->fast_reload_thread);
if(!worker->daemon->fast_reload_thread->service_event) {
fast_reload_thread_desetup(worker->daemon->fast_reload_thread);
if(!ssl_printf(ssl, "error out of memory\n"))
return;
return;
}
if(ub_event_add(worker->daemon->fast_reload_thread->service_event,
NULL) != 0) {
fast_reload_thread_desetup(worker->daemon->fast_reload_thread);
if(!ssl_printf(ssl, "error out of memory adding service event\n"))
return;
return;
}
worker->daemon->fast_reload_thread->service_event_is_added = 1;
/* Setup the comm point to the remote control client as an event
* on the remote servicing thread, which it already is.
* It needs a new callback to service it. */
log_assert(s);
/* Take the connection out of the remote control busy list; the
 * reload printq owns it from here on. */
state_list_remove_elem(&s->rc->busy_list, s->c);
s->rc->active --;
/* Set the comm point file descriptor to nonblocking. So that
* printout to the remote control client does not block the
* server thread from servicing DNS queries. */
fd_set_nonblock(s->c->fd);
worker->daemon->fast_reload_thread->printq = fr_printq_create(s->c,
worker);
if(!worker->daemon->fast_reload_thread->printq) {
fast_reload_thread_desetup(worker->daemon->fast_reload_thread);
if(!ssl_printf(ssl, "error out of memory create printq\n"))
return;
return;
}
worker->daemon->fast_reload_thread->printq->remote = *ssl;
s->rc = NULL; /* move away the rc state */
/* Nothing to print right now, so no need to have it active. */
comm_point_stop_listening(worker->daemon->fast_reload_thread->printq->client_cp);
/* NOTE(review): this function is truncated -- the call that actually
 * creates the reload thread (ub_thread_create), the THREADS_DISABLED
 * alternative, and the function's closing brace are missing; the
 * next definition follows immediately. Compare with the upstream
 * source. */
/** Stop the fast reload thread: send it the exit command and, if it has
 * not already been joined and cleaned up, join with it now.
 * @param fast_reload_thread: the thread info, may be NULL (no-op). */
void
fast_reload_thread_stop(struct fast_reload_thread* fast_reload_thread)
{
	struct worker* worker;
	if(!fast_reload_thread)
		return;
	/* Bug fix: the worker pointer was previously fetched before the
	 * NULL check above, dereferencing a possibly NULL argument. */
	worker = fast_reload_thread->worker;
	fr_send_stop(fast_reload_thread);
	if(worker->daemon->fast_reload_thread != NULL) {
		/* If it did not exit yet, join with the thread now. It is
		 * going to exit because the exit command is sent to it. */
		fr_main_perform_done(fast_reload_thread);
	}
}