/*
* Copyright (c) 2011 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Coyote Point Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Reduces the resources demanded by TCP sessions in the TIME_WAIT state using
* methods called Vestigial Time-Wait (VTW) and Maximum Segment Lifetime
* Truncation (MSLT).
*
* MSLT and VTW were contributed by Coyote Point Systems, Inc.
*
* Even after a TCP session enters the TIME_WAIT state, its corresponding
* socket and protocol control blocks (PCBs) stick around until the TCP
* Maximum Segment Lifetime (MSL) expires. On a host whose workload
* necessarily creates and closes down many TCP sockets, the sockets & PCBs
* for TCP sessions in TIME_WAIT state amount to many megabytes of dead
* weight in RAM.
*
 * Maximum Segment Lifetime Truncation (MSLT) assigns each TCP session to
* a class based on the nearness of the peer. Corresponding to each class
* is an MSL, and a session uses the MSL of its class. The classes are
* loopback (local host equals remote host), local (local host and remote
* host are on the same link/subnet), and remote (local host and remote
* host communicate via one or more gateways). Classes corresponding to
* nearer peers have lower MSLs by default: 2 seconds for loopback, 10
* seconds for local, 60 seconds for remote. Loopback and local sessions
* expire more quickly when MSLT is used.
*
* Vestigial Time-Wait (VTW) replaces a TIME_WAIT session's PCB/socket
* dead weight with a compact representation of the session, called a
* "vestigial PCB". VTW data structures are designed to be very fast and
* memory-efficient: for fast insertion and lookup of vestigial PCBs,
* the PCBs are stored in a hash table that is designed to minimize the
 * number of cacheline visits per lookup/insertion. The memory both
 * for vestigial PCBs and for elements of the PCB hashtable comes from
* fixed-size pools, and linked data structures exploit this to conserve
* memory by representing references with a narrow index/offset from the
* start of a pool instead of a pointer. When space for new vestigial PCBs
* runs out, VTW makes room by discarding old vestigial PCBs, oldest first.
* VTW cooperates with MSLT.
*
* It may help to think of VTW as a "FIN cache" by analogy to the SYN
* cache.
*
* A 2.8-GHz Pentium 4 running a test workload that creates TIME_WAIT
* sessions as fast as it can is approximately 17% idle when VTW is active
* versus 0% idle when VTW is inactive. It has 103 megabytes more free RAM
* when VTW is active (approximately 64k vestigial PCBs are created) than
* when it is inactive.
*/
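/*
 * Both mechanisms are controlled at run time via sysctl.  A sketch of
 * operator usage (illustrative; the knob names are the ones the TCP
 * sysctl tree is expected to wire up elsewhere, and are assumptions
 * here, not definitions made in this file):
 *
 *	sysctl -w net.inet.tcp.vtw_enable=1	# use vestigial PCBs
 *	sysctl -w net.inet.tcp.msl_enable=1	# use per-class MSLs
 */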
/* We provide state for the lookup_ports iterator.
 * As we are currently netlock-protected, there is one.
 * If locking were finer-grained, we would have one per CPU.
 * I do not want to be in the business of alloc/free.
 * The best alternative would be to allocate on the caller's
 * stack, but that would require them to know the struct,
 * or at least the size.
* See how she goes.
*/
struct tcp_ports_iterator {
	union {
		struct in_addr	v4;
		struct in6_addr	v6;
	}		addr;
	u_int		port;

	uint32_t	wild	: 1;

	vtw_ctl_t	*ctl;
	fatp_t		*fp;

	uint16_t	slot_idx;
};

static struct tcp_ports_iterator tcp_ports_iterator_v4;
static struct tcp_ports_iterator tcp_ports_iterator_v6;
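/* Usage sketch for the iterator (illustrative: tcp_init_ports_v4() is
 * an assumed name for the v4 initialiser whose tail appears further
 * below; tcp_next_port_v4() is as defined in this file):
 *
 *	struct tcp_ports_iterator	*it;
 *	struct vestigial_inpcb		res;
 *
 *	it = tcp_init_ports_v4(addr, port, wild);
 *	while (tcp_next_port_v4(it, &res)) {
 *		... each res describes one vestigial PCB on port ...
 *	}
 */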
/*!\brief initialise a collection of fat pointers.
*
 *\param n	# fat pointers to allocate per hash
 *\param m	# hash buckets per hash (a power of 2)
*
* We allocate 2x as much, as we have two hashes: full and lport only.
*/
static void
fatp_init(fatp_ctl_t *fat, uint32_t n, uint32_t m,
fatp_t *fat_base, fatp_t **fat_hash)
{
fatp_t *fp;
KASSERT(n <= FATP_MAX / 2);
fat->hash = fat_hash;
fat->base = fat_base;
fat->port = &fat->hash[m];
	fat->mask = m - 1;	/* m must be a power of 2 */
fat->lim = fat->base + 2*n - 1;
fat->nfree = 0;
fat->nalloc = 2*n;
/* Initialise the free list.
*/
for (fp = fat->lim; fp >= fat->base; --fp) {
fatp_free(fat, fp);
}
}
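/* Layout, as initialised above: fat_hash holds 2m chain heads, with
 * buckets [0, m) serving the full-tuple hash and fat->port pointing at
 * bucket m, where the lport-only buckets begin; fat_base holds 2n
 * fatp_t entries, all threaded onto a single free list by the loop.
 */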
/*
* The `xtra' is XORed into the tag stored.
*/
static uint32_t fatp_xtra[] = {
0x11111111,0x22222222,0x33333333,0x44444444,
0x55555555,0x66666666,0x77777777,0x88888888,
0x12121212,0x21212121,0x34343434,0x43434343,
0x56565656,0x65656565,0x78787878,0x87878787,
0x11221122,0x22112211,0x33443344,0x44334433,
0x55665566,0x66556655,0x77887788,0x88778877,
0x11112222,0x22221111,0x33334444,0x44443333,
0x55556666,0x66665555,0x77778888,0x88887777,
};
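/* The per-slot constants ensure that the same 32-bit tag stores as a
 * different value in each slot, evidently so that a stale or colliding
 * tag in slot i cannot satisfy a probe aimed at slot j: a cheap guard
 * against cross-slot false hits.
 */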
/*!\brief turn a {fatp_t*,slot} into an integral key.
*
* The key can be used to obtain the fatp_t, and the slot,
* as it directly encodes them.
*/
static inline uint32_t
fatp_key(fatp_ctl_t *fat, fatp_t *fp, uint32_t slot)
{
CTASSERT(CACHE_LINE_SIZE == 32 ||
CACHE_LINE_SIZE == 64 ||
CACHE_LINE_SIZE == 128 ||
CACHE_LINE_SIZE == 256);
switch (fatp_ntags()) {
case 7:
return (fatp_index(fat, fp) << 3) | slot;
case 15:
return (fatp_index(fat, fp) << 4) | slot;
case 31:
return (fatp_index(fat, fp) << 5) | slot;
default:
KASSERT(0 && "no support, for no good reason");
return ~0;
}
}
/*!\brief extract the slot from an integral key.
 */
static inline uint32_t
fatp_slot_from_key(fatp_ctl_t *fat, uint32_t key)
{
	switch (fatp_ntags()) {
case 7:
return key & 7;
case 15:
return key & 15;
case 31:
return key & 31;
default:
KASSERT(0 && "no support, for no good reason");
return ~0;
}
}
/*!\brief turn an integral key back into its fatp_t.
 */
static inline fatp_t *
fatp_from_key(fatp_ctl_t *fat, uint32_t key)
{
	switch (fatp_ntags()) {
case 7:
key >>= 3;
break;
case 15:
key >>= 4;
break;
case 31:
key >>= 5;
break;
default:
KASSERT(0 && "no support, for no good reason");
return 0;
	}

	return key ? fat->base + key - 1 : 0;
}
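/* A worked example (illustrative, assuming 64-byte cache lines and
 * hence fatp_ntags() == 15): fatp_key() packs the 1-based fat pointer
 * index above a 4-bit slot, so index 0x2a with slot 3 encodes as
 * (0x2a << 4) | 3 == 0x2a3.  fatp_slot_from_key() recovers
 * 0x2a3 & 15 == 3, and fatp_from_key() recovers fat->base + 0x2a - 1.
 * Storing the index 1-based lets a key of zero safely mean "none".
 */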
/*!\brief insert index into fatp hash
*
*\param idx - index of element being placed in hash chain
*\param tag - 32-bit tag identifier
*
*\returns
* value which can be used to locate entry.
*
*\note
* we rely on the fact that there are unused high bits in the index
* for verification purposes on lookup.
*/
/* All entries are inuse at the top level.
* We allocate a spare, and push the top level
 * down one. All entries in the fp we push down
 * (think of a tapeworm here) will be expelled sooner than
* any entries added subsequently to this hash bucket.
* This is a property of the time waits we are exploiting.
*/
/*!\brief return the next vtw after this one.
*
* Due to the differing sizes of the entries in differing
* arenas, we have to ensure we ++ the correct pointer type.
*
* Also handles wrap.
*/
static inline vtw_t *
vtw_next(vtw_ctl_t *ctl, vtw_t *vtw)
{
	if (ctl->is_v4) {
		vtw_v4_t	*v4 = (void*)vtw;

		vtw = &(++v4)->common;
	} else {
		vtw_v6_t	*v6 = (void*)vtw;

		vtw = &(++v6)->common;
	}

	/* Handle wrap back to the start of the arena.
	 */
	if (vtw > ctl->lim.v)
		vtw = ctl->base.v;

	return vtw;
}

	/* When we delete entries, we do not compact. This is
	 * due to temporality. We add entries, and they
	 * (eventually) expire. Older entries will be further
	 * down the chain.
	 */
if (!fp->inuse) {
uint32_t hi = tag & fat->mask;
fatp_t *fq = 0;
fatp_t *fr = fat->hash[hi];
goto out;
} else if (vtw_alive(vtw)) {
++vtw_stats.losing[1];
++losings;
db_trace(KTR_VTW
, (vtw, "vtw:!mis"
" port %8.8x:%4.4x %8.8x:%4.4x"
" key %x port %x"
, v4->faddr, v4->fport
, v4->laddr, v4->lport
, vtw->key
, lport));
} else {
/* Really losing here. We are coming
* up with references to free entries.
			 * Might find it better to use the
			 * traditional scheme, or need more
			 * ad-hockery. The other ad-hockery
			 * would be to pull more into the
* cache line to reject the false
* hits.
*/
++vtw_stats.losing[1];
++losings;
db_trace(KTR_VTW
, (fp, "vtw:!mis port %x"
" - free entry idx %x vtw %p"
, lport
, idx_decode(ctl, idx)
, vtw));
}
}
it->slot_idx = i + 1;
goto out;
} else if (vtw_alive(vtw)) {
++vtw_stats.losing[1];
db_trace(KTR_VTW
, (vtw, "vtw:!mis port %6A:%4.4x"
" %6A:%4.4x key %x port %x"
, db_store(&v6->faddr
, sizeof (v6->faddr))
, v6->fport
, db_store(&v6->laddr
					, sizeof (v6->laddr))
, v6->lport
, vtw->key
, lport));
} else {
/* Really losing here. We are coming
* up with references to free entries.
			 * Might find it better to use the
			 * traditional scheme, or need more
			 * ad-hockery. The other ad-hockery
			 * would be to pull more into the
* cache line to reject the false
* hits.
*/
++vtw_stats.losing[1];
++losings;
vtw = 0;
out:
if (fatps > vtw_stats.max_chain[1])
vtw_stats.max_chain[1] = fatps;
if (probes > vtw_stats.max_probe[1])
vtw_stats.max_probe[1] = probes;
if (losings > vtw_stats.max_loss[1])
vtw_stats.max_loss[1] = losings;
return vtw;
}
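/* Statistics note: the [1] slots of vtw_stats cover the lport-only
 * hash walked by the port iterators; the [0] slots cover the full
 * 4-tuple hash.  max_chain/max_probe/max_loss are high-water marks,
 * not running totals.
 */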
/*!\brief initialise the VTW allocation arena
*
* There are 1+3 allocation classes:
* 0 classless
* {1,2,3} MSL-class based allocation
*
* The allocation arenas are all initialised. Classless gets all the
* space. MSL-class based divides the arena, so that allocation
* within a class can proceed without having to consider entries
* (aka: cache lines) from different classes.
*
* Usually, we are completely classless or class-based, but there can be
* transition periods, corresponding to dynamic adjustments in the config
* by the operator.
*/
static void
vtw_init(fatp_ctl_t *fat, vtw_ctl_t *ctl, const uint32_t n, vtw_t *ctl_base_v)
{
int class_n, i;
vtw_t *base;
ctl->base.v = ctl_base_v;
if (ctl->is_v4) {
ctl->lim.v4 = ctl->base.v4 + n - 1;
ctl->alloc.v4 = ctl->base.v4;
} else {
ctl->lim.v6 = ctl->base.v6 + n - 1;
ctl->alloc.v6 = ctl->base.v6;
}
	ctl->nfree = n;

	ctl->fat = fat;
	fat->vtw = ctl;
/* Divide the resources equally amongst the classes.
* This is not optimal, as the different classes
* arrive and leave at different rates, but it is
* the best I can do for now.
*/
class_n = n / (VTW_NCLASS-1);
base = ctl->base.v;
for (i = 1; i < VTW_NCLASS; ++i) {
int j;
ctl[i] = ctl[0];
ctl[i].clidx = i;
ctl[i].base.v = base;
ctl[i].alloc = ctl[i].base;
for (j = 0; j < class_n - 1; ++j) {
if (tcp_msl_enable)
base->msl_class = i;
base = vtw_next(ctl, base);
}
		ctl[i].lim.v = base;
		base = vtw_next(ctl, base);
		ctl[i].nfree = class_n;
	}
}
/*!\brief map class to TCP MSL
*/
static inline uint32_t
class_to_msl(int msl_class)
{
switch (msl_class) {
case 0:
case 1:
return tcp_msl_remote ? tcp_msl_remote : (TCPTV_MSL >> 0);
case 2:
return tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
default:
return tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
}
}
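/* For example, with the tcp_msl_* overrides all zero the fallbacks
 * scale the compiled-in TCPTV_MSL: classes 0 (classless) and 1 (remote)
 * get TCPTV_MSL, class 2 (local) gets TCPTV_MSL >> 1, and class 3
 * (loopback) gets TCPTV_MSL >> 2, so nearer peers expire sooner.
 */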
/*!\brief map TCP MSL to class
*/
static inline uint32_t
msl_to_class(int msl)
{
if (tcp_msl_enable) {
if (msl <= (tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2)))
return 1+2;
if (msl <= (tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1)))
return 1+1;
return 1;
}
return 0;
}
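/* Note that the two maps invert each other while MSLT is enabled:
 * msl_to_class(class_to_msl(c)) == c for c in {1, 2, 3}, provided any
 * tcp_msl_* overrides preserve a strict loop < local < remote ordering.
 */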
/*!\brief allocate a vtw entry
*/
static inline vtw_t *
vtw_alloc(vtw_ctl_t *ctl)
{
vtw_t *vtw = 0;
int stuck = 0;
int avail = ctl ? (ctl->nalloc + ctl->nfree) : 0;
int msl;
KASSERT(mutex_owned(softnet_lock));
/* If no resources, we will not get far.
*/
if (!ctl || !ctl->base.v4 || avail <= 0)
return 0;
/* Obtain a free one.
*/
while (!ctl->nfree) {
vtw_age(ctl, 0);
if (++stuck > avail) {
/* When in transition between
* schemes (classless, classed) we
* can be stuck having to await the
* expiration of cross-allocated entries.
*
* Returning zero means we will fall back to the
* traditional TIME_WAIT handling, except in the
* case of a re-shed, in which case we cannot
				 * perform the re-shed, but will retain the extant
* entry.
*/
db_trace(KTR_VTW
, (ctl, "vtw:!none free in class %x %x/%x"
, ctl->clidx
, ctl->nalloc, ctl->nfree));
return 0;
}
}
vtw = ctl->alloc.v;
if (vtw->msl_class != ctl->clidx) {
/* Usurping rules:
* 0 -> {1,2,3} or {1,2,3} -> 0
*/
KASSERT(!vtw->msl_class || !ctl->clidx);
if (vtw->hashed || vtw->expire.tv_sec) {
/* As this is owned by some other class,
			 * we must wait for it to expire.
* This will only happen on class/classless
* transitions, which are guaranteed to progress
* to completion in small finite time, barring bugs.
*/
db_trace(KTR_VTW
, (ctl, "vtw:!%p class %x!=%x %x:%x%s"
, vtw, vtw->msl_class, ctl->clidx
, vtw->expire.tv_sec
, vtw->expire.tv_usec
, vtw->hashed ? " hashed" : ""));
return 0;
}
db_trace(KTR_VTW
, (ctl, "vtw:!%p usurped from %x to %x"
, vtw, vtw->msl_class, ctl->clidx));
vtw->msl_class = ctl->clidx;
}
if (vtw_alive(vtw)) {
KASSERT(0 && "next free not free");
return 0;
}
	/* The entry is ours: advance the allocation pointer
	 * and adjust the accounting.
	 */
	ctl->alloc.v = vtw_next(ctl, vtw);

	--ctl->nfree;
	++ctl->nalloc;

	return vtw;
}

/*!\brief age out vestigial PCBs, oldest first.
 *
 * A null `when' means expire against the oldest entry's own expiry
 * time.  The return value feeds vtw_tick()'s decision on whether to
 * keep the callout running.
 */
static int
vtw_age(vtw_ctl_t *ctl, struct timeval *_when)
{
	struct timeval	then, *when = _when;
	vtw_t		*vtw;
	int		maxtries = 0;
if (!ctl->oldest.v) {
KASSERT(!ctl->nalloc);
return 0;
}
for (vtw = ctl->oldest.v; vtw && ctl->nalloc; ) {
if (++maxtries > ctl->nalloc)
break;
if (vtw->msl_class != ctl->clidx) {
db_trace(KTR_VTW
, (vtw, "vtw:!age class mismatch %x != %x"
, vtw->msl_class, ctl->clidx));
/* XXXX
* See if the appropriate action is to skip to the next.
* XXXX
*/
ctl->oldest.v = vtw = vtw_next(ctl, vtw);
continue;
}
if (!when) {
/* Latch oldest timeval if none specified.
*/
then = vtw->expire;
when = &then;
}
/*!\brief notice the passage of time.
* It seems to be getting faster. What happened to the year?
*/
static void
vtw_tick(void *arg)
{
struct timeval now;
int i, cnt = 0;
getmicrouptime(&now);
	db_trace(KTR_VTW, (arg, "vtw: tick - now %8.8x:%8.8x"
, now.tv_sec, now.tv_usec));
mutex_enter(softnet_lock);
for (i = 0; i < VTW_NCLASS; ++i) {
cnt += vtw_age(&vtw_tcpv4[i], &now);
cnt += vtw_age(&vtw_tcpv6[i], &now);
}
/* Keep ticks coming while we need them.
*/
if (cnt)
callout_schedule(&vtw_cs, hz / 5);
else {
tcp_vtw_was_enabled = 0;
tcbtable.vestige = 0;
}
mutex_exit(softnet_lock);
}
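/* While any arena still holds live entries the callout re-arms at
 * hz / 5, so an expired vestigial PCB lingers at most ~200ms past its
 * due time; once everything drains, VTW detaches from tcbtable until
 * it is enabled again.
 */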
/* Note: the reference to vtw_tcpv4[0] is fine.
* We do not need per-class iteration. We just
* need to get to the fat, and there is one
* shared fat.
*/
if (vtw_tcpv4[0].fat) {
it->addr.v4 = addr;
it->port = port;
it->wild = !!wild;
it->ctl = &vtw_tcpv4[0];
++vtw_stats.look[1];
}
return it;
}
/*!\brief export an IPv4 vtw.
*/
static int
vtw_export_v4(vtw_ctl_t *ctl, vtw_t *vtw, vestigial_inpcb_t *res)
{
vtw_v4_t *v4 = (void*)vtw;
bzero(res, sizeof (*res));
if (ctl && vtw) {
if (!ctl->clidx && vtw->msl_class)
ctl += vtw->msl_class;
else
KASSERT(ctl->clidx == vtw->msl_class);
/*!\brief return next port in the port iterator. yowza.
*/
static int
tcp_next_port_v4(void *arg, struct vestigial_inpcb *res)
{
struct tcp_ports_iterator *it = arg;
vtw_t *vtw = 0;
/* Note: the reference to vtw_tcpv6[0] is fine.
* We do not need per-class iteration. We just
* need to get to the fat, and there is one
* shared fat.
*/
if (vtw_tcpv6[0].fat) {
it->addr.v6 = *addr;
it->port = port;
it->wild = !!wild;
it->ctl = &vtw_tcpv6[0];
++vtw_stats.look[1];
}
return it;
}
/*!\brief export an IPv6 vtw.
*/
static int
vtw_export_v6(vtw_ctl_t *ctl, vtw_t *vtw, vestigial_inpcb_t *res)
{
vtw_v6_t *v6 = (void*)vtw;
bzero(res, sizeof (*res));
if (ctl && vtw) {
if (!ctl->clidx && vtw->msl_class)
ctl += vtw->msl_class;
else
KASSERT(ctl->clidx == vtw->msl_class);
/* Allocate 10% more capacity in the fat pointers.
* We should only need ~#hash additional based on
* how they age, but TIME_WAIT assassination could cause
* sparse fat pointer utilisation.
*/
m = 512;
n = 2*m + (11 * (tcp_vtw_entries / fatp_ntags())) / 10;
sz = (ctl->is_v4 ? sizeof(vtw_v4_t) : sizeof(vtw_v6_t));
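	/* E.g. with 64-byte cache lines (fatp_ntags() == 15) and an
	 * illustrative tcp_vtw_entries of 1 << 16, this works out to
	 * n = 1024 + (11 * (65536 / 15)) / 10 == 5829 fat pointers,
	 * i.e. roughly the 10% headroom described above.
	 */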
/*!\brief add lalp, fafp entries for debug
*/
int
vtw_debug_add(int af, sin_either_t *la, sin_either_t *fa, int msl, int msl_class)
{
vtw_ctl_t *ctl;
vtw_t *vtw;
ctl = vtw_control(af, msl ? msl : class_to_msl(msl_class));
if (!ctl)
return 0;
vtw = vtw_alloc(ctl);
if (vtw) {
vtw->snd_nxt = 0;
vtw->rcv_nxt = 0;
switch (af) {
case AF_INET: {
vtw_v4_t *v4 = (void*)vtw;