/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Stephen Deering of Stanford University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
*/
/*
* Copyright (c) 1989 Stephen Deering
*
* This code is derived from software contributed to Berkeley by
* Stephen Deering of Stanford University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
*/
/*
* IP multicast forwarding procedures
*
* Written by David Waitzman, BBN Labs, August 1988.
* Modified by Steve Deering, Stanford, February 1989.
* Modified by Mark J. Steiglitz, Stanford, May, 1991
* Modified by Van Jacobson, LBL, January 1993
* Modified by Ajit Thyagarajan, PARC, August 1993
* Modified by Bill Fenner, PARC, April 1994
* Modified by Charles M. Hannum, NetBSD, May 1995.
* Modified by Ahmed Helmy, SGI, June 1996
* Modified by George Edmond Eddy (Rusty), ISI, February 1998
* Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
* Modified by Hitoshi Asaeda, WIDE, August 2000
* Modified by Pavlin Radoslavov, ICSI, October 2002
*
* MROUTING Revision: 1.2
* and PIM-SMv2 and PIM-DM support, advanced API support,
* bandwidth metering and signaling
*/
#define IP_MULTICASTOPTS 0
#define M_PULLUP(m, len) \
do { \
if ((m) && ((m)->m_flags & M_EXT || (m)->m_len < (len))) \
(m) = m_pullup((m), (len)); \
} while (/*CONSTCOND*/ 0)
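/*
 * Note that m_pullup() frees the chain and returns NULL on failure, so
 * after M_PULLUP() callers must re-check the mbuf pointer before using it,
 * as the callers further below do.
 */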
/*
* Globals. All but ip_mrouter and ip_mrtproto could be static,
* except for netstat or debugging purposes.
*/
struct socket *ip_mrouter = NULL;
int ip_mrtproto = IGMP_DVMRP; /* for netstat only */
/*
* Pending timeouts are stored in a hash table, the key being the
* expiration time. Periodically, the entries are analysed and processed.
*/
#define BW_METER_BUCKETS 1024
static struct bw_meter *bw_meter_timers[BW_METER_BUCKETS];
struct callout bw_meter_ch;
#define BW_METER_PERIOD (hz) /* periodical handling of bw meters */
/*
* Pending upcalls are stored in a vector which is flushed when
* full, or periodically
*/
static struct bw_upcall bw_upcalls[BW_UPCALLS_MAX];
static u_int bw_upcalls_n; /* # of pending upcalls */
struct callout bw_upcalls_ch;
#define BW_UPCALLS_PERIOD (hz) /* periodical flush of bw upcalls */
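/*
 * Illustrative sketch (not the original code) of the pattern described
 * above: bw_meter_prepare_upcall() appends an entry to bw_upcalls[], and
 * bw_upcalls_send() flushes the vector either when it becomes full or
 * from the BW_UPCALLS_PERIOD callout:
 *
 *	bw_upcalls[bw_upcalls_n++] = *u;
 *	if (bw_upcalls_n >= BW_UPCALLS_MAX)
 *		bw_upcalls_send();
 */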
#ifdef PIM
struct pimstat pimstat;
/*
* Note: the PIM Register encapsulation adds the following in front of a
* data packet:
*
* struct pim_encap_hdr {
* struct ip ip;
* struct pim_encap_pimhdr pim;
* }
*/
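/*
 * A sketch (for illustration; the exact layout is an assumption) of the
 * PIM part of that encapsulation, as used by pim_register_send_rp() below:
 *
 *	struct pim_encap_pimhdr {
 *		struct pim pim;		(fixed PIM header)
 *		uint32_t  flags;	(Register flags, e.g. the Border bit)
 *	};
 */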
/*
* whether or not special PIM assert processing is enabled.
*/
static int pim_assert;
/*
* Rate limit for assert notification messages, in usec
*/
#define ASSERT_MSG_TIME 3000000
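/*
 * A minimal sketch (an assumption, not the original upcall path) of how
 * ASSERT_MSG_TIME can be applied per MFC entry, using the TV_DELTA()
 * helper and the mfc_last_assert timestamp kept in struct mfc:
 */
static inline int
assert_msg_rate_ok(struct mfc *rt, struct timeval *now)
{
	u_int32_t delta;

	TV_DELTA(*now, rt->mfc_last_assert, delta);	/* usec since last msg */
	if (delta <= ASSERT_MSG_TIME)
		return 0;		/* rate-limited: suppress this one */
	rt->mfc_last_assert = *now;	/* remember when we last notified */
	return 1;
}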
/*
* Kernel multicast routing API capabilities and setup.
* If more API capabilities are added to the kernel, they should be
* recorded in `mrt_api_support'.
*/
static const u_int32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
MRT_MFC_FLAGS_BORDER_VIF |
MRT_MFC_RP |
MRT_MFC_BW_UPCALL);
static u_int32_t mrt_api_config = 0;
/*
 * Find a route for a given origin IP address and multicast group address.
 * A type-of-service parameter may be added in the future.
 * Statistics are updated by the caller if needed
 * (mrtstat.mrts_mfc_lookups and mrtstat.mrts_mfc_misses).
*/
static struct mfc *
mfc_find(struct in_addr *o, struct in_addr *g)
{
struct mfc *rt;
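	/*
	 * A minimal sketch of the lookup (the original body is not shown
	 * here): hash the (origin, group) pair with MFCHASH() and walk the
	 * chain for a resolved entry, i.e. one with no pending upcall.
	 */
	LIST_FOREACH(rt, &mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
		if (in_hosteq(rt->mfc_origin, *o) &&
		    in_hosteq(rt->mfc_mcastgrp, *g) &&
		    rt->mfc_stall == NULL)
			break;
	}
	return rt;
}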
/*
 * Returns the packet, byte, and rpf-failure counts for the source/group
 * pair provided.
*/
static int
get_sg_cnt(struct sioc_sg_req *req)
{
int s;
struct mfc *rt;
/*
 * Returns the input and output packet and byte counts for the vif provided.
*/
static int
get_vif_cnt(struct sioc_vif_req *req)
{
vifi_t vifi = req->vifi;
if (mrtdebug)
log(LOG_DEBUG, "ip_mrouter_init\n");
return 0;
}
/*
* Disable multicast routing
*/
int
ip_mrouter_done(void)
{
vifi_t vifi;
struct vif *vifp;
int i;
int s;
s = splsoftnet();
/* Clear out all the vifs currently in use. */
for (vifi = 0; vifi < numvifs; vifi++) {
vifp = &viftable[vifi];
if (!in_nullhost(vifp->v_lcl_addr))
reset_vif(vifp);
}
/* XXX not sure about side effect to userland routing daemon */
for (vifi = 0; vifi < numvifs; vifi++) {
vifp = &viftable[vifi];
if (vifp->v_ifp == ifp)
reset_vif(vifp);
}
for (i = 0; i < MFCTBLSIZ; i++) {
if (nexpire[i] == 0)
continue;
LIST_FOREACH(rt, &mfchashtbl[i], mfc_hash) {
for (rte = rt->mfc_stall; rte; rte = rte->next) {
if (rte->ifp == ifp)
rte->ifp = NULL;
}
}
}
}
/*
* Set PIM assert processing global
*/
static int
set_assert(int i)
{
pim_assert = !!i;
return 0;
}
/*
* Configure API capabilities
*/
static int
set_api_config(struct sockopt *sopt)
{
u_int32_t apival;
int i, error;
/*
* We can set the API capabilities only if it is the first operation
* after MRT_INIT. I.e.:
* - there are no vifs installed
* - pim_assert is not enabled
* - the MFC table is empty
*/
error = sockopt_get(sopt, &apival, sizeof(apival));
if (error)
return error;
if (numvifs > 0)
return EPERM;
if (pim_assert)
return EPERM;
for (i = 0; i < MFCTBLSIZ; i++) {
if (LIST_FIRST(&mfchashtbl[i]) != NULL)
return EPERM;
}
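	/*
	 * A minimal sketch of the remaining step (an assumption; the
	 * original tail of this function is not shown here): grant only
	 * the subset of the requested capabilities that the kernel
	 * supports, and record it in mrt_api_config.
	 */
	mrt_api_config = apival & mrt_api_support;
	return 0;
}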
/*
* Add a vif to the vif table
*/
static int
add_vif(struct vifctl *vifcp)
{
struct vif *vifp;
struct ifnet *ifp;
int error, s;
struct sockaddr_in sin;
if (vifcp->vifc_vifi >= MAXVIFS)
return EINVAL;
if (in_nullhost(vifcp->vifc_lcl_addr))
return EADDRNOTAVAIL;
vifp = &viftable[vifcp->vifc_vifi];
if (!in_nullhost(vifp->v_lcl_addr))
return EADDRINUSE;
/* Find the interface with an address in AF_INET family. */
#ifdef PIM
if (vifcp->vifc_flags & VIFF_REGISTER) {
/*
* XXX: Because VIFF_REGISTER does not really need a valid
* local interface (e.g. it could be 127.0.0.2), we don't
* check its address.
*/
ifp = NULL;
} else
#endif
{
struct ifaddr *ifa;
sockaddr_in_init(&sin, &vifcp->vifc_lcl_addr, 0);
s = pserialize_read_enter();
ifa = ifa_ifwithaddr(sintosa(&sin));
if (ifa == NULL) {
pserialize_read_exit(s);
return EADDRNOTAVAIL;
}
ifp = ifa->ifa_ifp;
/* FIXME NOMPSAFE */
pserialize_read_exit(s);
}
if (vifcp->vifc_flags & VIFF_TUNNEL) {
if (vifcp->vifc_flags & VIFF_SRCRT) {
log(LOG_ERR, "source routed tunnels not supported\n");
return EOPNOTSUPP;
}
/* attach this vif to decapsulator dispatch table */
/*
* XXX Use addresses in registration so that matching
* can be done with radix tree in decapsulator. But,
* we need to check inner header for multicast, so
* this requires both radix tree lookup and then a
* function to check, and this is not supported yet.
*/
error = encap_lock_enter();
if (error)
return error;
vifp->v_encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV4,
vif_encapcheck, &vif_encapsw, vifp);
encap_lock_exit();
if (!vifp->v_encap_cookie)
return EINVAL;
s = splsoftnet();
rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);
/* If an entry already exists, just update the fields */
if (rt) {
if (mrtdebug & DEBUG_MFC)
log(LOG_DEBUG, "add_mfc update o %x g %x p %x\n",
ntohl(mfccp->mfcc_origin.s_addr),
ntohl(mfccp->mfcc_mcastgrp.s_addr),
mfccp->mfcc_parent);
update_mfc_params(rt, mfccp);
splx(s);
return 0;
}
/*
* Find the entry for which the upcall was made and update
*/
nstl = 0;
hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
rt->mfc_stall != NULL) {
if (nstl++)
log(LOG_ERR, "add_mfc %s o %x g %x p %x dbx %p\n",
"multiple kernel entries",
ntohl(mfccp->mfcc_origin.s_addr),
ntohl(mfccp->mfcc_mcastgrp.s_addr),
mfccp->mfcc_parent, rt->mfc_stall);
if (mrtdebug & DEBUG_MFC)
log(LOG_DEBUG, "add_mfc o %x g %x p %x dbg %p\n",
ntohl(mfccp->mfcc_origin.s_addr),
ntohl(mfccp->mfcc_mcastgrp.s_addr),
mfccp->mfcc_parent, rt->mfc_stall);
rt->mfc_expire = 0; /* Don't clean this guy up */
nexpire[hash]--;
			/* free packets queued at the end of this entry */
for (; rte != NULL; rte = nrte) {
nrte = rte->next;
if (rte->ifp) {
ip_mdq(rte->m, rte->ifp, rt);
}
m_freem(rte->m);
#ifdef UPCALL_TIMING
collate(&rte->t);
#endif /* UPCALL_TIMING */
free(rte, M_MRTABLE);
}
}
}
/*
* It is possible that an entry is being inserted without an upcall
*/
if (nstl == 0) {
/*
* No mfc; make a new one
*/
if (mrtdebug & DEBUG_MFC)
log(LOG_DEBUG, "add_mfc no upcall o %x g %x p %x\n",
ntohl(mfccp->mfcc_origin.s_addr),
ntohl(mfccp->mfcc_mcastgrp.s_addr),
mfccp->mfcc_parent);
LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) {
init_mfc_params(rt, mfccp);
if (rt->mfc_expire)
nexpire[hash]--;
rt->mfc_expire = 0;
break; /* XXX */
}
}
if (rt == NULL) { /* no upcall, so make a new entry */
rt = malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
if (rt == NULL) {
splx(s);
return ENOBUFS;
}
static int
socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
{
if (s) {
if (sbappendaddr(&s->so_rcv, sintosa(src), mm, NULL) != 0) {
sorwakeup(s);
return 0;
}
soroverflow(s);
}
m_freem(mm);
return -1;
}
/*
* IP multicast forwarding function. This function assumes that the packet
* pointed to by "ip" has arrived on (or is about to be sent to) the interface
* pointed to by "ifp", and the packet is to be relayed to other networks
* that have members of the packet's destination IP multicast group.
*
* The packet is returned unscathed to the caller, unless it is
* erroneous, in which case a non-zero return value tells the caller to
* discard it.
*/
#define IP_HDR_LEN 20 /* # bytes of fixed IP header (excluding options) */
#define TUNNEL_LEN 12 /* # bytes of IP option for tunnel encapsulation */
int
ip_mforward(struct mbuf *m, struct ifnet *ifp)
{
int rc;
/*
	 * Save csum_flags to uphold the "unscathed" guarantee.
	 * ip_output() relies on that, and without it we send out
	 * multicast packets with an invalid checksum.
	 *
	 * See PR kern/55779.
*/
int csum_flags = m->m_pkthdr.csum_flags;
/*
* Temporarily clear any in-bound checksum flags for this packet.
*/
m->m_pkthdr.csum_flags = 0;
rc = ip_mforward_real(m, ifp);
m->m_pkthdr.csum_flags = csum_flags;
return rc;
}
static int
ip_mforward_real(struct mbuf *m, struct ifnet *ifp)
{
struct ip *ip = mtod(m, struct ip *);
struct mfc *rt;
static int srctun = 0;
struct mbuf *mm;
struct sockaddr_in sin;
int s;
vifi_t vifi;
/*
* XXX XXX: Why do we check [1] against IPOPT_LSRR? Because we
* expect [0] to be IPOPT_NOP, maybe? In all cases that doesn't
* make a lot of sense, a forged packet can just put two IPOPT_NOPs
* followed by one IPOPT_LSRR, and bypass the check.
*/
if (ip->ip_hl < (IP_HDR_LEN + TUNNEL_LEN) >> 2 ||
((u_char *)(ip + 1))[1] != IPOPT_LSRR) {
/*
* Packet arrived via a physical interface or
* an encapsulated tunnel or a register_vif.
*/
} else {
/*
* Packet arrived through a source-route tunnel.
* Source-route tunnels are no longer supported.
*/
if ((srctun++ % 1000) == 0)
log(LOG_ERR,
"ip_mforward: received source-routed packet from %x\n",
ntohl(ip->ip_src.s_addr));
return EOPNOTSUPP;
}
/*
* Don't forward a packet with time-to-live of zero or one,
* or a packet destined to a local-only group.
*/
if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ip->ip_dst.s_addr))
return 0;
/*
* Determine forwarding vifs from the forwarding cache table
*/
s = splsoftnet();
++mrtstat.mrts_mfc_lookups;
rt = mfc_find(&ip->ip_src, &ip->ip_dst);
/* Entry exists, so forward if necessary */
if (rt != NULL) {
splx(s);
return ip_mdq(m, ifp, rt);
} else {
/*
		 * If we don't have a route for the packet's origin, make a copy
		 * of the packet and send a message to the routing daemon.
*/
mrtstat.mrts_no_route++;
if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
log(LOG_DEBUG, "ip_mforward: no rte s %x g %x\n",
ntohl(ip->ip_src.s_addr),
ntohl(ip->ip_dst.s_addr));
/*
* Allocate mbufs early so that we don't do extra work if we are
* just going to fail anyway. Make sure to pullup the header so
* that other people can't step on it.
*/
rte = malloc(sizeof(*rte), M_MRTABLE, M_NOWAIT);
if (rte == NULL) {
splx(s);
return ENOBUFS;
}
mb0 = m_copypacket(m, M_DONTWAIT);
M_PULLUP(mb0, hlen);
if (mb0 == NULL) {
free(rte, M_MRTABLE);
splx(s);
return ENOBUFS;
}
/* is there an upcall waiting for this flow? */
hash = MFCHASH(ip->ip_src, ip->ip_dst);
LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
rt->mfc_stall != NULL)
break;
}
if (rt == NULL) {
int i;
struct igmpmsg *im;
/*
* Locate the vifi for the incoming interface for
* this packet.
* If none found, drop packet.
*/
for (vifi = 0; vifi < numvifs &&
viftable[vifi].v_ifp != ifp; vifi++)
;
if (vifi >= numvifs) /* vif not found, drop packet */
goto non_fatal;
/* no upcall, so make a new entry */
rt = malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
if (rt == NULL)
goto fail;
/*
* Make a copy of the header to send to the user level
* process
*/
mm = m_copym(m, 0, hlen, M_DONTWAIT);
M_PULLUP(mm, hlen);
if (mm == NULL)
goto fail1;
/*
* Send message to routing daemon to install
* a route into the kernel table
*/
/* insert new entry at head of hash chain */
rt->mfc_origin = ip->ip_src;
rt->mfc_mcastgrp = ip->ip_dst;
rt->mfc_pkt_cnt = 0;
rt->mfc_byte_cnt = 0;
rt->mfc_wrong_if = 0;
rt->mfc_expire = UPCALL_EXPIRE;
nexpire[hash]++;
for (i = 0; i < numvifs; i++) {
rt->mfc_ttls[i] = 0;
rt->mfc_flags[i] = 0;
}
rt->mfc_parent = -1;
/* clear the RP address */
rt->mfc_rp = zeroin_addr;
rt->mfc_bw_meter = NULL;
/* link into table */
LIST_INSERT_HEAD(&mfchashtbl[hash], rt, mfc_hash);
/* Add this entry to the end of the queue */
rt->mfc_stall = rte;
} else {
			/* determine if the queue has overflowed */
struct rtdetq **p;
int npkts = 0;
/*
* XXX ouch! we need to append to the list, but we
* only have a pointer to the front, so we have to
* scan the entire list every time.
*/
for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next)
if (++npkts > MAX_UPQ) {
mrtstat.mrts_upq_ovflw++;
non_fatal:
free(rte, M_MRTABLE);
m_freem(mb0);
splx(s);
return 0;
}
/* Add this entry to the end of the queue */
*p = rte;
}
/*
* Macro to send packet on vif.
*/
#define MC_SEND(ip, vifp, m) do { \
if ((vifp)->v_flags & VIFF_TUNNEL) \
encap_send((ip), (vifp), (m)); \
else \
phyint_send((ip), (vifp), (m)); \
} while (/*CONSTCOND*/ 0)
/*
 * Packet forwarding routine, called once an entry in the cache is made.
*/
static int
ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt)
{
struct ip *ip = mtod(m, struct ip *);
vifi_t vifi;
struct vif *vifp;
struct sockaddr_in sin;
const int plen = ntohs(ip->ip_len) - (ip->ip_hl << 2);
/*
* Don't forward if it didn't arrive from the parent vif for its origin.
*/
vifi = rt->mfc_parent;
if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
/* came in the wrong interface */
if (mrtdebug & DEBUG_FORWARD)
log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
ifp, vifi,
vifi >= numvifs ? 0 : viftable[vifi].v_ifp);
++mrtstat.mrts_wrong_if;
++rt->mfc_wrong_if;
/*
* If we are doing PIM assert processing, send a message
* to the routing daemon.
*
* XXX: A PIM-SM router needs the WRONGVIF detection so it
* can complete the SPT switch, regardless of the type
* of the iif (broadcast media, GRE tunnel, etc).
*/
if (pim_assert && (vifi < numvifs) && viftable[vifi].v_ifp) {
struct timeval now;
u_int32_t delta;
#ifdef PIM
if (ifp == &multicast_register_if)
pimstat.pims_rcv_registers_wrongiif++;
#endif
/* Get vifi for the incoming packet */
for (vifi = 0;
vifi < numvifs && viftable[vifi].v_ifp != ifp;
vifi++)
;
if (vifi >= numvifs) {
/* The iif is not found: ignore the packet. */
return 0;
}
if (rt->mfc_flags[vifi] &
MRT_MFC_FLAGS_DISABLE_WRONGVIF) {
/* WRONGVIF disabled: ignore the packet */
return 0;
}
/* If I sourced this packet, it counts as output, else it was input. */
if (in_hosteq(ip->ip_src, viftable[vifi].v_lcl_addr)) {
viftable[vifi].v_pkt_out++;
viftable[vifi].v_bytes_out += plen;
} else {
viftable[vifi].v_pkt_in++;
viftable[vifi].v_bytes_in += plen;
}
rt->mfc_pkt_cnt++;
rt->mfc_byte_cnt += plen;
/*
* For each vif, decide if a copy of the packet should be forwarded.
* Forward if:
* - the ttl exceeds the vif's threshold
	 * - there are group members downstream on the interface
*/
for (vifp = viftable, vifi = 0; vifi < numvifs; vifp++, vifi++) {
if ((rt->mfc_ttls[vifi] > 0) &&
(ip->ip_ttl > rt->mfc_ttls[vifi])) {
vifp->v_pkt_out++;
vifp->v_bytes_out += plen;
#ifdef PIM
if (vifp->v_flags & VIFF_REGISTER)
pim_register_send(ip, vifp, m, rt);
else
#endif
MC_SEND(ip, vifp, m);
}
}
/*
* Make a new reference to the packet; make sure that
* the IP header is actually copied, not just referenced,
* so that ip_output() only scribbles on the copy.
*/
mb_copy = m_copypacket(m, M_DONTWAIT);
M_PULLUP(mb_copy, hlen);
if (mb_copy == NULL)
return;
if (vifp->v_rate_limit <= 0)
tbf_send_packet(vifp, mb_copy);
else
tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *),
ntohs(ip->ip_len));
}
static void
encap_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
{
struct mbuf *mb_copy;
struct ip *ip_copy;
int i, len = ntohs(ip->ip_len) + sizeof(multicast_encap_iphdr);
/* Take care of delayed checksums */
if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
in_undefer_cksum_tcpudp(m);
m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
}
/*
	 * Copy the old packet and pull up its IP header into the
	 * new mbuf so we can modify it.  Try to fill the new
	 * mbuf, since if we don't, the ethernet driver will.
*/
MGETHDR(mb_copy, M_DONTWAIT, MT_DATA);
if (mb_copy == NULL)
return;
mb_copy->m_data += max_linkhdr;
mb_copy->m_pkthdr.len = len;
mb_copy->m_len = sizeof(multicast_encap_iphdr);
if ((mb_copy->m_next = m_copypacket(m, M_DONTWAIT)) == NULL) {
m_freem(mb_copy);
return;
}
i = MHLEN - max_linkhdr;
if (i > len)
i = len;
mb_copy = m_pullup(mb_copy, i);
if (mb_copy == NULL)
return;
/*
* fill in the encapsulating IP header.
*/
ip_copy = mtod(mb_copy, struct ip *);
*ip_copy = multicast_encap_iphdr;
if (len < IP_MINFRAGSIZE)
ip_copy->ip_id = 0;
else
ip_copy->ip_id = ip_newid();
ip_copy->ip_len = htons(len);
ip_copy->ip_src = vifp->v_lcl_addr;
ip_copy->ip_dst = vifp->v_rmt_addr;
/*
* turn the encapsulated IP header back into a valid one.
*/
ip = (struct ip *)((char *)ip_copy + sizeof(multicast_encap_iphdr));
--ip->ip_ttl;
ip->ip_sum = 0;
mb_copy->m_data += sizeof(multicast_encap_iphdr);
ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
mb_copy->m_data -= sizeof(multicast_encap_iphdr);
/*
* De-encapsulate a packet and feed it back through ip input.
*/
static void
vif_input(struct mbuf *m, int off, int proto, void *eparg)
{
struct vif *vifp = eparg;
KASSERT(vifp != NULL);
if (proto != ENCAP_PROTO) {
m_freem(m);
mrtstat.mrts_bad_tunnel++;
return;
}
m_adj(m, off);
m_set_rcvif(m, vifp->v_ifp);
if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
m_freem(m);
}
}
/*
* Check if the packet should be received on the vif denoted by arg.
* (The encap selection code will call this once per vif since each is
* registered separately.)
*/
static int
vif_encapcheck(struct mbuf *m, int off, int proto, void *arg)
{
struct vif *vifp;
struct ip ip;
#ifdef DIAGNOSTIC
if (!arg || proto != IPPROTO_IPV4)
panic("unexpected arg in vif_encapcheck");
#endif
/*
* Accept the packet only if the inner header is multicast
* and the outer header matches a tunnel-mode vif. Order
* checks in the hope that common non-matching packets will be
* rejected quickly. Assume that unicast IPv4 traffic in a
* parallel tunnel (e.g. gif(4)) is unlikely.
*/
/* Obtain the outer IP header and the vif pointer. */
m_copydata(m, 0, sizeof(ip), (void *)&ip);
vifp = (struct vif *)arg;
/*
* The outer source must match the vif's remote peer address.
* For a multicast router with several tunnels, this is the
* only check that will fail on packets in other tunnels,
* assuming the local address is the same.
*/
if (!in_hosteq(vifp->v_rmt_addr, ip.ip_src))
return 0;
/* The outer destination must match the vif's local address. */
if (!in_hosteq(vifp->v_lcl_addr, ip.ip_dst))
return 0;
/* The vif must be of tunnel type. */
if ((vifp->v_flags & VIFF_TUNNEL) == 0)
return 0;
/* Check that the inner destination is multicast. */
if (off + sizeof(ip) > m->m_pkthdr.len)
return 0;
m_copydata(m, off, sizeof(ip), (void *)&ip);
if (!IN_MULTICAST(ip.ip_dst.s_addr))
return 0;
/*
* We have checked that both the outer src and dst addresses
* match the vif, and that the inner destination is multicast
* (224/5). By claiming more than 64, we intend to
* preferentially take packets that also match a parallel
* gif(4).
*/
return 32 + 32 + 5;
}
if (len > MAX_BKT_SIZE) {
/* drop if packet is too large */
mrtstat.mrts_pkt2large++;
m_freem(m);
return;
}
tbf_update_tokens(vifp);
/*
* If there are enough tokens, and the queue is empty, send this packet
* out immediately. Otherwise, try to insert it on this vif's queue.
*/
if (vifp->tbf_q_len == 0) {
if (len <= vifp->tbf_n_tok) {
vifp->tbf_n_tok -= len;
tbf_send_packet(vifp, m);
} else {
/* queue packet and timeout till later */
tbf_queue(vifp, m);
callout_reset(&vifp->v_repq_ch, TBF_REPROCESS,
tbf_reprocess_q, vifp);
}
} else {
if (vifp->tbf_q_len >= vifp->tbf_max_q_len &&
!tbf_dq_sel(vifp, ip)) {
/* queue full, and couldn't make room */
mrtstat.mrts_q_overflow++;
m_freem(m);
} else {
/* queue length low enough, or made room */
tbf_queue(vifp, m);
tbf_process_q(vifp);
}
}
}
/*
* adds a packet to the queue at the interface
*/
static void
tbf_queue(struct vif *vifp, struct mbuf *m)
{
int s = splsoftnet();
/* insert at tail */
*vifp->tbf_t = m;
vifp->tbf_t = &m->m_nextpkt;
vifp->tbf_q_len++;
splx(s);
}
/*
* processes the queue at the interface
*/
static void
tbf_process_q(struct vif *vifp)
{
struct mbuf *m;
int len;
int s = splsoftnet();
/*
* Loop through the queue at the interface and send as many packets
* as possible.
*/
for (m = vifp->tbf_q; m != NULL; m = vifp->tbf_q) {
len = ntohs(mtod(m, struct ip *)->ip_len);
/* determine if the packet can be sent */
if (len <= vifp->tbf_n_tok) {
			/*
			 * If so, reduce the number of tokens, dequeue
			 * the packet, and send it.
			 */
if ((vifp->tbf_q = m->m_nextpkt) == NULL)
vifp->tbf_t = &vifp->tbf_q;
--vifp->tbf_q_len;
if (vifp->tbf_q_len != 0)
callout_reset(&vifp->v_repq_ch, TBF_REPROCESS,
tbf_reprocess_q, vifp);
}
/*
 * Function that selectively discards a member of the queue,
 * based on the precedence value and the priority.
 */
static int
tbf_dq_sel(struct vif *vifp, struct ip *ip)
{
u_int p;
struct mbuf **mp, *m;
int s = splsoftnet();
p = priority(vifp, ip);
for (mp = &vifp->tbf_q, m = *mp;
m != NULL;
mp = &m->m_nextpkt, m = *mp) {
if (p > priority(vifp, mtod(m, struct ip *))) {
if ((*mp = m->m_nextpkt) == NULL)
vifp->tbf_t = mp;
--vifp->tbf_q_len;
if (mrtdebug & DEBUG_XMIT)
log(LOG_DEBUG, "phyint_send on vif %ld err %d\n",
(long)(vifp - viftable), error);
}
splx(s);
}
/*
 * Determine the current time, compute the time elapsed since the last
 * packet (in microseconds), and update the number of tokens in the bucket.
 */
static void
tbf_update_tokens(struct vif *vifp)
{
struct timeval tp;
u_int32_t tm;
int s = splsoftnet();
microtime(&tp);
TV_DELTA(tp, vifp->tbf_last_pkt_t, tm);
/*
* This formula is actually
* "time in seconds" * "bytes/second".
*
* (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8)
*
* The (1000/1024) was introduced in add_vif to optimize
* this divide into a shift.
*/
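	/*
	 * Worked example (illustrative): with v_rate_limit = 500 (kbit/s)
	 * and tm = 1000000 usec (1 second), this adds 1000000 * 500 / 8192
	 * ~= 61035 tokens (bytes), i.e. roughly the configured 500 kbit/s
	 * reduced by the 1000/1024 factor mentioned above.
	 */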
vifp->tbf_n_tok += tm * vifp->v_rate_limit / 8192;
vifp->tbf_last_pkt_t = tp;
if (vifp->tbf_n_tok > MAX_BKT_SIZE)
vifp->tbf_n_tok = MAX_BKT_SIZE;
splx(s);
}
static int
priority(struct vif *vifp, struct ip *ip)
{
int prio = 50; /* the lowest priority -- default case */
/* temporary hack; may add general packet classifier some day */
/*
* XXX XXX: We're reading the UDP header, but we didn't ensure
* it was present in the packet.
*/
/*
* The UDP port space is divided up into four priority ranges:
* [0, 16384) : unclassified - lowest priority
* [16384, 32768) : audio - highest priority
* [32768, 49152) : whiteboard - medium priority
* [49152, 65536) : video - low priority
*/
if (ip->ip_p == IPPROTO_UDP) {
struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2));
switch (ntohs(udp->uh_dport) & 0xc000) {
case 0x4000:
prio = 70;
break;
case 0x8000:
prio = 60;
break;
case 0xc000:
prio = 55;
break;
}
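		/*
		 * Worked example (illustrative): a UDP destination port of
		 * 20000 is 0x4E20; 0x4E20 & 0xc000 == 0x4000, so the packet
		 * falls in the [16384, 32768) audio range and gets prio 70.
		 */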
/* Add the new bw_meter entry to the front of entries for this MFC */
x->bm_mfc = mfc;
x->bm_mfc_next = mfc->mfc_bw_meter;
mfc->mfc_bw_meter = x;
schedule_bw_meter(x, &now);
splx(s);
list = list->bm_mfc_next;
unschedule_bw_meter(x);
kmem_intr_free(x, sizeof(*x));
}
}
/*
* Delete one or multiple bw_meter entries
*/
static int
del_bw_upcall(struct bw_upcall *req)
{
int s;
struct mfc *mfc;
struct bw_meter *x;
if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
return EOPNOTSUPP;
s = splsoftnet();
/* Find the corresponding MFC entry */
mfc = mfc_find(&req->bu_src, &req->bu_dst);
if (mfc == NULL) {
splx(s);
return EADDRNOTAVAIL;
} else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
/*
* Delete all bw_meter entries for this mfc
*/
struct bw_meter *list;
list = mfc->mfc_bw_meter;
mfc->mfc_bw_meter = NULL;
free_bw_list(list);
splx(s);
return 0;
} else { /* Delete a single bw_meter entry */
struct bw_meter *prev;
uint32_t flags = 0;
flags = compute_bw_meter_flags(req);
/* Find the bw_meter entry to delete */
for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
prev = x, x = x->bm_mfc_next) {
if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
&req->bu_threshold.b_time, ==)) &&
(x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
(x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
(x->bm_flags & BW_METER_USER_FLAGS) == flags)
break;
}
if (x != NULL) { /* Delete entry from the list for this MFC */
if (prev != NULL)
prev->bm_mfc_next = x->bm_mfc_next; /* remove from middle*/
else
x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */
/*
* Perform bandwidth measurement processing that may result in an upcall
*/
static void
bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
{
struct timeval delta;
if (x->bm_flags & BW_METER_GEQ) {
/*
* Processing for ">=" type of bw_meter entry
*/
if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
/* Reset the bw_meter entry */
x->bm_start_time = *nowp;
x->bm_measured.b_packets = 0;
x->bm_measured.b_bytes = 0;
x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
}
/* Record that a packet is received */
x->bm_measured.b_packets++;
x->bm_measured.b_bytes += plen;
/*
* Test if we should deliver an upcall
*/
if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
(x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
((x->bm_flags & BW_METER_UNIT_BYTES) &&
(x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
/* Prepare an upcall for delivery */
bw_meter_prepare_upcall(x, nowp);
x->bm_flags |= BW_METER_UPCALL_DELIVERED;
}
}
} else if (x->bm_flags & BW_METER_LEQ) {
/*
* Processing for "<=" type of bw_meter entry
*/
if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
/*
* We are behind time with the multicast forwarding table
* scanning for "<=" type of bw_meter entries, so test now
* if we should deliver an upcall.
*/
if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
(x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
((x->bm_flags & BW_METER_UNIT_BYTES) &&
(x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
/* Prepare an upcall for delivery */
bw_meter_prepare_upcall(x, nowp);
}
/* Reschedule the bw_meter entry */
unschedule_bw_meter(x);
schedule_bw_meter(x, nowp);
}
/* Record that a packet is received */
x->bm_measured.b_packets++;
x->bm_measured.b_bytes += plen;
/*
* Test if we should restart the measuring interval
*/
if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
(x->bm_flags & BW_METER_UNIT_BYTES &&
x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
/* Don't restart the measuring interval */
} else {
/* Do restart the measuring interval */
/*
* XXX: note that we don't unschedule and schedule, because this
* might be too much overhead per packet. Instead, when we process
* all entries for a given timer hash bin, we check whether it is
* really a timeout. If not, we reschedule at that time.
*/
x->bm_start_time = *nowp;
x->bm_measured.b_packets = 0;
x->bm_measured.b_bytes = 0;
x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
}
}
}
if (bw_upcalls_n == 0)
return; /* No pending upcalls */
bw_upcalls_n = 0;
/*
* Allocate a new mbuf, initialize it with the header and
 * the payload for the pending upcalls.
*/
MGETHDR(m, M_DONTWAIT, MT_HEADER);
if (m == NULL) {
log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
return;
}
/*
* Send the upcalls
* XXX do we need to set the address in k_igmpsrc ?
*/
mrtstat.mrts_upcalls++;
if (socket_send(ip_mrouter, m, &k_igmpsrc) < 0) {
log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
++mrtstat.mrts_upq_sockfull;
}
}
/*
* Compute the timeout hash value for the bw_meter entries
*/
#define BW_METER_TIMEHASH(bw_meter, hash) \
do { \
struct timeval next_timeval = (bw_meter)->bm_start_time; \
BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
(hash) = next_timeval.tv_sec; \
if (next_timeval.tv_usec) \
(hash)++; /* XXX: make sure we don't timeout early */ \
(hash) %= BW_METER_BUCKETS; \
} while (/*CONSTCOND*/ 0)
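/*
 * Worked example (illustrative): for an entry with bm_start_time = 10.3 s
 * and a threshold b_time of 1.0 s, next_timeval is 11.3 s; the non-zero
 * tv_usec bumps the hash to 12, and 12 % BW_METER_BUCKETS selects the
 * bucket, so the entry is not processed before its interval has expired.
 */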
/*
 * Schedule a timer to periodically process a bw_meter entry of type "<="
 * by linking the entry into the proper hash bucket.
*/
static void
schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
{
int time_hash;
if (!(x->bm_flags & BW_METER_LEQ))
return; /* XXX: we schedule timers only for "<=" entries */
/*
* Compute the timeout hash value and insert the entry
*/
BW_METER_TIMEHASH(x, time_hash);
x->bm_time_next = bw_meter_timers[time_hash];
bw_meter_timers[time_hash] = x;
x->bm_time_hash = time_hash;
}
/*
 * Unschedule the periodic timer that processes a bw_meter entry of type "<="
* by removing the entry from the proper hash bucket.
*/
static void
unschedule_bw_meter(struct bw_meter *x)
{
int time_hash;
struct bw_meter *prev, *tmp;
if (!(x->bm_flags & BW_METER_LEQ))
return; /* XXX: we schedule timers only for "<=" entries */
/*
* Compute the timeout hash value and delete the entry
*/
time_hash = x->bm_time_hash;
if (time_hash >= BW_METER_BUCKETS)
return; /* Entry was not scheduled */
/*
 * Process all "<=" type bw_meter entries that should be processed now,
 * and for each entry prepare an upcall if necessary.  Each processed
 * entry is then rescheduled for the next (periodic) processing.
*
* This is run periodically (once per second normally). On each round,
* all the potentially matching entries are in the hash slot that we are
* looking at.
*/
static void
bw_meter_process(void)
{
int s;
static uint32_t last_tv_sec; /* last time we processed this */
uint32_t loops;
int i;
struct timeval now, process_endtime;
microtime(&now);
if (last_tv_sec == now.tv_sec)
return; /* nothing to do */
s = splsoftnet();
/*
* Process all bins of bw_meter entries from the one after the last
* processed to the current one. On entry, i points to the last bucket
* visited, so we need to increment i at the beginning of the loop.
*/
for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
struct bw_meter *x, *tmp_list;
if (++i >= BW_METER_BUCKETS)
i = 0;
/* Disconnect the list of bw_meter entries from the bin */
tmp_list = bw_meter_timers[i];
bw_meter_timers[i] = NULL;
/* Process the list of bw_meter entries */
while (tmp_list != NULL) {
x = tmp_list;
tmp_list = tmp_list->bm_time_next;
/* Test if the time interval is over */
process_endtime = x->bm_start_time;
BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
/* Not yet: reschedule, but don't reset */
int time_hash;
BW_METER_TIMEHASH(x, time_hash);
if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
/*
* XXX: somehow the bin processing is a bit ahead of time.
* Put the entry in the next bin.
*/
if (++time_hash >= BW_METER_BUCKETS)
time_hash = 0;
}
x->bm_time_next = bw_meter_timers[time_hash];
bw_meter_timers[time_hash] = x;
x->bm_time_hash = time_hash;
continue;
}
/*
* Test if we should deliver an upcall
*/
if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
(x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
((x->bm_flags & BW_METER_UNIT_BYTES) &&
(x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
/* Prepare an upcall for delivery */
bw_meter_prepare_upcall(x, &now);
}
/*
* Reschedule for next processing
*/
schedule_bw_meter(x, &now);
}
}
/* Send all upcalls that are pending delivery */
bw_upcalls_send();
splx(s);
}
/*
* A periodic function for sending all upcalls that are pending delivery
*/
static void
expire_bw_upcalls_send(void *unused)
{
int s;
/*
 * A periodic function for scanning the multicast forwarding
 * table and processing all "<=" bw_meter entries.
*/
static void
expire_bw_meter_process(void *unused)
{
if (mrt_api_config & MRT_MFC_BW_UPCALL)
bw_meter_process();
#ifdef PIM
/*
 * Send the packet up to the user-level daemon, or do the PIM Register
 * encapsulation in the kernel if an RP address is configured.
*/
static int
pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m,
struct mfc *rt)
{
struct mbuf *mb_copy, *mm;
if (mrtdebug & DEBUG_PIM)
log(LOG_DEBUG, "pim_register_send: \n");
mb_copy = pim_register_prepare(ip, m);
if (mb_copy == NULL)
return ENOBUFS;
/*
* Send all the fragments. Note that the mbuf for each fragment
* is freed by the sending machinery.
*/
for (mm = mb_copy; mm; mm = mb_copy) {
mb_copy = mm->m_nextpkt;
mm->m_nextpkt = NULL;
mm = m_pullup(mm, sizeof(struct ip));
if (mm != NULL) {
ip = mtod(mm, struct ip *);
if ((mrt_api_config & MRT_MFC_RP) &&
!in_nullhost(rt->mfc_rp)) {
pim_register_send_rp(ip, vifp, mm, rt);
} else {
pim_register_send_upcall(ip, vifp, mm, rt);
}
}
}
return 0;
}
/*
* Return a copy of the data packet that is ready for PIM Register
* encapsulation.
* XXX: Note that in the returned copy the IP header is a valid one.
*/
static struct mbuf *
pim_register_prepare(struct ip *ip, struct mbuf *m)
{
struct mbuf *mb_copy = NULL;
int mtu;
/* Take care of delayed checksums */
if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
in_undefer_cksum_tcpudp(m);
m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
}
/*
* Copy the old packet & pullup its IP header into the
* new mbuf so we can modify it.
*/
mb_copy = m_copypacket(m, M_DONTWAIT);
if (mb_copy == NULL)
return NULL;
mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
if (mb_copy == NULL)
return NULL;
/* take care of the TTL */
ip = mtod(mb_copy, struct ip *);
--ip->ip_ttl;
/* Compute the MTU after the PIM Register encapsulation */
mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
if (ntohs(ip->ip_len) <= mtu) {
/* Turn the IP header into a valid one */
ip->ip_sum = 0;
ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
} else {
/* Fragment the packet */
if (ip_fragment(mb_copy, NULL, mtu) != 0) {
/* XXX: mb_copy was freed by ip_fragment() */
return NULL;
}
}
return mb_copy;
}
/*
* Send an upcall with the data packet to the user-level process.
*/
static int
pim_register_send_upcall(struct ip *ip, struct vif *vifp,
struct mbuf *mb_copy, struct mfc *rt)
{
struct mbuf *mb_first;
int len = ntohs(ip->ip_len);
struct igmpmsg *im;
struct sockaddr_in k_igmpsrc = {
.sin_len = sizeof(k_igmpsrc),
.sin_family = AF_INET,
};
/*
* Add a new mbuf with an upcall header
*/
MGETHDR(mb_first, M_DONTWAIT, MT_HEADER);
if (mb_first == NULL) {
m_freem(mb_copy);
return ENOBUFS;
}
mb_first->m_data += max_linkhdr;
mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
mb_first->m_len = sizeof(struct igmpmsg);
mb_first->m_next = mb_copy;
/*
* Encapsulate the data packet in PIM Register message and send it to the RP.
*/
static int
pim_register_send_rp(struct ip *ip, struct vif *vifp,
struct mbuf *mb_copy, struct mfc *rt)
{
struct mbuf *mb_first;
struct ip *ip_outer;
struct pim_encap_pimhdr *pimhdr;
int len = ntohs(ip->ip_len);
vifi_t vifi = rt->mfc_parent;
if ((vifi >= numvifs) || in_nullhost(viftable[vifi].v_lcl_addr)) {
m_freem(mb_copy);
return EADDRNOTAVAIL; /* The iif vif is invalid */
}
/*
* Add a new mbuf with the encapsulating header
*/
MGETHDR(mb_first, M_DONTWAIT, MT_HEADER);
if (mb_first == NULL) {
m_freem(mb_copy);
return ENOBUFS;
}
mb_first->m_data += max_linkhdr;
mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
mb_first->m_next = mb_copy;
mb_first->m_pkthdr.len = len + mb_first->m_len;
/*
* Fill in the encapsulating IP and PIM header
*/
ip_outer = mtod(mb_first, struct ip *);
*ip_outer = pim_encap_iphdr;
if (mb_first->m_pkthdr.len < IP_MINFRAGSIZE)
ip_outer->ip_id = 0;
else
ip_outer->ip_id = ip_newid();
ip_outer->ip_len = htons(len + sizeof(pim_encap_iphdr) +
sizeof(pim_encap_pimhdr));
ip_outer->ip_src = viftable[vifi].v_lcl_addr;
ip_outer->ip_dst = rt->mfc_rp;
/*
* Copy the inner header TOS to the outer header, and take care of the
* IP_DF bit.
*/
ip_outer->ip_tos = ip->ip_tos;
if (ntohs(ip->ip_off) & IP_DF)
ip_outer->ip_off |= htons(IP_DF);
pimhdr = (struct pim_encap_pimhdr *)((char *)ip_outer
+ sizeof(pim_encap_iphdr));
*pimhdr = pim_encap_pimhdr;
/* If the iif crosses a border, set the Border-bit */
if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & mrt_api_config)
pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
/*
 * PIM-SMv2 and PIM-DM message processing.
 * Receives and verifies the PIM control messages, and passes them
 * up to the listening socket, using rip_input().
 * The only message with special processing is the PIM_REGISTER message
 * (used by PIM-SM): the PIM header is stripped off, and the inner packet
 * is looped back via looutput() to be forwarded.
*/
void
pim_input(struct mbuf *m, int off, int proto)
{
struct ip *ip = mtod(m, struct ip *);
struct pim *pim;
int minlen;
int datalen;
int ip_tos;
int iphlen;
/*
* Validate lengths
*/
if (datalen < PIM_MINLEN) {
pimstat.pims_rcv_tooshort++;
log(LOG_ERR, "pim_input: packet size too small %d from %lx\n",
datalen, (u_long)ip->ip_src.s_addr);
m_freem(m);
return;
}
/*
* If the packet is at least as big as a REGISTER, go ahead
* and grab the PIM REGISTER header size, to avoid another
* possible m_pullup() later.
*
* PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
* PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
*/
minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
/*
* Get the IP and PIM headers in contiguous memory, and
* possibly the PIM REGISTER header.
*/
if ((m->m_flags & M_EXT || m->m_len < minlen) &&
(m = m_pullup(m, minlen)) == NULL) {
log(LOG_ERR, "pim_input: m_pullup failure\n");
return;
}
ip = mtod(m, struct ip *);
ip_tos = ip->ip_tos;
/* adjust mbuf to point to the PIM header */
m->m_data += iphlen;
m->m_len -= iphlen;
pim = mtod(m, struct pim *);
/*
* Validate checksum. If PIM REGISTER, exclude the data packet.
*
* XXX: some older PIMv2 implementations don't make this distinction,
 * so for compatibility reasons perform the checksum over part of the
 * message first, and if that fails, then over the whole message.
*/
if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
/* do nothing, checksum okay */
} else if (in_cksum(m, datalen)) {
pimstat.pims_rcv_badsum++;
if (mrtdebug & DEBUG_PIM)
log(LOG_DEBUG, "pim_input: invalid checksum\n");
m_freem(m);
return;
}
/* PIM version check */
if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
pimstat.pims_rcv_badversion++;
log(LOG_ERR, "pim_input: incorrect version %d, expecting %d\n",
PIM_VT_V(pim->pim_vt), PIM_VERSION);
m_freem(m);
return;
}
/* restore mbuf back to the outer IP */
m->m_data -= iphlen;
m->m_len += iphlen;
if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
/*
* Since this is a REGISTER, we'll make a copy of the register
* headers ip + pim + u_int32 + encap_ip, to be passed up to the
* routing daemon.
*/
int s;
struct sockaddr_in dst = {
.sin_len = sizeof(dst),
.sin_family = AF_INET,
};
struct mbuf *mcp;
struct ip *encap_ip;
u_int32_t *reghdr;
struct ifnet *vifp;
s = splsoftnet();
if ((reg_vif_num >= numvifs) || (reg_vif_num == VIFI_INVALID)) {
splx(s);
if (mrtdebug & DEBUG_PIM)
log(LOG_DEBUG,
"pim_input: register vif not set: %d\n", reg_vif_num);
m_freem(m);
return;
}
/* XXX need refcnt? */
vifp = viftable[reg_vif_num].v_ifp;
splx(s);
/*
* Validate length
*/
if (datalen < PIM_REG_MINLEN) {
pimstat.pims_rcv_tooshort++;
pimstat.pims_rcv_badregisters++;
log(LOG_ERR,
"pim_input: register packet size too small %d from %lx\n",
datalen, (u_long)ip->ip_src.s_addr);
m_freem(m);
return;
}
if (mrtdebug & DEBUG_PIM) {
log(LOG_DEBUG,
"pim_input[register], encap_ip: %lx -> %lx, encap_ip len %d\n",
(u_long)ntohl(encap_ip->ip_src.s_addr),
(u_long)ntohl(encap_ip->ip_dst.s_addr),
ntohs(encap_ip->ip_len));
}
/* verify the version number of the inner packet */
if (encap_ip->ip_v != IPVERSION) {
pimstat.pims_rcv_badregisters++;
if (mrtdebug & DEBUG_PIM) {
log(LOG_DEBUG, "pim_input: invalid IP version (%d) "
"of the inner packet\n", encap_ip->ip_v);
}
m_freem(m);
return;
}
/* verify the inner packet doesn't have options */
if (encap_ip->ip_hl != (sizeof(struct ip) >> 2)) {
pimstat.pims_rcv_badregisters++;
m_freem(m);
return;
}
/* verify the inner packet is destined to a mcast group */
if (!IN_MULTICAST(encap_ip->ip_dst.s_addr)) {
pimstat.pims_rcv_badregisters++;
if (mrtdebug & DEBUG_PIM)
log(LOG_DEBUG,
"pim_input: inner packet of register is not "
"multicast %lx\n",
(u_long)ntohl(encap_ip->ip_dst.s_addr));
m_freem(m);
return;
}
/* If a NULL_REGISTER, pass it to the daemon */
if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
goto pim_input_to_daemon;
/*
* Copy the TOS from the outer IP header to the inner IP header.
*/
if (encap_ip->ip_tos != ip_tos) {
/* Outer TOS -> inner TOS */
encap_ip->ip_tos = ip_tos;
/* Recompute the inner header checksum. Sigh... */
/* adjust mbuf to point to the inner IP header */
m->m_data += (iphlen + PIM_MINLEN);
m->m_len -= (iphlen + PIM_MINLEN);
/* restore mbuf to point back to the outer IP header */
m->m_data -= (iphlen + PIM_MINLEN);
m->m_len += (iphlen + PIM_MINLEN);
}
/*
* Decapsulate the inner IP packet and loopback to forward it
* as a normal multicast packet. Also, make a copy of the
* outer_iphdr + pimhdr + reghdr + encap_iphdr
* to pass to the daemon later, so it can take the appropriate
* actions (e.g., send back PIM_REGISTER_STOP).
* XXX: here m->m_data points to the outer IP header.
*/
mcp = m_copym(m, 0, iphlen + PIM_REG_MINLEN, M_DONTWAIT);
if (mcp == NULL) {
log(LOG_ERR,
"pim_input: pim register: could not copy register head\n");
m_freem(m);
return;
}
/* Keep statistics */
/* XXX: registers_bytes include only the encap. mcast pkt */
pimstat.pims_rcv_registers_msgs++;
pimstat.pims_rcv_registers_bytes += ntohs(encap_ip->ip_len);
/*
* forward the inner ip packet; point m_data at the inner ip.
*/
m_adj(m, iphlen + PIM_MINLEN);
if (mrtdebug & DEBUG_PIM) {
log(LOG_DEBUG,
"pim_input: forwarding decapsulated register: "
"src %lx, dst %lx, vif %d\n",
(u_long)ntohl(encap_ip->ip_src.s_addr),
(u_long)ntohl(encap_ip->ip_dst.s_addr),
reg_vif_num);
}
/* NB: vifp was collected above; can it change on us? */
looutput(vifp, m, (struct sockaddr *)&dst, NULL);
/* prepare the register head to send to the mrouting daemon */
m = mcp;
}
pim_input_to_daemon:
/*
* Pass the PIM message up to the daemon; if it is a Register message,
* pass the 'head' only up to the daemon. This includes the
* outer IP header, PIM header, PIM-Register header and the
* inner IP header.
 * XXX: the outer IP header packet size of a Register is not adjusted to
* reflect the fact that the inner multicast data is truncated.
*/
/*
* Currently, pim_input() is always called holding softnet_lock
* by ipintr()(!NET_MPSAFE) or PR_INPUT_WRAP()(NET_MPSAFE).
*/
KASSERT(mutex_owned(softnet_lock));
rip_input(m, iphlen, proto);