/*
* Copyright (c) 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Atsushi Onoe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
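	/* Refuse to transmit unless the interface is marked up and running. */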
if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
senderr(ENETDOWN);
/*
* If the queueing discipline needs packet classification,
* do it before prepending link headers.
*/
IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);
/*
* For unicast, we make a tag to store the lladdr of the
* destination. This might not be the first time we have seen
* the packet (for instance, the arp code might be trying to
* re-send it after receiving an arp reply) so we only
* allocate a tag if there isn't one there already. For
* multicast, we will eventually use a different tag to store
* the channel number.
*/
unicast = !(m0->m_flags & (M_BCAST | M_MCAST));
if (unicast) {
mtag = m_tag_find(m0, MTAG_FIREWIRE_HWADDR);
if (!mtag) {
mtag = m_tag_get(MTAG_FIREWIRE_HWADDR,
sizeof (struct ieee1394_hwaddr), M_NOWAIT);
if (!mtag) {
error = ENOMEM;
goto bad;
}
m_tag_prepend(m0, mtag);
}
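		/*
		 * The destination hardware address lives in the tag
		 * payload, immediately after the m_tag header.
		 */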
hwdst = (struct ieee1394_hwaddr *)(mtag + 1);
} else {
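		/* Broadcast/multicast uses the local broadcast pseudo-address. */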
hwdst = &baddr;
}
switch (dst->sa_family) {
#ifdef INET
case AF_INET:
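		/*
		 * Resolve the IPv4 destination to its 1394 hardware address.
		 * EWOULDBLOCK means arpresolve has queued the packet until
		 * the ARP reply arrives, so report success to the caller.
		 */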
if (unicast &&
(error = arpresolve(ifp, rt, m0, dst, hwdst,
sizeof(*hwdst))) != 0)
return error == EWOULDBLOCK ? 0 : error;
/* if broadcasting on a simplex interface, loopback a copy */
if ((m0->m_flags & M_BCAST) && (ifp->if_flags & IFF_SIMPLEX))
mcopy = m_copypacket(m0, M_DONTWAIT);
etype = htons(ETHERTYPE_IP);
break;
case AF_ARP:
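		/* Outgoing ARP packets carry the IEEE 1394 hardware type. */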
ah = mtod(m0, struct arphdr *);
ah->ar_hrd = htons(ARPHRD_IEEE1394);
etype = htons(ETHERTYPE_ARP);
break;
#endif /* INET */
#ifdef INET6
case AF_INET6:
#if 0
/*
		 * XXX This code was in nd6_storelladdr, which was replaced with
		 * nd6_resolve, but it was never used because nd6_storelladdr
		 * was only called for unicast destinations. Should it be enabled?
*/
if (m0->m_flags & M_BCAST)
memcpy(hwdst->iha_uid, ifp->if_broadcastaddr,
MIN(IEEE1394_ADDR_LEN, ifp->if_addrlen));
#endif
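		/*
		 * Let neighbour discovery fill in the destination unique ID.
		 * As with ARP, EWOULDBLOCK means the packet is held until
		 * resolution completes and is not an error.
		 */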
if (unicast) {
error = nd6_resolve(ifp, rt, m0, dst, hwdst->iha_uid,
IEEE1394_ADDR_LEN);
if (error != 0)
return error == EWOULDBLOCK ? 0 : error;
}
etype = htons(ETHERTYPE_IPV6);
break;
#endif /* INET6 */
case pseudo_AF_HDRCMPLT:
case AF_UNSPEC:
/* TODO? */
default:
printf("%s: can't handle af%d\n", ifp->if_xname,
dst->sa_family);
senderr(EAFNOSUPPORT);
break;
}
	/*
	 * XXX:
	 * The maximum possible rate depends on the bus topology, so
	 * maxrec and the fragmentation threshold really ought to be
	 * determined by the driver after it has probed the topology map.
	 */
	if (unicast)
		hdrlen = 0;
	else {
		/* broadcast/multicast goes out as an asynchronous stream
		 * packet, which needs a GASP header; stick to the base rate */
		hwdst->iha_speed = 0;	/* XXX */
		hdrlen = IEEE1394_GASP_LEN;
	}
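	/*
	 * Clamp the speed and maxrec to what both our node and the
	 * destination advertise.  maxrec is also limited by the chosen
	 * speed (the asynchronous payload limit doubles with each speed
	 * step above S100) and may never drop below 8 (a 512-byte payload).
	 */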
if (hwdst->iha_speed > myaddr->iha_speed)
hwdst->iha_speed = myaddr->iha_speed;
if (hwdst->iha_maxrec > myaddr->iha_maxrec)
hwdst->iha_maxrec = myaddr->iha_maxrec;
if (hwdst->iha_maxrec > (8 + hwdst->iha_speed))
hwdst->iha_maxrec = 8 + hwdst->iha_speed;
if (hwdst->iha_maxrec < 8)
hwdst->iha_maxrec = 8;
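	/*
	 * Inbound reassembly: find the reassembly queue matching id,
	 * creating one if this is the first fragment seen for it.
	 */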
for (rq = LIST_FIRST(&ic->ic_reassq); ; rq = LIST_NEXT(rq, rq_node)) {
if (rq == NULL) {
/*
			 * Create a new reassembly queue head for this node.
*/
rq = malloc(sizeof(*rq), M_FTABLE, M_NOWAIT);
if (rq == NULL) {
m_freem(m0);
return NULL;
}
rq->fr_id = id;
LIST_INIT(&rq->rq_pkt);
LIST_INSERT_HEAD(&ic->ic_reassq, rq, rq_node);
break;
}
if (rq->fr_id == id)
break;
}
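	/*
	 * Walk the fragments already queued under this datagram label
	 * (dgl) and either merge the new fragment into an adjacent one
	 * or find the place where it has to be inserted.
	 */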
for (rp = LIST_FIRST(&rq->rq_pkt); rp != NULL; rp = nrp) {
nrp = LIST_NEXT(rp, rp_next);
if (rp->rp_dgl != dgl)
continue;
/*
		 * Sanity check:
		 * the datagram size must be the same for all fragments,
		 * and fragments must not overlap.
*/
if (rp->rp_size != size ||
(off < rp->rp_off + rp->rp_len && off + len > rp->rp_off)) {
/*
			 * This probably happens because the dgl value wrapped.
			 * Destroy all previously received fragments for this
			 * dgl and enqueue the current fragment.
*/
for (rp = LIST_FIRST(&rq->rq_pkt); rp != NULL;
rp = nrp) {
nrp = LIST_NEXT(rp, rp_next);
if (rp->rp_dgl == dgl) {
LIST_REMOVE(rp, rp_next);
m_freem(rp->rp_m);
free(rp, M_FTABLE);
}
}
break;
}
if (rp->rp_off + rp->rp_len == off) {
/*
			 * Fragments that arrive in order, directly following an
			 * already-queued fragment, are handled here.
			 * Concatenate the mbuf to the existing entry instead of
			 * allocating a new reassembly record, then try to merge
			 * any subsequent fragments already sitting in the queue.
*/
m_cat(rp->rp_m, m0);
rp->rp_len += len;
while (rp->rp_off + rp->rp_len < size &&
nrp != NULL && nrp->rp_dgl == dgl &&
nrp->rp_off == rp->rp_off + rp->rp_len) {
LIST_REMOVE(nrp, rp_next);
m_cat(rp->rp_m, nrp->rp_m);
rp->rp_len += nrp->rp_len;
free(nrp, M_FTABLE);
nrp = LIST_NEXT(rp, rp_next);
}
m0 = NULL; /* mark merged */
break;
}
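		/*
		 * The new fragment ends exactly where rp begins: prepend
		 * it to rp, which then starts at the new, lower offset.
		 */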
if (off + m0->m_pkthdr.len == rp->rp_off) {
m_cat(m0, rp->rp_m);
rp->rp_m = m0;
rp->rp_off = off;
			rp->rp_etype = etype;	/* trust the lower-offset fragment's etype */
rp->rp_len += len;
m0 = NULL; /* mark merged */
break;
}
if (rp->rp_off > off) {
/* insert before rp */
nrp = rp;
break;
}
if (nrp == NULL || nrp->rp_dgl != dgl) {
/* insert after rp */
nrp = NULL;
break;
}
}
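	/*
	 * m0 == NULL means the fragment was merged into an existing entry
	 * above.  If the datagram is now complete, restore the unfragmented
	 * encapsulation header and hand the packet back.
	 */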
if (m0 == NULL) {
if (rp->rp_off != 0 || rp->rp_len != size)
return NULL;
		/* datagram complete */
LIST_REMOVE(rp, rp_next);
m0 = rp->rp_m;
m0->m_pkthdr.len = rp->rp_len;
M_PREPEND(m0, sizeof(*iuh), M_DONTWAIT);
if (m0 != NULL) {
iuh = mtod(m0, struct ieee1394_unfraghdr *);
iuh->iuh_ft = 0;
iuh->iuh_etype = rp->rp_etype;
}
free(rp, M_FTABLE);
return m0;
}
/*
	 * New fragment received: allocate a reassembly record for it.
*/
trp = malloc(sizeof(*trp), M_FTABLE, M_NOWAIT);
if (trp == NULL) {
m_freem(m0);
return NULL;
}
trp->rp_m = m0;
trp->rp_size = size;
trp->rp_etype = etype; /* valid only if off==0 */
trp->rp_off = off;
trp->rp_dgl = dgl;
trp->rp_len = len;
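	/*
	 * Arm the reassembly timeout, making sure it cannot expire before
	 * the watchdog tick that is already counting down.
	 */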
trp->rp_ttl = IEEE1394_REASS_TIMEOUT;
if (trp->rp_ttl <= ifp->if_timer)
trp->rp_ttl = ifp->if_timer + 1;
if (rp == NULL) {
/* first fragment for the dgl */
LIST_INSERT_HEAD(&rq->rq_pkt, trp, rp_next);
} else if (nrp == NULL) {
/* no next fragment for the dgl */
LIST_INSERT_AFTER(rp, trp, rp_next);
} else {
/* there is a hole */
LIST_INSERT_BEFORE(nrp, trp, rp_next);
}
return NULL;
}