/*
* Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Cisco Systems, Inc.
* 4. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* a list of sizes based on typical MTUs; used only if the next hop
* size is not returned.
*/
static int sctp_mtu_sizes[] = {
68,
296,
508,
512,
544,
576,
1006,
1492,
1500,
1536,
2002,
2048,
4352,
4464,
8166,
17914,
32000,
65535
};
int
find_next_best_mtu(int totsz)
{
int i, prefer;
/*
* if we are in here we must find the next best fit based on the
* size of the datagram that failed to be sent.
*/
prefer = 0;
for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
if (totsz < sctp_mtu_sizes[i]) {
prefer = i - 1;
if (prefer < 0)
prefer = 0;
break;
}
}
return (sctp_mtu_sizes[prefer]);
}
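#ifdef SCTP_MTU_TABLE_TEST
/*
* A minimal sketch of how the table lookup behaves (illustrative only;
* SCTP_MTU_TABLE_TEST is not a real kernel option): the routine returns
* the largest table entry not exceeding the size that failed.
*/
static void
sctp_mtu_table_test(void)
{
/* 1400 falls between the 1006 and 1492 entries, so we step down to 1006 */
KASSERT(find_next_best_mtu(1400) == 1006);
/* anything at or below the smallest entry clamps to 68 */
KASSERT(find_next_best_mtu(100) == 68);
KASSERT(find_next_best_mtu(68) == 68);
}
#endif /* SCTP_MTU_TABLE_TEST */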
u_int32_t
sctp_select_a_tag(struct sctp_inpcb *m)
{
u_long x, not_done;
struct timeval now;
SCTP_GETTIME_TIMEVAL(&now);
not_done = 1;
while (not_done) {
x = sctp_select_initial_TSN(&m->sctp_ep);
if (x == 0) {
/* we never use 0 */
continue;
}
if (sctp_is_vtag_good(m, x, &now)) {
not_done = 0;
}
}
return (x);
}
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
int for_a_init, uint32_t override_tag)
{
/*
* Anything set to zero is taken care of by the allocation
* routine's bzero
*/
/*
* Up front select what scoping to apply on addresses I tell my peer
* Not sure what to do with these right now, we will need to come up
* with a way to set them. We may need to pass them through from the
* caller in the sctp_aloc_assoc() function.
*/
int i;
/* init all variables to a known value.*/
asoc->state = SCTP_STATE_INUSE;
asoc->max_burst = m->sctp_ep.max_burst;
asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
asoc->cookie_life = m->sctp_ep.def_cookie_life;
if (override_tag) {
asoc->my_vtag = override_tag;
} else {
asoc->my_vtag = sctp_select_a_tag(m);
}
asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
sctp_select_initial_TSN(&m->sctp_ep);
asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
/* we are optimistic here */
asoc->peer_supports_asconf = 1;
asoc->peer_supports_asconf_setprim = 1;
asoc->peer_supports_pktdrop = 1;
asoc->sent_queue_retran_cnt = 0;
/* This will need to be adjusted */
asoc->last_cwr_tsn = asoc->init_seq_number - 1;
asoc->last_acked_seq = asoc->init_seq_number - 1;
asoc->advanced_peer_ack_point = asoc->last_acked_seq;
asoc->asconf_seq_in = asoc->last_acked_seq;
/* here we are different, we hold the next one we expect */
asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
struct in6pcb *inp6;
/* It's a V6 socket */
inp6 = (struct in6pcb *)m;
asoc->ipv6_addr_legal = 1;
/* Now look at the binding flag to see if V4 will be legal */
if (
#if defined(__OpenBSD__)
(0) /* we always do dual bind */
#elif defined (__NetBSD__)
(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
#else
(inp6->inp_flags & IN6P_IPV6_V6ONLY)
#endif
== 0) {
asoc->ipv4_addr_legal = 1;
} else {
/* V4 addresses are NOT legal on the association */
asoc->ipv4_addr_legal = 0;
}
} else {
/* It's a V4 socket, no V6 */
asoc->ipv4_addr_legal = 1;
asoc->ipv6_addr_legal = 0;
}
LIST_INIT(&asoc->sctp_local_addr_list);
TAILQ_INIT(&asoc->nets);
TAILQ_INIT(&asoc->pending_reply_queue);
asoc->last_asconf_ack_sent = NULL;
/* Setup to fill the hb random cache at first HB */
asoc->hb_random_idx = 4;
/*
* Now the stream parameters, here we allocate space for all
* streams that we request by default.
*/
asoc->streamoutcnt = asoc->pre_open_streams =
m->sctp_ep.pre_open_stream_count;
asoc->strmout = malloc(asoc->streamoutcnt *
sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
if (asoc->strmout == NULL) {
/* big trouble no memory */
return (ENOMEM);
}
for (i = 0; i < asoc->streamoutcnt; i++) {
/*
* The inbound side must be set to 0xffff. Also NOTE that when
* we get the INIT-ACK back (for the INIT sender) we MUST reduce
* the count (streamoutcnt), but first check whether we sent on
* any of the upper streams that were dropped (if some were).
* Those that were dropped must be reported to the upper layer
* as failed to send.
*/
asoc->strmout[i].next_sequence_sent = 0x0;
TAILQ_INIT(&asoc->strmout[i].outqueue);
asoc->strmout[i].stream_no = i;
asoc->strmout[i].next_spoke.tqe_next = NULL;
asoc->strmout[i].next_spoke.tqe_prev = NULL;
}
/* Now the mapping array */
asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
asoc->mapping_array = malloc(asoc->mapping_array_size,
M_PCB, M_NOWAIT);
if (asoc->mapping_array == NULL) {
free(asoc->strmout, M_PCB);
return (ENOMEM);
}
memset(asoc->mapping_array, 0, asoc->mapping_array_size);
/* Now the init of the other outqueues */
TAILQ_INIT(&asoc->out_wheel);
TAILQ_INIT(&asoc->control_send_queue);
TAILQ_INIT(&asoc->send_queue);
TAILQ_INIT(&asoc->sent_queue);
TAILQ_INIT(&asoc->reasmqueue);
TAILQ_INIT(&asoc->delivery_queue);
asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
TAILQ_INIT(&asoc->asconf_queue);
return (0);
}
int
sctp_expand_mapping_array(struct sctp_association *asoc)
{
/* mapping array needs to grow */
u_int8_t *new_array;
uint16_t new_size, old_size;
new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
new_array = malloc(new_size, M_PCB, M_NOWAIT);
if (new_array == NULL) {
/* can't get more, forget it */
return (-1);
}
memset(new_array, 0, new_size);
old_size = asoc->mapping_array_size;
memcpy(new_array, asoc->mapping_array, old_size);
free(asoc->mapping_array, M_PCB);
asoc->mapping_array = new_array;
asoc->mapping_array_size = new_size;
return (0);
}
void
sctp_timeout_handler(void *t)
{
struct sctp_inpcb *inp;
struct sctp_tcb *stcb;
struct sctp_nets *net;
struct sctp_timer *tmr;
int did_output;
mutex_enter(softnet_lock);
tmr = (struct sctp_timer *)t;
inp = (struct sctp_inpcb *)tmr->ep;
stcb = (struct sctp_tcb *)tmr->tcb;
net = (struct sctp_nets *)tmr->net;
did_output = 1;
if (inp == NULL) {
mutex_exit(softnet_lock);
return;
}
SCTP_INP_WLOCK(inp);
if (inp->sctp_socket == NULL) {
mutex_exit(softnet_lock);
SCTP_INP_WUNLOCK(inp);
return;
}
if (stcb) {
if (stcb->asoc.state == 0) {
mutex_exit(softnet_lock);
SCTP_INP_WUNLOCK(inp);
return;
}
}
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("Timer type %d goes off\n", tmr->type);
}
#endif /* SCTP_DEBUG */
#ifndef __NetBSD__
if (!callout_active(&tmr->timer)) {
SCTP_INP_WUNLOCK(inp);
return;
}
#endif
if (stcb) {
SCTP_TCB_LOCK(stcb);
}
SCTP_INP_INCR_REF(inp);
SCTP_INP_WUNLOCK(inp);
switch (tmr->type) {
case SCTP_TIMER_TYPE_ITERATOR:
{
struct sctp_iterator *it;
it = (struct sctp_iterator *)inp;
sctp_iterator_timer(it);
}
break;
/* call the handler for the appropriate timer type */
case SCTP_TIMER_TYPE_SEND:
sctp_pegs[SCTP_TMIT_TIMER]++;
stcb->asoc.num_send_timers_up--;
if (stcb->asoc.num_send_timers_up < 0) {
stcb->asoc.num_send_timers_up = 0;
}
if (sctp_t3rxt_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 1);
if ((stcb->asoc.num_send_timers_up == 0) &&
(stcb->asoc.sent_queue_cnt > 0)
) {
struct sctp_tmit_chunk *chk;
/*
* Safeguard: if there are chunks on the sent queue but no
* timers running, something is wrong. So we start a timer
* on the first chunk of the sent queue on whatever net it
* is being sent to.
*/
sctp_pegs[SCTP_T3_SAFEGRD]++;
chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
chk->whoTo);
}
break;
case SCTP_TIMER_TYPE_INIT:
if (sctp_t1init_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
/* We do output but not here */
did_output = 0;
break;
case SCTP_TIMER_TYPE_RECV:
sctp_pegs[SCTP_RECV_TIMER]++;
sctp_send_sack(stcb);
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 4);
break;
case SCTP_TIMER_TYPE_SHUTDOWN:
if (sctp_shutdown_timer(inp, stcb, net) ) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 5);
break;
case SCTP_TIMER_TYPE_HEARTBEAT:
if (sctp_heartbeat_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 6);
break;
case SCTP_TIMER_TYPE_COOKIE:
if (sctp_cookie_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 1);
break;
case SCTP_TIMER_TYPE_NEWCOOKIE:
{
struct timeval tv;
int i, secret;
SCTP_GETTIME_TIMEVAL(&tv);
SCTP_INP_WLOCK(inp);
inp->sctp_ep.time_of_secret_change = tv.tv_sec;
inp->sctp_ep.last_secret_number =
inp->sctp_ep.current_secret_number;
inp->sctp_ep.current_secret_number++;
if (inp->sctp_ep.current_secret_number >=
SCTP_HOW_MANY_SECRETS) {
inp->sctp_ep.current_secret_number = 0;
}
secret = (int)inp->sctp_ep.current_secret_number;
for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
inp->sctp_ep.secret_key[secret][i] =
sctp_select_initial_TSN(&inp->sctp_ep);
}
SCTP_INP_WUNLOCK(inp);
sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
}
did_output = 0;
break;
case SCTP_TIMER_TYPE_PATHMTURAISE:
sctp_pathmtu_timer(inp, stcb, net);
did_output = 0;
break;
case SCTP_TIMER_TYPE_SHUTDOWNACK:
if (sctp_shutdownack_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 7);
break;
case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
sctp_abort_an_association(inp, stcb,
SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
/* no need to unlock on tcb, it's gone */
goto out_decr;
break;
case SCTP_TIMER_TYPE_STRRESET:
if (sctp_strreset_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
sctp_chunk_output(inp, stcb, 9);
break;
case SCTP_TIMER_TYPE_ASCONF:
if (sctp_asconf_timer(inp, stcb, net)) {
/* no need to unlock on tcb, it's gone */
goto out_decr;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
#endif
sctp_chunk_output(inp, stcb, 8);
break;
case SCTP_TIMER_TYPE_AUTOCLOSE:
sctp_autoclose_timer(inp, stcb, net);
sctp_chunk_output(inp, stcb, 10);
did_output = 0;
break;
case SCTP_TIMER_TYPE_INPKILL:
/* special case, take away our
* increment since WE are the killer
*/
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
SCTP_INP_WUNLOCK(inp);
sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
sctp_inpcb_free(inp, 1);
goto out_no_decr;
break;
default:
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("sctp_timeout_handler:unknown timer %d\n",
tmr->type);
}
#endif /* SCTP_DEBUG */
break;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_audit_log(0xF1, (u_int8_t)tmr->type);
sctp_auditing(5, inp, stcb, net);
#endif
if (did_output) {
/*
* Now we need to clean up the control chunk chain if an
* ECNE is on it. It must be marked as UNSENT again so the
* next call will continue to send it until we get a CWR
* to remove it. It is, however, unlikely that we will find
* an ECN echo on the chain.
*/
sctp_fix_ecn_echo(&stcb->asoc);
}
if (stcb) {
SCTP_TCB_UNLOCK(stcb);
}
out_decr:
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
SCTP_INP_WUNLOCK(inp);
out_no_decr:
mutex_exit(softnet_lock);
}
int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_nets *net)
{
int to_ticks;
struct sctp_timer *tmr;
if (inp == NULL)
return (EFAULT);
to_ticks = 0;
tmr = NULL;
switch (t_type) {
case SCTP_TIMER_TYPE_ITERATOR:
{
struct sctp_iterator *it;
it = (struct sctp_iterator *)inp;
tmr = &it->tmr;
to_ticks = SCTP_ITERATOR_TICKS;
}
break;
case SCTP_TIMER_TYPE_SEND:
/* Here we use the RTO timer */
{
int rto_val;
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
if (net->RTO == 0) {
rto_val = stcb->asoc.initial_rto;
} else {
rto_val = net->RTO;
}
to_ticks = MSEC_TO_TICKS(rto_val);
}
break;
case SCTP_TIMER_TYPE_INIT:
/*
* Here we use the INIT timer default,
* usually about 1 minute.
*/
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
if (net->RTO == 0) {
to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
} else {
to_ticks = MSEC_TO_TICKS(net->RTO);
}
break;
case SCTP_TIMER_TYPE_RECV:
/*
* Here we use the Delayed-Ack timer value from the inp,
* usually about 200 ms.
*/
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.dack_timer;
to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
break;
case SCTP_TIMER_TYPE_SHUTDOWN:
/* Here we use the RTO of the destination. */
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
if (net->RTO == 0) {
to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
} else {
to_ticks = MSEC_TO_TICKS(net->RTO);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_HEARTBEAT:
/*
* The net is used here so that we can add in the RTO,
* even though we use a different timer. We also add the
* HB delay PLUS a random jitter.
*/
if (stcb == NULL) {
return (EFAULT);
}
{
uint32_t rndval;
uint8_t this_random;
int cnt_of_unconf=0;
struct sctp_nets *lnet;
TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
cnt_of_unconf++;
}
}
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
cnt_of_unconf, stcb->asoc.heart_beat_delay);
}
#endif
if (stcb->asoc.hb_random_idx > 3) {
rndval = sctp_select_initial_TSN(&inp->sctp_ep);
memcpy(stcb->asoc.hb_random_values, &rndval,
sizeof(stcb->asoc.hb_random_values));
this_random = stcb->asoc.hb_random_values[0];
stcb->asoc.hb_random_idx = 0;
stcb->asoc.hb_ect_randombit = 0;
} else {
this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
stcb->asoc.hb_random_idx++;
stcb->asoc.hb_ect_randombit = 0;
}
/*
* this_random will be 0 - 255 ms;
* RTO is in ms.
*/
if ((stcb->asoc.heart_beat_delay == 0) &&
(cnt_of_unconf == 0)) {
/* no HB on this inp after confirmations */
return (0);
}
if (net) {
int delay;
delay = stcb->asoc.heart_beat_delay;
TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
(lnet->dest_state & SCTP_ADDR_REACHABLE)) {
delay = 0;
}
}
if (net->RTO == 0) {
/* Never been checked */
to_ticks = this_random + stcb->asoc.initial_rto + delay;
} else {
/* sum is still in ms; converted to ticks below */
to_ticks = delay + net->RTO + this_random;
}
} else {
if (cnt_of_unconf) {
to_ticks = this_random + stcb->asoc.initial_rto;
} else {
to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
}
}
/*
* Now we must convert to_ticks, which currently holds
* ms, into clock ticks.
*/
to_ticks *= hz;
to_ticks /= 1000;
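/*
* (This open-coded conversion is equivalent to the MSEC_TO_TICKS()
* macro used for the other timer types in this function.)
*/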
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("Timer to expire in %d ticks\n", to_ticks);
}
#endif
tmr = &stcb->asoc.hb_timer;
}
break;
case SCTP_TIMER_TYPE_COOKIE:
/*
* Here we can use the RTO timer from the network since
* one RTT has completed. If a retransmission happened, then
* we will be using the RTO initial value.
*/
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
if (net->RTO == 0) {
to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
} else {
to_ticks = MSEC_TO_TICKS(net->RTO);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_NEWCOOKIE:
/*
* nothing needed but the endpoint here,
* usually about 60 minutes.
*/
tmr = &inp->sctp_ep.signature_change;
to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
break;
case SCTP_TIMER_TYPE_INPKILL:
/*
* The inp is setup to die. We re-use the
* signature_change timer since that has
* stopped and we are in the GONE state.
*/
tmr = &inp->sctp_ep.signature_change;
to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
break;
case SCTP_TIMER_TYPE_PATHMTURAISE:
/*
* Here we use the value found in the EP for PMTU,
* usually about 10 minutes.
*/
if (stcb == NULL) {
return (EFAULT);
}
if (net == NULL) {
return (EFAULT);
}
to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
tmr = &net->pmtu_timer;
break;
case SCTP_TIMER_TYPE_SHUTDOWNACK:
/* Here we use the RTO of the destination */
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
if (net->RTO == 0) {
to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
} else {
to_ticks = MSEC_TO_TICKS(net->RTO);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
/*
* Here we use the endpoint's shutdown guard timer,
* usually about 3 minutes.
*/
if (stcb == NULL) {
return (EFAULT);
}
to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
tmr = &stcb->asoc.shut_guard_timer;
break;
case SCTP_TIMER_TYPE_STRRESET:
/*
* Here the timer lives in the stcb,
* but its value comes from the net's RTO.
*/
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
if (net->RTO == 0) {
to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
} else {
to_ticks = MSEC_TO_TICKS(net->RTO);
}
tmr = &stcb->asoc.strreset_timer;
break;
case SCTP_TIMER_TYPE_ASCONF:
/*
* Here the timer lives in the stcb,
* but its value comes from the net's RTO.
*/
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
if (net->RTO == 0) {
to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
} else {
to_ticks = MSEC_TO_TICKS(net->RTO);
}
tmr = &stcb->asoc.asconf_timer;
break;
case SCTP_TIMER_TYPE_AUTOCLOSE:
if (stcb == NULL) {
return (EFAULT);
}
if (stcb->asoc.sctp_autoclose_ticks == 0) {
/* Really an error since stcb is NOT set to autoclose */
return (0);
}
to_ticks = stcb->asoc.sctp_autoclose_ticks;
tmr = &stcb->asoc.autoclose_timer;
break;
default:
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("sctp_timer_start:Unknown timer type %d\n",
t_type);
}
#endif /* SCTP_DEBUG */
return (EFAULT);
break;
}
if ((to_ticks <= 0) || (tmr == NULL)) {
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
t_type, to_ticks, tmr);
}
#endif /* SCTP_DEBUG */
return (EFAULT);
}
if (callout_pending(&tmr->timer)) {
/*
* We do NOT allow a timer to be restarted while it is
* already running; the current one is left unchanged.
*/
return (EALREADY);
}
/* At this point we can proceed */
if (t_type == SCTP_TIMER_TYPE_SEND) {
stcb->asoc.num_send_timers_up++;
}
tmr->type = t_type;
tmr->ep = (void *)inp;
tmr->tcb = (void *)stcb;
tmr->net = (void *)net;
callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
return (0);
}
int
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_nets *net)
{
struct sctp_timer *tmr;
if (inp == NULL)
return (EFAULT);
tmr = NULL;
switch (t_type) {
case SCTP_TIMER_TYPE_ITERATOR:
{
struct sctp_iterator *it;
it = (struct sctp_iterator *)inp;
tmr = &it->tmr;
}
break;
case SCTP_TIMER_TYPE_SEND:
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_INIT:
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_RECV:
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.dack_timer;
break;
case SCTP_TIMER_TYPE_SHUTDOWN:
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_HEARTBEAT:
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.hb_timer;
break;
case SCTP_TIMER_TYPE_COOKIE:
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_NEWCOOKIE:
/* nothing needed but the endpoint here */
tmr = &inp->sctp_ep.signature_change;
/* We re-use the newcookie timer for
* the INP kill timer. We must ensure
* that we do not kill it by accident.
*/
break;
case SCTP_TIMER_TYPE_INPKILL:
/*
* The inp is setup to die. We re-use the
* signature_change timer since that has
* stopped and we are in the GONE state.
*/
tmr = &inp->sctp_ep.signature_change;
break;
case SCTP_TIMER_TYPE_PATHMTURAISE:
if (stcb == NULL) {
return (EFAULT);
}
if (net == NULL) {
return (EFAULT);
}
tmr = &net->pmtu_timer;
break;
case SCTP_TIMER_TYPE_SHUTDOWNACK:
if ((stcb == NULL) || (net == NULL)) {
return (EFAULT);
}
tmr = &net->rxt_timer;
break;
case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.shut_guard_timer;
break;
case SCTP_TIMER_TYPE_STRRESET:
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.strreset_timer;
break;
case SCTP_TIMER_TYPE_ASCONF:
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.asconf_timer;
break;
case SCTP_TIMER_TYPE_AUTOCLOSE:
if (stcb == NULL) {
return (EFAULT);
}
tmr = &stcb->asoc.autoclose_timer;
break;
default:
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
printf("sctp_timer_stop:Unknown timer type %d\n",
t_type);
}
#endif /* SCTP_DEBUG */
break;
}
if (tmr == NULL)
return (EFAULT);
if ((tmr->type != t_type) && tmr->type) {
/*
* OK, we have a timer that is under joint use, e.g. the
* cookie timer sharing storage with the SEND timer. We are
* therefore NOT running the timer that the caller wants
* stopped, so just return.
*/
return (0);
}
if (t_type == SCTP_TIMER_TYPE_SEND) {
stcb->asoc.num_send_timers_up--;
if (stcb->asoc.num_send_timers_up < 0) {
stcb->asoc.num_send_timers_up = 0;
}
}
callout_stop(&tmr->timer);
return (0);
}
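/*
* Usage sketch: start and stop operate on the same (type, inp, stcb,
* net) tuple, as in the T3-rxt handling in the timeout handler above,
* e.g.:
*
*	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
*	...
*	sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
*/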
u_int32_t
sctp_calculate_len(struct mbuf *m)
{
u_int32_t tlen=0;
struct mbuf *at;
at = m;
while (at) {
tlen += at->m_len;
at = at->m_next;
}
return (tlen);
}
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
{
/*
* given an mbuf chain with a packet header offset by 'offset'
* pointing at an sctphdr (with csum set to 0), go through
* the chain of m_next's and calculate the SCTP checksum.
* This is CRC32c.
* As a side bonus, also calculate the total length
* of the mbuf chain.
* Note: if offset is greater than the total mbuf length,
* checksum=1, pktlen=0 is returned (i.e. no real error code)
*/
int32_t tlen=0;
uint32_t base = 0xffffffff;
struct mbuf *at;
at = m;
/* find the correct mbuf and offset into mbuf */
while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
offset -= at->m_len; /* update remaining offset left */
at = at->m_next;
}
while (at != NULL) {
base = update_crc32(base, at->m_data + offset,
at->m_len - offset);
tlen += at->m_len - offset;
/* we only offset once into the first mbuf */
if (offset) {
offset = 0;
}
at = at->m_next;
}
if (pktlen != NULL) {
*pktlen = tlen;
}
/* CRC-32c */
base = sctp_csum_finalize(base);
return (base);
}
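/*
* Usage sketch (hypothetical caller; 'sh' and 'iphlen' are assumed
* names): the sctphdr checksum field must be zeroed before summing,
* and 'offset' is the number of bytes preceding the sctphdr:
*
*	int32_t plen;
*	sh->checksum = 0;
*	sh->checksum = sctp_calculate_sum(m, &plen, iphlen);
*/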
void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
struct sctp_association *asoc, u_long mtu)
{
/*
* Reset the P-MTU size on this association. This involves changing
* the asoc MTU and marking ANY chunk whose size plus overhead is
* larger than mtu, so that the DF flag can be cleared and IP
* fragmentation allowed.
*/
struct sctp_tmit_chunk *chk;
struct sctp_stream_out *strm;
unsigned int eff_mtu, ovh;
asoc->smallest_mtu = mtu;
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
ovh = SCTP_MIN_OVERHEAD;
} else {
ovh = SCTP_MIN_V4_OVERHEAD;
}
eff_mtu = mtu - ovh;
/* Now mark any chunks that need to let IP fragment */
TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
if (chk->send_size > eff_mtu) {
chk->flags &= ~SCTP_DONT_FRAGMENT;
chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
}
}
}
TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
if (chk->send_size > eff_mtu) {
chk->flags &= ~SCTP_DONT_FRAGMENT;
chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
}
}
TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
if (chk->send_size > eff_mtu) {
chk->flags &= ~SCTP_DONT_FRAGMENT;
chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
}
}
}
/*
* given an association and the starting time of the current RTT
* period, return the RTO in number of msecs.
* net should point to the current network.
*/
u_int32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
struct sctp_association *asoc,
struct sctp_nets *net,
struct timeval *old)
{
int calc_time = 0;
unsigned int new_rto = 0;
int first_measure = 0;
struct timeval now;
/************************/
/* 1. calculate new RTT */
/************************/
/* get the current time */
SCTP_GETTIME_TIMEVAL(&now);
/* compute the RTT value */
if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
calc_time += (((u_long)now.tv_usec -
(u_long)old->tv_usec)/1000);
} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
/* Borrow 1,000ms from current calculation */
calc_time -= 1000;
/* Add in the slop over */
calc_time += ((int)now.tv_usec/1000);
/* Add in the pre-second ms's */
calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
}
} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
calc_time = ((u_long)now.tv_usec -
(u_long)old->tv_usec)/1000;
} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
/* impossible: the clock ran backwards */
return (((net->lastsa >> 2) + net->lastsv) >> 1);
} else {
/* zero elapsed time... garbage in, nothing out */
return (((net->lastsa >> 2) + net->lastsv) >> 1);
}
} else {
/* Clock wrapped? */
return (((net->lastsa >> 2) + net->lastsv) >> 1);
}
/***************************/
/* 2. update RTTVAR & SRTT */
/***************************/
#if 0
/*
* Floating point version, kept for reference (RFC 2960 Section 5.3.1):
* C3: RTTVAR = (1 - beta) * RTTVAR + beta * |SRTT - R'|  (beta = 0.25)
*     SRTT = (1 - alpha) * SRTT + alpha * R'             (alpha = 0.125)
* C2, the first RTT calculation: SRTT = R, RTTVAR = R / 2
* If RTTVAR goes to 0, set it to the clock granularity.
* RTO = SRTT + 4 * RTTVAR
*/
#endif
/* this is Van Jacobson's integer version */
if (net->RTO) {
calc_time -= (net->lastsa >> 3);
net->lastsa += calc_time;
if (calc_time < 0) {
calc_time = -calc_time;
}
calc_time -= (net->lastsv >> 2);
net->lastsv += calc_time;
if (net->lastsv == 0) {
net->lastsv = SCTP_CLOCK_GRANULARITY;
}
} else {
/* First RTO measurement */
net->lastsa = calc_time;
net->lastsv = calc_time >> 1;
first_measure = 1;
}
new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
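/*
* A note on the fixed-point arithmetic above (a sketch of the scaling,
* following Van Jacobson's TCP code): lastsa holds SRTT scaled by 8 and
* lastsv holds RTTVAR scaled by 4, so
*	lastsa += R' - (lastsa >> 3)	corresponds to alpha = 1/8
*	lastsv += |err| - (lastsv >> 2)	corresponds to beta = 1/4
* and ((lastsa >> 2) + lastsv) >> 1 unscales to roughly
* SRTT + 2 * RTTVAR in milliseconds.
*/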
if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
(stcb->asoc.sat_network_lockout == 0)) {
stcb->asoc.sat_network = 1;
} else if ((!first_measure) && stcb->asoc.sat_network) {
stcb->asoc.sat_network = 0;
stcb->asoc.sat_network_lockout = 1;
}
/* bound it, per C6/C7 in Section 5.3.1 */
if (new_rto < stcb->asoc.minrto) {
new_rto = stcb->asoc.minrto;
}
if (new_rto > stcb->asoc.maxrto) {
new_rto = stcb->asoc.maxrto;
}
/* we now return the smoothed RTO */
return ((u_int32_t)new_rto);
}
/*
* return a pointer to a contiguous piece of data from the given
* mbuf chain starting at 'off' for 'len' bytes. If the desired
* piece spans more than one mbuf, a copy is made at 'ptr'.
* caller must ensure that the buffer size is >= 'len'.
* returns NULL if there aren't 'len' bytes in the chain.
*/
void *
sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
{
uint32_t count;
uint8_t *ptr;
ptr = in_ptr;
if ((off < 0) || (len <= 0))
return (NULL);
/* find the desired start location */
while ((m != NULL) && (off > 0)) {
if (off < m->m_len)
break;
off -= m->m_len;
m = m->m_next;
}
if (m == NULL)
return (NULL);
/* is the current mbuf large enough (i.e. contiguous)? */
if ((m->m_len - off) >= len) {
return ((void *)(mtod(m, vaddr_t) + off));
} else {
/* else, it spans more than one mbuf, so save a temp copy... */
while ((m != NULL) && (len > 0)) {
count = uimin(m->m_len - off, len);
memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
len -= count;
ptr += count;
off = 0;
m = m->m_next;
}
if ((m == NULL) && (len > 0))
return (NULL);
else
return ((void *)in_ptr);
}
}
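/*
* Usage sketch (mirroring the input path later in this file): pull a
* chunk header that may span mbufs into a stack buffer:
*
*	struct sctp_chunkhdr *ch, chunk_buf;
*	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
*	    sizeof(*ch), (u_int8_t *)&chunk_buf);
*/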
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
int offset,
struct sctp_paramhdr *pull,
int pull_limit)
{
/* This just provides a typed signature to Peter's Pull routine */
return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
(u_int8_t *)pull));
}
int
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
/*
* add padlen bytes of 0-filled padding to the end of the mbuf.
* If padlen is > 3 this routine will fail.
*/
u_int8_t *dp;
int i;
if (padlen > 3) {
return (ENOBUFS);
}
if (M_TRAILINGSPACE(m) >= padlen) {
/*
* The easy way.
* We hope the majority of the time we hit here :)
*/
dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
m->m_len += padlen;
} else {
/* Hard way we must grow the mbuf */
struct mbuf *tmp;
MGET(tmp, M_DONTWAIT, MT_DATA);
if (tmp == NULL) {
/* Out of space GAK! we are in big trouble. */
return (ENOSPC);
}
/* setup and insert in middle */
tmp->m_next = m->m_next;
tmp->m_len = padlen;
m->m_next = tmp;
dp = mtod(tmp, u_int8_t *);
}
/* zero out the pad */
for (i = 0; i < padlen; i++) {
*dp = 0;
dp++;
}
return (0);
}
int
sctp_pad_lastmbuf(struct mbuf *m, int padval)
{
/* find the last mbuf in chain and pad it */
struct mbuf *m_at;
m_at = m;
while (m_at) {
if (m_at->m_next == NULL) {
return (sctp_add_pad_tombuf(m_at, padval));
}
m_at = m_at->m_next;
}
return (EFAULT);
}
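/*
* Usage sketch (hypothetical caller): chunks and parameters are padded
* to a 4-byte boundary, so the pad needed for a chunk of length 'len'
* is 0-3 bytes:
*
*	padlen = SCTP_SIZE32(len) - len;
*	if (padlen != 0)
*		(void)sctp_pad_lastmbuf(m, padlen);
*/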
#ifdef SCTP_DEBUG
printf("notify: %d\n", event);
#endif
/*
* First, if we are going down, dump everything we
* can to the socket rcv queue.
*/
if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
}
/*
* For TCP model AND UDP connected sockets we will send
* an error up when an ABORT comes in.
*/
if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
(event == SCTP_COMM_LOST)) {
stcb->sctp_socket->so_error = ECONNRESET;
/* Wake ANY sleepers */
sowwakeup(stcb->sctp_socket);
sorwakeup(stcb->sctp_socket);
}
#if 0
if ((event == SCTP_COMM_UP) &&
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
soisconnected(stcb->sctp_socket);
}
#endif
if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
/* event not enabled */
return;
}
MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
if (m_notify == NULL)
/* no space left */
return;
m_notify->m_len = 0;
/*
* For TCP model AND UDP connected sockets we will send
* an error up when a SHUTDOWN completes.
*/
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
/* mark socket closed for read/write and wakeup! */
socantrcvmore(stcb->sctp_socket);
socantsendmore(stcb->sctp_socket);
}
if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
/* event not enabled */
return;
MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
if (m_notify == NULL)
/* no space left */
return;
m_notify->m_len = 0;
sse = mtod(m_notify, struct sctp_shutdown_event *);
sse->sse_type = SCTP_SHUTDOWN_EVENT;
sse->sse_flags = 0;
sse->sse_length = sizeof(struct sctp_shutdown_event);
sse->sse_assoc_id = sctp_get_associd(stcb);
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
return;
}
/* now go through all the gunk, freeing chunks */
TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
/* now clean up any chunks here */
chk = TAILQ_FIRST(&outs->outqueue);
while (chk) {
stcb->asoc.stream_queue_cnt--;
TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
sctp_m_freem(chk->data);
chk->data = NULL;
if (chk->whoTo)
sctp_free_remote_addr(chk->whoTo);
chk->whoTo = NULL;
chk->asoc = NULL;
/* Free the chunk */
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
sctppcbinfo.ipi_count_chunk--;
if ((int)sctppcbinfo.ipi_count_chunk < 0) {
panic("Chunk count is negative");
}
sctppcbinfo.ipi_gencnt_chunk++;
chk = TAILQ_FIRST(&outs->outqueue);
}
}
/* pending send queue SHOULD be empty */
if (!TAILQ_EMPTY(&asoc->send_queue)) {
chk = TAILQ_FIRST(&asoc->send_queue);
while (chk) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
sctp_m_freem(chk->data);
chk->data = NULL;
if (chk->whoTo)
sctp_free_remote_addr(chk->whoTo);
chk->whoTo = NULL;
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
sctppcbinfo.ipi_count_chunk--;
if ((int)sctppcbinfo.ipi_count_chunk < 0) {
panic("Chunk count is negative");
}
sctppcbinfo.ipi_gencnt_chunk++;
chk = TAILQ_FIRST(&asoc->send_queue);
}
}
/* sent queue SHOULD be empty */
if (!TAILQ_EMPTY(&asoc->sent_queue)) {
chk = TAILQ_FIRST(&asoc->sent_queue);
while (chk) {
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
SCTP_NOTIFY_DATAGRAM_SENT, chk);
sctp_m_freem(chk->data);
chk->data = NULL;
if (chk->whoTo)
sctp_free_remote_addr(chk->whoTo);
chk->whoTo = NULL;
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
sctppcbinfo.ipi_count_chunk--;
if ((int)sctppcbinfo.ipi_count_chunk < 0) {
panic("Chunk count is negative");
}
sctppcbinfo.ipi_gencnt_chunk++;
chk = TAILQ_FIRST(&asoc->sent_queue);
}
}
}
void
sctp_abort_notification(struct sctp_tcb *stcb, int error)
{
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
return;
}
/* Tell them we lost the asoc */
sctp_report_all_outbound(stcb);
sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
}
if (stcb == NULL) {
/* Got to have a TCB */
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
sctp_inpcb_free(inp, 1);
}
}
return;
}
/* notify the ulp */
if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
sctp_abort_notification(stcb, error);
/* notify the peer */
sctp_send_abort_tcb(stcb, op_err);
/* now free the asoc */
sctp_free_assoc(inp, stcb);
}
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
struct sctp_inpcb *inp, struct mbuf *op_err)
{
struct sctp_chunkhdr *ch, chunk_buf;
unsigned int chk_length;
/* Generate a TO address for future reference */
if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
sctp_inpcb_free(inp, 1);
}
}
ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
sizeof(*ch), (u_int8_t *)&chunk_buf);
while (ch != NULL) {
chk_length = ntohs(ch->chunk_length);
if (chk_length < sizeof(*ch)) {
/* break to abort land */
break;
}
switch (ch->chunk_type) {
case SCTP_PACKET_DROPPED:
/* we don't respond to pkt-dropped */
return;
case SCTP_ABORT_ASSOCIATION:
/* we don't respond with an ABORT to an ABORT */
return;
case SCTP_SHUTDOWN_COMPLETE:
/*
* we ignore it since we are not waiting for it
* and peer is gone
*/
return;
case SCTP_SHUTDOWN_ACK:
sctp_send_shutdown_complete2(m, iphlen, sh);
return;
default:
break;
}
offset += SCTP_SIZE32(chk_length);
ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
sizeof(*ch), (u_int8_t *)&chunk_buf);
}
sctp_send_abort(m, iphlen, sh, 0, op_err);
}
/*
* check the inbound datagram to make sure there is not an abort
* inside it, if there is return 1, else return 0.
*/
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
{
struct sctp_chunkhdr *ch;
struct sctp_init_chunk *init_chk, chunk_buf;
int offset;
unsigned int chk_length;
offset = iphlen + sizeof(struct sctphdr);
ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
(u_int8_t *)&chunk_buf);
while (ch != NULL) {
chk_length = ntohs(ch->chunk_length);
if (chk_length < sizeof(*ch)) {
/* packet is probably corrupt */
break;
}
/* we seem to be ok, is it an abort? */
if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
/* yep, tell them */
return (1);
}
if (ch->chunk_type == SCTP_INITIATION) {
/* need to update the Vtag */
init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
if (init_chk != NULL) {
*vtagfill = ntohl(init_chk->init.initiate_tag);
}
}
/* Nope, move to the next chunk */
offset += SCTP_SIZE32(chk_length);
ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
sizeof(*ch), (u_int8_t *)&chunk_buf);
}
return (0);
}
/*
* currently (2/02), ifa_addr embeds scope_ids and doesn't
* have sin6_scope_id set (i.e. it's 0),
* so this function exists to compare link-local scopes
*/
uint32_t
sctp_is_same_scope(const struct sockaddr_in6 *addr1, const struct sockaddr_in6 *addr2)
{
struct sockaddr_in6 a, b;
/* save copies */
a = *addr1;
b = *addr2;
if (a.sin6_scope_id == 0)
if (sa6_recoverscope(&a)) {
/* can't get scope, so can't match */
return (0);
}
if (b.sin6_scope_id == 0)
if (sa6_recoverscope(&b)) {
/* can't get scope, so can't match */
return (0);
}
if (a.sin6_scope_id != b.sin6_scope_id)
return (0);
return (1);
}
/*
* returns a sockaddr_in6 with embedded scope recovered and removed
*/
const struct sockaddr_in6 *
sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
const struct sockaddr_in6 *newaddr;
newaddr = addr;
/* check and strip embedded scope junk */
if (addr->sin6_family == AF_INET6) {
if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
if (addr->sin6_scope_id == 0) {
*store = *addr;
if (sa6_recoverscope(store) == 0) {
/* use the recovered scope */
newaddr = store;
}
/* else, return the original "to" addr */
}
}
}
return (newaddr);
}
/*
* are the two addresses the same? currently a "scopeless" check
* returns: 1 if same, 0 if not
*/
int
sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
{
/* must be valid */
if (sa1 == NULL || sa2 == NULL)
return (0);
/* must be the same family */
if (sa1->sa_family != sa2->sa_family)
return (0);
if (sa1->sa_family == AF_INET6) {
/* IPv6 addresses (a scopeless compare, per the comment above) */
const struct sockaddr_in6 *sin6_1, *sin6_2;
sin6_1 = (const struct sockaddr_in6 *)sa1;
sin6_2 = (const struct sockaddr_in6 *)sa2;
return (memcmp(&sin6_1->sin6_addr, &sin6_2->sin6_addr,
sizeof(struct in6_addr)) == 0);
} else if (sa1->sa_family == AF_INET) {
/* IPv4 addresses */
const struct sockaddr_in *sin_1, *sin_2;
sin_1 = (const struct sockaddr_in *)sa1;
sin_2 = (const struct sockaddr_in *)sa2;
return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
}
/* else, not AF_INET or AF_INET6, so no match */
return (0);
}
static int
sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
{
struct mbuf *m;
/*
* given an mbuf chain, look through it for the M_PKTHDR
* and return 1 if it belongs to the given association.
* We can tell by a kludge: the asoc's my_vtag is stuffed
* into the m->m_pkthdr.csum_data/csum field.
*/
m = this;
while (m) {
if (m->m_flags & M_PKTHDR) {
/* check it */
#if defined(__OpenBSD__)
if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
#else
if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
#endif
{
/* Yep */
return (1);
}
}
m = m->m_next;
}
return (0);
}
retval = 0;
if (so->so_rcv.sb_mb) {
/* grubbing time */
this = so->so_rcv.sb_mb;
while (this) {
at = this;
/* get to the m_pkthdr */
while (at) {
if (at->m_flags & M_PKTHDR)
break;
else {
at = at->m_next;
}
}
/* now, do we have an m_pkthdr? */
if (at && (at->m_flags & M_PKTHDR)) {
/* check it */
#if defined(__OpenBSD__)
if ((u_int32_t)at->m_pkthdr.csum != 0)
#else
if ((u_int32_t)at->m_pkthdr.csum_data != 0)
#endif
{
/* it's the one */
#if defined(__OpenBSD__)
retval = (u_int32_t)at->m_pkthdr.csum;
#else
retval =
(u_int32_t)at->m_pkthdr.csum_data;
#endif
break;
}
}
this = this->m_nextpkt;
}
if (inp->sctp_vtag_first == asoc->my_vtag) {
/* First one must be moved */
struct mbuf *mm;
for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
/*
* Go down the chain and fix
* the space allocation of the
* two sockets.
*/
sbfree(old_sb, mm);
sballoc(new_sb, mm);
}
new_sb->sb_mb = old_sb->sb_mb;
old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
new_sb->sb_mb->m_nextpkt = NULL;
put = &new_sb->sb_mb->m_nextpkt;
moved_top = 1;
} else {
put = &new_sb->sb_mb;
}
take = &old_sb->sb_mb;
next = old_sb->sb_mb;
while (next) {
this = next;
/* position for next one */
next = this->m_nextpkt;
/* check the tag of this packet */
if (sctp_should_be_moved(this, asoc)) {
/* yes this needs to be moved */
struct mbuf *mm;
*take = this->m_nextpkt;
this->m_nextpkt = NULL;
*put = this;
for (mm = this; mm; mm = mm->m_next) {
/*
* Go down the chain and fix
* the space allocation of the
* two sockets.
*/
sbfree(old_sb, mm);
sballoc(new_sb, mm);
}
put = &this->m_nextpkt;
} else {
/* no, advance our take point */
take = &this->m_nextpkt;
}
}
if (moved_top) {
/*
* Ok so now we must re-position vtag_first to
* match the new first one since we moved the
* mbuf at the top.
*/
inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
}
}
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
int reason, struct sctpchunk_listhead *queue)
{
int ret_sz = 0;
int notdone;
uint8_t foundeom = 0;
do {
ret_sz += tp1->book_size;
tp1->sent = SCTP_FORWARD_TSN_SKIP;
if (tp1->data) {
sctp_free_bufspace(stcb, &stcb->asoc, tp1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
sctp_m_freem(tp1->data);
tp1->data = NULL;
sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
stcb->asoc.sent_queue_cnt_removeable--;
}
if (queue == &stcb->asoc.send_queue) {
TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
/* on to the sent queue */
TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
sctp_next);
stcb->asoc.sent_queue_cnt++;
}
if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
SCTP_DATA_NOT_FRAG) {
/* not fragmented, we are done */
notdone = 0;
foundeom = 1;
} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* end of frag, we are done */
notdone = 0;
foundeom = 1;
} else {
/* It's a begin or middle piece; we must mark all of it */
notdone = 1;
tp1 = TAILQ_NEXT(tp1, sctp_next);
}
} while (tp1 && notdone);
if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
/*
* The multi-part message was scattered
* across the send and sent queue.
*/
tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
/*
* recurse through the send_queue too, starting at the
* beginning.
*/
if (tp1) {
ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
&stcb->asoc.send_queue);
} else {
printf("hmm, nothing on the send queue and no EOM?\n");
}
}
return (ret_sz);
}
/*
* checks to see if the given address, sa, is one that is currently
* known by the kernel
* note: can't distinguish the same address on multiple interfaces and
* doesn't handle multiple addresses with different zone/scope id's
* note: ifa_ifwithaddr() compares the entire sockaddr struct
*/
struct ifaddr *
sctp_find_ifa_by_addr(struct sockaddr *sa)
{
struct ifnet *ifn;
struct ifaddr *ifa;
int s;
/* go through all our known interfaces */
s = pserialize_read_enter();
IFNET_READER_FOREACH(ifn) {
/* go through each interface addresses */
IFADDR_READER_FOREACH(ifa, ifn) {
/* correct family? */
if (ifa->ifa_addr->sa_family != sa->sa_family)
continue;
#ifdef INET6
if (ifa->ifa_addr->sa_family == AF_INET6) {
/* IPv6 address */
struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
/* create a copy and clear scope */
memcpy(&sin6_tmp, sin1,
sizeof(struct sockaddr_in6));
sin1 = &sin6_tmp;
in6_clearscope(&sin1->sin6_addr);
}
sin2 = (struct sockaddr_in6 *)sa;
if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
sizeof(struct in6_addr)) == 0) {
/* found it */
pserialize_read_exit(s);
return (ifa);
}
} else
#endif
if (ifa->ifa_addr->sa_family == AF_INET) {
/* IPv4 address */
struct sockaddr_in *sin1, *sin2;
sin1 = (struct sockaddr_in *)ifa->ifa_addr;
sin2 = (struct sockaddr_in *)sa;
if (sin1->sin_addr.s_addr ==
sin2->sin_addr.s_addr) {
/* found it */
pserialize_read_exit(s);
return (ifa);
}
}
/* else, not AF_INET or AF_INET6, so skip */
} /* end foreach ifa */
} /* end foreach ifn */
pserialize_read_exit(s);
/* not found! */
return (NULL);
}
#ifdef __APPLE__
/*
* here we hack in a fix for Apple's m_copym for the case where the first
* mbuf in the chain is an M_PKTHDR and the length is zero
*/
static void
sctp_pkthdr_fix(struct mbuf *m)
{
struct mbuf *m_nxt;
if ((m->m_flags & M_PKTHDR) == 0) {
/* not a PKTHDR */
return;
}
if (m->m_len != 0) {
/* not a zero length PKTHDR mbuf */
return;
}
/* let's move in a word into the first mbuf... yes, ugly! */
m_nxt = m->m_next;
if (m_nxt == NULL) {
/* umm... not a very useful mbuf chain... */
return;
}
if ((size_t)m_nxt->m_len > sizeof(long)) {
/* move over a long */
bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
/* update mbuf data pointers and lengths */
m->m_len += sizeof(long);
m_nxt->m_data += sizeof(long);
m_nxt->m_len -= sizeof(long);
}
}
inline struct mbuf *
sctp_m_copym(struct mbuf *m, int off, int len, int wait)
{
sctp_pkthdr_fix(m);
return (m_copym(m, off, len, wait));
}
#endif /* __APPLE__ */