1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#ifdef __FreeBSD__
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 271221 2014-09-07 09:06:26Z tuexen $");
36#endif
37
38#include <netinet/sctp_os.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctputil.h>
41#include <netinet/sctp_var.h>
42#include <netinet/sctp_sysctl.h>
43#ifdef INET6
44#if defined(__Userspace__) || defined(__FreeBSD__)
45#include <netinet6/sctp6_var.h>
46#endif
47#endif
48#include <netinet/sctp_header.h>
49#include <netinet/sctp_output.h>
50#include <netinet/sctp_uio.h>
51#include <netinet/sctp_timer.h>
52#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
53#include <netinet/sctp_auth.h>
54#include <netinet/sctp_asconf.h>
55#include <netinet/sctp_bsd_addr.h>
56#if defined(__Userspace__)
57#include <netinet/sctp_constants.h>
58#endif
59#if defined(__FreeBSD__)
60#include <netinet/udp.h>
61#include <netinet/udp_var.h>
62#include <sys/proc.h>
63#endif
64
65#if defined(__APPLE__)
66#define APPLE_FILE_NO 8
67#endif
68
69#if defined(__Windows__)
70#if !defined(SCTP_LOCAL_TRACE_BUF)
71#include "eventrace_netinet.h"
72#include "sctputil.tmh" /* this is the file that will be auto generated */
73#endif
74#else
75#ifndef KTR_SCTP
76#define KTR_SCTP KTR_SUBSYS
77#endif
78#endif
79
80extern struct sctp_cc_functions sctp_cc_functions[];
81extern struct sctp_ss_functions sctp_ss_functions[];
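
/*
 * Note on the logging helpers below: each one fills in a type-specific
 * member of the sctp_clog.x union and then emits the four overlapping
 * words via sctp_clog.x.misc.log1..log4, relying on the union layout of
 * struct sctp_cwnd_log.
 */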
82
83void
84sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
85{
86#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
87	struct sctp_cwnd_log sctp_clog;
88
89	sctp_clog.x.sb.stcb = stcb;
90	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
91	if (stcb)
92		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
93	else
94		sctp_clog.x.sb.stcb_sbcc = 0;
95	sctp_clog.x.sb.incr = incr;
96	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
97	     SCTP_LOG_EVENT_SB,
98	     from,
99	     sctp_clog.x.misc.log1,
100	     sctp_clog.x.misc.log2,
101	     sctp_clog.x.misc.log3,
102	     sctp_clog.x.misc.log4);
103#endif
104}
105
106void
107sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
108{
109#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
110	struct sctp_cwnd_log sctp_clog;
111
112	sctp_clog.x.close.inp = (void *)inp;
113	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
114	if (stcb) {
115		sctp_clog.x.close.stcb = (void *)stcb;
116		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
117	} else {
118		sctp_clog.x.close.stcb = 0;
119		sctp_clog.x.close.state = 0;
120	}
121	sctp_clog.x.close.loc = loc;
122	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
123	     SCTP_LOG_EVENT_CLOSE,
124	     0,
125	     sctp_clog.x.misc.log1,
126	     sctp_clog.x.misc.log2,
127	     sctp_clog.x.misc.log3,
128	     sctp_clog.x.misc.log4);
129#endif
130}
131
132void
133rto_logging(struct sctp_nets *net, int from)
134{
135#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
136	struct sctp_cwnd_log sctp_clog;
137
138	memset(&sctp_clog, 0, sizeof(sctp_clog));
139	sctp_clog.x.rto.net = (void *) net;
140	sctp_clog.x.rto.rtt = net->rtt / 1000;
141	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142	     SCTP_LOG_EVENT_RTT,
143	     from,
144	     sctp_clog.x.misc.log1,
145	     sctp_clog.x.misc.log2,
146	     sctp_clog.x.misc.log3,
147	     sctp_clog.x.misc.log4);
148#endif
149}
150
151void
152sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
153{
154#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
155	struct sctp_cwnd_log sctp_clog;
156
157	sctp_clog.x.strlog.stcb = stcb;
158	sctp_clog.x.strlog.n_tsn = tsn;
159	sctp_clog.x.strlog.n_sseq = sseq;
160	sctp_clog.x.strlog.e_tsn = 0;
161	sctp_clog.x.strlog.e_sseq = 0;
162	sctp_clog.x.strlog.strm = stream;
163	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
164	     SCTP_LOG_EVENT_STRM,
165	     from,
166	     sctp_clog.x.misc.log1,
167	     sctp_clog.x.misc.log2,
168	     sctp_clog.x.misc.log3,
169	     sctp_clog.x.misc.log4);
170#endif
171}
172
173void
174sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
175{
176#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
177	struct sctp_cwnd_log sctp_clog;
178
179	sctp_clog.x.nagle.stcb = (void *)stcb;
180	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
181	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
182	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
183	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
184	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
185	     SCTP_LOG_EVENT_NAGLE,
186	     action,
187	     sctp_clog.x.misc.log1,
188	     sctp_clog.x.misc.log2,
189	     sctp_clog.x.misc.log3,
190	     sctp_clog.x.misc.log4);
191#endif
192}
193
194void
195sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
196{
197#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
198	struct sctp_cwnd_log sctp_clog;
199
200	sctp_clog.x.sack.cumack = cumack;
201	sctp_clog.x.sack.oldcumack = old_cumack;
202	sctp_clog.x.sack.tsn = tsn;
203	sctp_clog.x.sack.numGaps = gaps;
204	sctp_clog.x.sack.numDups = dups;
205	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
206	     SCTP_LOG_EVENT_SACK,
207	     from,
208	     sctp_clog.x.misc.log1,
209	     sctp_clog.x.misc.log2,
210	     sctp_clog.x.misc.log3,
211	     sctp_clog.x.misc.log4);
212#endif
213}
214
215void
216sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
217{
218#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
219	struct sctp_cwnd_log sctp_clog;
220
221	memset(&sctp_clog, 0, sizeof(sctp_clog));
222	sctp_clog.x.map.base = map;
223	sctp_clog.x.map.cum = cum;
224	sctp_clog.x.map.high = high;
225	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
226	     SCTP_LOG_EVENT_MAP,
227	     from,
228	     sctp_clog.x.misc.log1,
229	     sctp_clog.x.misc.log2,
230	     sctp_clog.x.misc.log3,
231	     sctp_clog.x.misc.log4);
232#endif
233}
234
235void
236sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
237{
238#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
239	struct sctp_cwnd_log sctp_clog;
240
241	memset(&sctp_clog, 0, sizeof(sctp_clog));
242	sctp_clog.x.fr.largest_tsn = biggest_tsn;
243	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
244	sctp_clog.x.fr.tsn = tsn;
245	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246	     SCTP_LOG_EVENT_FR,
247	     from,
248	     sctp_clog.x.misc.log1,
249	     sctp_clog.x.misc.log2,
250	     sctp_clog.x.misc.log3,
251	     sctp_clog.x.misc.log4);
252#endif
253}
254
255void
256sctp_log_mb(struct mbuf *m, int from)
257{
258#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
259	struct sctp_cwnd_log sctp_clog;
260
261	sctp_clog.x.mb.mp = m;
262	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
263	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
264	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
265	if (SCTP_BUF_IS_EXTENDED(m)) {
266		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
267#if defined(__APPLE__)
268		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
269#else
270		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
271#endif
272	} else {
273		sctp_clog.x.mb.ext = 0;
274		sctp_clog.x.mb.refcnt = 0;
275	}
276	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277	     SCTP_LOG_EVENT_MBUF,
278	     from,
279	     sctp_clog.x.misc.log1,
280	     sctp_clog.x.misc.log2,
281	     sctp_clog.x.misc.log3,
282	     sctp_clog.x.misc.log4);
283#endif
284}
285
286void
287sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
288{
289#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
290	struct sctp_cwnd_log sctp_clog;
291
292	if (control == NULL) {
293		SCTP_PRINTF("Gak log of NULL?\n");
294		return;
295	}
296	sctp_clog.x.strlog.stcb = control->stcb;
297	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
298	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
299	sctp_clog.x.strlog.strm = control->sinfo_stream;
300	if (poschk != NULL) {
301		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
302		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
303	} else {
304		sctp_clog.x.strlog.e_tsn = 0;
305		sctp_clog.x.strlog.e_sseq = 0;
306	}
307	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308	     SCTP_LOG_EVENT_STRM,
309	     from,
310	     sctp_clog.x.misc.log1,
311	     sctp_clog.x.misc.log2,
312	     sctp_clog.x.misc.log3,
313	     sctp_clog.x.misc.log4);
314#endif
315}
316
317void
318sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
319{
320#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
321	struct sctp_cwnd_log sctp_clog;
322
323	sctp_clog.x.cwnd.net = net;
324	if (stcb->asoc.send_queue_cnt > 255)
325		sctp_clog.x.cwnd.cnt_in_send = 255;
326	else
327		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
328	if (stcb->asoc.stream_queue_cnt > 255)
329		sctp_clog.x.cwnd.cnt_in_str = 255;
330	else
331		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
332
333	if (net) {
334		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
335		sctp_clog.x.cwnd.inflight = net->flight_size;
336		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
337		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
338		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
339	}
340	if (SCTP_CWNDLOG_PRESEND == from) {
341		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
342	}
343	sctp_clog.x.cwnd.cwnd_augment = augment;
344	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345	     SCTP_LOG_EVENT_CWND,
346	     from,
347	     sctp_clog.x.misc.log1,
348	     sctp_clog.x.misc.log2,
349	     sctp_clog.x.misc.log3,
350	     sctp_clog.x.misc.log4);
351#endif
352}
353
354#ifndef __APPLE__
355void
356sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
357{
358#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
359	struct sctp_cwnd_log sctp_clog;
360
361	memset(&sctp_clog, 0, sizeof(sctp_clog));
362	if (inp) {
363		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
365	} else {
366		sctp_clog.x.lock.sock = (void *) NULL;
367	}
368	sctp_clog.x.lock.inp = (void *) inp;
369#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
370	if (stcb) {
371		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
372	} else {
373		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
374	}
375	if (inp) {
376		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
377		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
378	} else {
379		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
380		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
381	}
382#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
383	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
384#else
385	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
386#endif
387	if (inp && (inp->sctp_socket)) {
388		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
389		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
390		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
391	} else {
392		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
393		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
394		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
395	}
396#endif
397	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
398	     SCTP_LOG_LOCK_EVENT,
399	     from,
400	     sctp_clog.x.misc.log1,
401	     sctp_clog.x.misc.log2,
402	     sctp_clog.x.misc.log3,
403	     sctp_clog.x.misc.log4);
404#endif
405}
406#endif
407
408void
409sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
410{
411#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
412	struct sctp_cwnd_log sctp_clog;
413
414	memset(&sctp_clog, 0, sizeof(sctp_clog));
415	sctp_clog.x.cwnd.net = net;
416	sctp_clog.x.cwnd.cwnd_new_value = error;
417	sctp_clog.x.cwnd.inflight = net->flight_size;
418	sctp_clog.x.cwnd.cwnd_augment = burst;
419	if (stcb->asoc.send_queue_cnt > 255)
420		sctp_clog.x.cwnd.cnt_in_send = 255;
421	else
422		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
423	if (stcb->asoc.stream_queue_cnt > 255)
424		sctp_clog.x.cwnd.cnt_in_str = 255;
425	else
426		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
427	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428	     SCTP_LOG_EVENT_MAXBURST,
429	     from,
430	     sctp_clog.x.misc.log1,
431	     sctp_clog.x.misc.log2,
432	     sctp_clog.x.misc.log3,
433	     sctp_clog.x.misc.log4);
434#endif
435}
436
437void
438sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
439{
440#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
441	struct sctp_cwnd_log sctp_clog;
442
443	sctp_clog.x.rwnd.rwnd = peers_rwnd;
444	sctp_clog.x.rwnd.send_size = snd_size;
445	sctp_clog.x.rwnd.overhead = overhead;
446	sctp_clog.x.rwnd.new_rwnd = 0;
447	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
448	     SCTP_LOG_EVENT_RWND,
449	     from,
450	     sctp_clog.x.misc.log1,
451	     sctp_clog.x.misc.log2,
452	     sctp_clog.x.misc.log3,
453	     sctp_clog.x.misc.log4);
454#endif
455}
456
457void
458sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
459{
460#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
461	struct sctp_cwnd_log sctp_clog;
462
463	sctp_clog.x.rwnd.rwnd = peers_rwnd;
464	sctp_clog.x.rwnd.send_size = flight_size;
465	sctp_clog.x.rwnd.overhead = overhead;
466	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
467	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
468	     SCTP_LOG_EVENT_RWND,
469	     from,
470	     sctp_clog.x.misc.log1,
471	     sctp_clog.x.misc.log2,
472	     sctp_clog.x.misc.log3,
473	     sctp_clog.x.misc.log4);
474#endif
475}
476
477void
478sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
479{
480#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
481	struct sctp_cwnd_log sctp_clog;
482
483	sctp_clog.x.mbcnt.total_queue_size = total_oq;
484	sctp_clog.x.mbcnt.size_change = book;
485	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
486	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
487	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488	     SCTP_LOG_EVENT_MBCNT,
489	     from,
490	     sctp_clog.x.misc.log1,
491	     sctp_clog.x.misc.log2,
492	     sctp_clog.x.misc.log3,
493	     sctp_clog.x.misc.log4);
494#endif
495}
496
497void
498sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
499{
500#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
501	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502	     SCTP_LOG_MISC_EVENT,
503	     from,
504	     a, b, c, d);
505#endif
506}
507
508void
509sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
510{
511#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
512	struct sctp_cwnd_log sctp_clog;
513
514	sctp_clog.x.wake.stcb = (void *)stcb;
515	sctp_clog.x.wake.wake_cnt = wake_cnt;
516	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
517	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
518	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
519
520	if (stcb->asoc.stream_queue_cnt < 0xff)
521		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
522	else
523		sctp_clog.x.wake.stream_qcnt = 0xff;
524
525	if (stcb->asoc.chunks_on_out_queue < 0xff)
526		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
527	else
528		sctp_clog.x.wake.chunks_on_oque = 0xff;
529
530	sctp_clog.x.wake.sctpflags = 0;
531	/* set in the deferred mode stuff */
532	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
533		sctp_clog.x.wake.sctpflags |= 1;
534	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
535		sctp_clog.x.wake.sctpflags |= 2;
536	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
537		sctp_clog.x.wake.sctpflags |= 4;
538	/* also record the socket send-buffer flags */
539	if (stcb->sctp_socket) {
540		struct socket *so = stcb->sctp_socket;
541
542		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
543	} else {
544		sctp_clog.x.wake.sbflags = 0xff;
545	}
546	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
547	     SCTP_LOG_EVENT_WAKE,
548	     from,
549	     sctp_clog.x.misc.log1,
550	     sctp_clog.x.misc.log2,
551	     sctp_clog.x.misc.log3,
552	     sctp_clog.x.misc.log4);
553#endif
554}
555
556void
557sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
558{
559#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
560	struct sctp_cwnd_log sctp_clog;
561
562	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
563	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
564	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
565	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
566	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
567	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
568	sctp_clog.x.blk.sndlen = sendlen;
569	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
570	     SCTP_LOG_EVENT_BLOCK,
571	     from,
572	     sctp_clog.x.misc.log1,
573	     sctp_clog.x.misc.log2,
574	     sctp_clog.x.misc.log3,
575	     sctp_clog.x.misc.log4);
576#endif
577}
578
579int
580sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
581{
582	/* May need to fix this if ktrdump does not work */
583	return (0);
584}
585
586#ifdef SCTP_AUDITING_ENABLED
587uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
588static int sctp_audit_indx = 0;
589
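/*
 * sctp_audit_data is a ring buffer of two-byte trace records indexed by
 * sctp_audit_indx; sctp_print_audit_report() dumps it oldest-first, from
 * the current index to the end of the array and then from the start back
 * up to the current index.
 */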
590static
591void
592sctp_print_audit_report(void)
593{
594	int i;
595	int cnt;
596
597	cnt = 0;
598	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
599		if ((sctp_audit_data[i][0] == 0xe0) &&
600		    (sctp_audit_data[i][1] == 0x01)) {
601			cnt = 0;
602			SCTP_PRINTF("\n");
603		} else if (sctp_audit_data[i][0] == 0xf0) {
604			cnt = 0;
605			SCTP_PRINTF("\n");
606		} else if ((sctp_audit_data[i][0] == 0xc0) &&
607		    (sctp_audit_data[i][1] == 0x01)) {
608			SCTP_PRINTF("\n");
609			cnt = 0;
610		}
611		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
612			    (uint32_t) sctp_audit_data[i][1]);
613		cnt++;
614		if ((cnt % 14) == 0)
615			SCTP_PRINTF("\n");
616	}
617	for (i = 0; i < sctp_audit_indx; i++) {
618		if ((sctp_audit_data[i][0] == 0xe0) &&
619		    (sctp_audit_data[i][1] == 0x01)) {
620			cnt = 0;
621			SCTP_PRINTF("\n");
622		} else if (sctp_audit_data[i][0] == 0xf0) {
623			cnt = 0;
624			SCTP_PRINTF("\n");
625		} else if ((sctp_audit_data[i][0] == 0xc0) &&
626		    (sctp_audit_data[i][1] == 0x01)) {
627			SCTP_PRINTF("\n");
628			cnt = 0;
629		}
630		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
631			    (uint32_t) sctp_audit_data[i][1]);
632		cnt++;
633		if ((cnt % 14) == 0)
634			SCTP_PRINTF("\n");
635	}
636	SCTP_PRINTF("\n");
637}
638
639void
640sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
641    struct sctp_nets *net)
642{
643	int resend_cnt, tot_out, rep, tot_book_cnt;
644	struct sctp_nets *lnet;
645	struct sctp_tmit_chunk *chk;
646
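	/*
	 * Record the audit event, then cross-check the association's cached
	 * counters (sent_queue_retran_cnt, total_flight, total_flight_count)
	 * against the sent queue and the per-net flight sizes, correcting
	 * the cached values and printing a report if anything is off.
	 */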
647	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
648	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
649	sctp_audit_indx++;
650	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
651		sctp_audit_indx = 0;
652	}
653	if (inp == NULL) {
654		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
655		sctp_audit_data[sctp_audit_indx][1] = 0x01;
656		sctp_audit_indx++;
657		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658			sctp_audit_indx = 0;
659		}
660		return;
661	}
662	if (stcb == NULL) {
663		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
664		sctp_audit_data[sctp_audit_indx][1] = 0x02;
665		sctp_audit_indx++;
666		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
667			sctp_audit_indx = 0;
668		}
669		return;
670	}
671	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
672	sctp_audit_data[sctp_audit_indx][1] =
673	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
674	sctp_audit_indx++;
675	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676		sctp_audit_indx = 0;
677	}
678	rep = 0;
679	tot_book_cnt = 0;
680	resend_cnt = tot_out = 0;
681	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
682		if (chk->sent == SCTP_DATAGRAM_RESEND) {
683			resend_cnt++;
684		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
685			tot_out += chk->book_size;
686			tot_book_cnt++;
687		}
688	}
689	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
690		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
691		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
692		sctp_audit_indx++;
693		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
694			sctp_audit_indx = 0;
695		}
696		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
697			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
698		rep = 1;
699		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
700		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
701		sctp_audit_data[sctp_audit_indx][1] =
702		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
703		sctp_audit_indx++;
704		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705			sctp_audit_indx = 0;
706		}
707	}
708	if (tot_out != stcb->asoc.total_flight) {
709		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
710		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
711		sctp_audit_indx++;
712		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
713			sctp_audit_indx = 0;
714		}
715		rep = 1;
716		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
717			    (int)stcb->asoc.total_flight);
718		stcb->asoc.total_flight = tot_out;
719	}
720	if (tot_book_cnt != stcb->asoc.total_flight_count) {
721		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
722		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
723		sctp_audit_indx++;
724		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
725			sctp_audit_indx = 0;
726		}
727		rep = 1;
728		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
729
730		stcb->asoc.total_flight_count = tot_book_cnt;
731	}
732	tot_out = 0;
733	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
734		tot_out += lnet->flight_size;
735	}
736	if (tot_out != stcb->asoc.total_flight) {
737		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
738		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
739		sctp_audit_indx++;
740		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
741			sctp_audit_indx = 0;
742		}
743		rep = 1;
744		SCTP_PRINTF("real flight:%d net total was %d\n",
745			    stcb->asoc.total_flight, tot_out);
746		/* now corrective action */
747		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
749			tot_out = 0;
750			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
751				if ((chk->whoTo == lnet) &&
752				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
753					tot_out += chk->book_size;
754				}
755			}
756			if (lnet->flight_size != tot_out) {
757				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
758					    (void *)lnet, lnet->flight_size,
759					    tot_out);
760				lnet->flight_size = tot_out;
761			}
762		}
763	}
764	if (rep) {
765		sctp_print_audit_report();
766	}
767}
768
769void
770sctp_audit_log(uint8_t ev, uint8_t fd)
771{
772
773	sctp_audit_data[sctp_audit_indx][0] = ev;
774	sctp_audit_data[sctp_audit_indx][1] = fd;
775	sctp_audit_indx++;
776	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
777		sctp_audit_indx = 0;
778	}
779}
780
781#endif
782
783/*
784 * sctp_stop_timers_for_shutdown() should be called
785 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
786 * state to make sure that all timers are stopped.
787 */
788void
789sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
790{
791	struct sctp_association *asoc;
792	struct sctp_nets *net;
793
794	asoc = &stcb->asoc;
795
796	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
797	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
798	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
799	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
800	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
801	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
802		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
803		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
804	}
805}
806
807/*
808 * A list of sizes based on typical MTUs, used only if the next hop
809 * does not report a size.
810 */
811static uint32_t sctp_mtu_sizes[] = {
812	68,
813	296,
814	508,
815	512,
816	544,
817	576,
818	1006,
819	1492,
820	1500,
821	1536,
822	2002,
823	2048,
824	4352,
825	4464,
826	8166,
827	17914,
828	32000,
829	65535
830};
831
832/*
833 * Return the largest MTU in the table that is smaller than val. If val
834 * is not larger than the smallest entry, just return val.
835 */
836uint32_t
837sctp_get_prev_mtu(uint32_t val)
838{
839	uint32_t i;
840
841	if (val <= sctp_mtu_sizes[0]) {
842		return (val);
843	}
844	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
845		if (val <= sctp_mtu_sizes[i]) {
846			break;
847		}
848	}
849	return (sctp_mtu_sizes[i - 1]);
850}
851
852/*
853 * Return the smallest MTU larger than val. If there is no
854 * entry, just return val.
855 */
856uint32_t
857sctp_get_next_mtu(uint32_t val)
858{
859	/* select another MTU that is just bigger than this one */
860	uint32_t i;
861
862	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
863		if (val < sctp_mtu_sizes[i]) {
864			return (sctp_mtu_sizes[i]);
865		}
866	}
867	return (val);
868}
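
/*
 * Illustrative behaviour of the two helpers above, assuming the table
 * contents listed in sctp_mtu_sizes:
 *
 *   sctp_get_prev_mtu(1500)  == 1492   largest entry below 1500
 *   sctp_get_prev_mtu(68)    == 68     not above the smallest entry
 *   sctp_get_next_mtu(1500)  == 1536   smallest entry above 1500
 *   sctp_get_next_mtu(65535) == 65535  no larger entry, value unchanged
 */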
869
870void
871sctp_fill_random_store(struct sctp_pcb *m)
872{
873	/*
874	 * Here we use MD5/SHA-1 to hash our good random numbers together with
875	 * our counter. The result becomes our new set of good random numbers
876	 * and we then set up to hand these out. Note that we do no locking to
877	 * protect this. That is ok, since competing callers only mix more
878	 * entropy into the random store, which is what we want. There is a
879	 * danger that two callers will use the same random numbers, but that
880	 * is ok too since they are random as well :->
881	 */
882	m->store_at = 0;
883	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
884	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
885	    sizeof(m->random_counter), (uint8_t *)m->random_store);
886	m->random_counter++;
887}
888
889uint32_t
890sctp_select_initial_TSN(struct sctp_pcb *inp)
891{
892	/*
893	 * A true implementation should use a random selection process to get
894	 * the initial stream sequence number, using RFC 1750 as a good
895	 * guideline.
896	 */
897	uint32_t x, *xp;
898	uint8_t *p;
899	int store_at, new_store;
900
901	if (inp->initial_sequence_debug != 0) {
902		uint32_t ret;
903
904		ret = inp->initial_sequence_debug;
905		inp->initial_sequence_debug++;
906		return (ret);
907	}
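	/*
	 * Lock-free reservation of a 4-byte slot in the random store: the
	 * compare-and-set on store_at either claims [store_at, store_at + 4)
	 * for this caller or fails and we retry. When the next offset wraps
	 * back to 0, the store is refilled before the bytes are handed out.
	 */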
908 retry:
909	store_at = inp->store_at;
910	new_store = store_at + sizeof(uint32_t);
911	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
912		new_store = 0;
913	}
914	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
915		goto retry;
916	}
917	if (new_store == 0) {
918		/* Refill the random store */
919		sctp_fill_random_store(inp);
920	}
921	p = &inp->random_store[store_at];
922	xp = (uint32_t *)p;
923	x = *xp;
924	return (x);
925}
926
927uint32_t
928sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
929{
930	uint32_t x;
931	struct timeval now;
932
933	if (check) {
934		(void)SCTP_GETTIME_TIMEVAL(&now);
935	}
936	for (;;) {
937		x = sctp_select_initial_TSN(&inp->sctp_ep);
938		if (x == 0) {
939			/* we never use 0 */
940			continue;
941		}
942		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
943			break;
944		}
945	}
946	return (x);
947}
948
949int
950sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
951               uint32_t override_tag, uint32_t vrf_id)
952{
953	struct sctp_association *asoc;
954	/*
955	 * Anything set to zero is taken care of by the allocation routine's
956	 * bzero.
957	 */
958
959	/*
960	 * Up front, select what scoping to apply to the addresses I tell my
961	 * peer. Not sure what to do with these right now; we will need to
962	 * come up with a way to set them. We may need to pass them through
963	 * from the caller in the sctp_aloc_assoc() function.
964	 */
965	int i;
966#if defined(SCTP_DETAILED_STR_STATS)
967	int j;
968#endif
969
970	asoc = &stcb->asoc;
971	/* init all variables to a known value. */
972	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
973	asoc->max_burst = inp->sctp_ep.max_burst;
974	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
975	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
976	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
977	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
978	asoc->ecn_supported = inp->ecn_supported;
979	asoc->prsctp_supported = inp->prsctp_supported;
980	asoc->auth_supported = inp->auth_supported;
981	asoc->asconf_supported = inp->asconf_supported;
982	asoc->reconfig_supported = inp->reconfig_supported;
983	asoc->nrsack_supported = inp->nrsack_supported;
984	asoc->pktdrop_supported = inp->pktdrop_supported;
985	asoc->sctp_cmt_pf = (uint8_t)0;
986	asoc->sctp_frag_point = inp->sctp_frag_point;
987	asoc->sctp_features = inp->sctp_features;
988	asoc->default_dscp = inp->sctp_ep.default_dscp;
989#ifdef INET6
990	if (inp->sctp_ep.default_flowlabel) {
991		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
992	} else {
993		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
994			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
995			asoc->default_flowlabel &= 0x000fffff;
996			asoc->default_flowlabel |= 0x80000000;
997		} else {
998			asoc->default_flowlabel = 0;
999		}
1000	}
1001#endif
1002	asoc->sb_send_resv = 0;
1003	if (override_tag) {
1004		asoc->my_vtag = override_tag;
1005	} else {
1006		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
1007	}
1008	/* Get the nonce tags */
1009	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1011	asoc->vrf_id = vrf_id;
1012
1013#ifdef SCTP_ASOCLOG_OF_TSNS
1014	asoc->tsn_in_at = 0;
1015	asoc->tsn_out_at = 0;
1016	asoc->tsn_in_wrapped = 0;
1017	asoc->tsn_out_wrapped = 0;
1018	asoc->cumack_log_at = 0;
1019	asoc->cumack_log_atsnt = 0;
1020#endif
1021#ifdef SCTP_FS_SPEC_LOG
1022	asoc->fs_index = 0;
1023#endif
1024	asoc->refcnt = 0;
1025	asoc->assoc_up_sent = 0;
1026	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1027	    sctp_select_initial_TSN(&inp->sctp_ep);
1028	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1029	/* we are optimistic here */
1030	asoc->peer_supports_nat = 0;
1031	asoc->sent_queue_retran_cnt = 0;
1032
1033	/* for CMT */
1034	asoc->last_net_cmt_send_started = NULL;
1035
1036	/* This will need to be adjusted */
1037	asoc->last_acked_seq = asoc->init_seq_number - 1;
1038	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1039	asoc->asconf_seq_in = asoc->last_acked_seq;
1040
1041	/* here we are different, we hold the next one we expect */
1042	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1043
1044	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1045	asoc->initial_rto = inp->sctp_ep.initial_rto;
1046
1047	asoc->max_init_times = inp->sctp_ep.max_init_times;
1048	asoc->max_send_times = inp->sctp_ep.max_send_times;
1049	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1050	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1051	asoc->free_chunk_cnt = 0;
1052
1053	asoc->iam_blocking = 0;
1054	asoc->context = inp->sctp_context;
1055	asoc->local_strreset_support = inp->local_strreset_support;
1056	asoc->def_send = inp->def_send;
1057	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1058	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1059	asoc->pr_sctp_cnt = 0;
1060	asoc->total_output_queue_size = 0;
1061
1062	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1063		asoc->scope.ipv6_addr_legal = 1;
1064		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1065			asoc->scope.ipv4_addr_legal = 1;
1066		} else {
1067			asoc->scope.ipv4_addr_legal = 0;
1068		}
1069#if defined(__Userspace__)
1070			asoc->scope.conn_addr_legal = 0;
1071#endif
1072	} else {
1073		asoc->scope.ipv6_addr_legal = 0;
1074#if defined(__Userspace__)
1075		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
1076			asoc->scope.conn_addr_legal = 1;
1077			asoc->scope.ipv4_addr_legal = 0;
1078		} else {
1079			asoc->scope.conn_addr_legal = 0;
1080			asoc->scope.ipv4_addr_legal = 1;
1081		}
1082#else
1083		asoc->scope.ipv4_addr_legal = 1;
1084#endif
1085	}
1086
1087	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1088	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1089
1090	asoc->smallest_mtu = inp->sctp_frag_point;
1091	asoc->minrto = inp->sctp_ep.sctp_minrto;
1092	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1093
1094	asoc->locked_on_sending = NULL;
1095	asoc->stream_locked_on = 0;
1096	asoc->ecn_echo_cnt_onq = 0;
1097	asoc->stream_locked = 0;
1098
1099	asoc->send_sack = 1;
1100
1101	LIST_INIT(&asoc->sctp_restricted_addrs);
1102
1103	TAILQ_INIT(&asoc->nets);
1104	TAILQ_INIT(&asoc->pending_reply_queue);
1105	TAILQ_INIT(&asoc->asconf_ack_sent);
1106	/* Setup to fill the hb random cache at first HB */
1107	asoc->hb_random_idx = 4;
1108
1109	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1110
1111	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1112	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1113
1114	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1115	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1116
1117	/*
1118	 * Now the stream parameters, here we allocate space for all streams
1119	 * that we request by default.
1120	 */
1121	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1122	    inp->sctp_ep.pre_open_stream_count;
1123	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1124		    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1125		    SCTP_M_STRMO);
1126	if (asoc->strmout == NULL) {
1127		/* big trouble no memory */
1128		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1129		return (ENOMEM);
1130	}
1131	for (i = 0; i < asoc->streamoutcnt; i++) {
1132		/*
1133		 * The inbound side must be set to 0xffff. Also NOTE: when we
1134		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1135		 * the count (streamoutcnt), but first check whether we sent to
1136		 * any of the upper streams that were dropped (if some were).
1137		 * Those that were dropped must be reported to the upper layer
1138		 * as failed to send.
1139		 */
1140		asoc->strmout[i].next_sequence_send = 0x0;
1141		TAILQ_INIT(&asoc->strmout[i].outqueue);
1142		asoc->strmout[i].chunks_on_queues = 0;
1143#if defined(SCTP_DETAILED_STR_STATS)
1144		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1145			asoc->strmout[i].abandoned_sent[j] = 0;
1146			asoc->strmout[i].abandoned_unsent[j] = 0;
1147		}
1148#else
1149		asoc->strmout[i].abandoned_sent[0] = 0;
1150		asoc->strmout[i].abandoned_unsent[0] = 0;
1151#endif
1152		asoc->strmout[i].stream_no = i;
1153		asoc->strmout[i].last_msg_incomplete = 0;
1154		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1155	}
1156	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1157
1158	/* Now the mapping array */
1159	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1160	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1161		    SCTP_M_MAP);
1162	if (asoc->mapping_array == NULL) {
1163		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1164		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1165		return (ENOMEM);
1166	}
1167	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1168	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1169	    SCTP_M_MAP);
1170	if (asoc->nr_mapping_array == NULL) {
1171		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1172		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1173		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1174		return (ENOMEM);
1175	}
1176	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1177
1178	/* Now the init of the other outqueues */
1179	TAILQ_INIT(&asoc->free_chunks);
1180	TAILQ_INIT(&asoc->control_send_queue);
1181	TAILQ_INIT(&asoc->asconf_send_queue);
1182	TAILQ_INIT(&asoc->send_queue);
1183	TAILQ_INIT(&asoc->sent_queue);
1184	TAILQ_INIT(&asoc->reasmqueue);
1185	TAILQ_INIT(&asoc->resetHead);
1186	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1187	TAILQ_INIT(&asoc->asconf_queue);
1188	/* authentication fields */
1189	asoc->authinfo.random = NULL;
1190	asoc->authinfo.active_keyid = 0;
1191	asoc->authinfo.assoc_key = NULL;
1192	asoc->authinfo.assoc_keyid = 0;
1193	asoc->authinfo.recv_key = NULL;
1194	asoc->authinfo.recv_keyid = 0;
1195	LIST_INIT(&asoc->shared_keys);
1196	asoc->marked_retrans = 0;
1197	asoc->port = inp->sctp_ep.port;
1198	asoc->timoinit = 0;
1199	asoc->timodata = 0;
1200	asoc->timosack = 0;
1201	asoc->timoshutdown = 0;
1202	asoc->timoheartbeat = 0;
1203	asoc->timocookie = 0;
1204	asoc->timoshutdownack = 0;
1205	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1206	asoc->discontinuity_time = asoc->start_time;
1207	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1208		asoc->abandoned_unsent[i] = 0;
1209		asoc->abandoned_sent[i] = 0;
1210	}
1211	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed
1212	 * later when the association is freed.}
1213	 */
1214	return (0);
1215}
1216
1217void
1218sctp_print_mapping_array(struct sctp_association *asoc)
1219{
1220	unsigned int i, limit;
1221
1222	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1223	            asoc->mapping_array_size,
1224	            asoc->mapping_array_base_tsn,
1225	            asoc->cumulative_tsn,
1226	            asoc->highest_tsn_inside_map,
1227	            asoc->highest_tsn_inside_nr_map);
1228	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1229		if (asoc->mapping_array[limit - 1] != 0) {
1230			break;
1231		}
1232	}
1233	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1234	for (i = 0; i < limit; i++) {
1235		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1236	}
1237	if (limit % 16)
1238		SCTP_PRINTF("\n");
1239	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1240		if (asoc->nr_mapping_array[limit - 1]) {
1241			break;
1242		}
1243	}
1244	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1245	for (i = 0; i < limit; i++) {
1246		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1247	}
1248	if (limit % 16)
1249		SCTP_PRINTF("\n");
1250}
1251
1252int
1253sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1254{
1255	/* mapping array needs to grow */
1256	uint8_t *new_array1, *new_array2;
1257	uint32_t new_size;
1258
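	/*
	 * Grow by enough bytes to cover "needed" additional TSNs (one bit per
	 * TSN, rounded up to whole bytes) plus SCTP_MAPPING_ARRAY_INCR bytes
	 * of slack; e.g. needed = 20 adds 3 bytes plus the increment.
	 */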
1259	new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1260	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1261	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1262	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1263		/* can't get more, forget it */
1264		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1265		if (new_array1) {
1266			SCTP_FREE(new_array1, SCTP_M_MAP);
1267		}
1268		if (new_array2) {
1269			SCTP_FREE(new_array2, SCTP_M_MAP);
1270		}
1271		return (-1);
1272	}
1273	memset(new_array1, 0, new_size);
1274	memset(new_array2, 0, new_size);
1275	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1276	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1277	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1278	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1279	asoc->mapping_array = new_array1;
1280	asoc->nr_mapping_array = new_array2;
1281	asoc->mapping_array_size = new_size;
1282	return (0);
1283}
1284
1285
1286static void
1287sctp_iterator_work(struct sctp_iterator *it)
1288{
1289	int iteration_count = 0;
1290	int inp_skip = 0;
1291	int first_in = 1;
1292	struct sctp_inpcb *tinp;
1293
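	/*
	 * Walk every endpoint whose flags/features match the iterator's
	 * criteria and, within each endpoint, every association in the
	 * requested state, invoking the caller-supplied callbacks. The locks
	 * are periodically dropped and reacquired so other threads can make
	 * progress.
	 */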
1294	SCTP_INP_INFO_RLOCK();
1295	SCTP_ITERATOR_LOCK();
1296	if (it->inp) {
1297		SCTP_INP_RLOCK(it->inp);
1298		SCTP_INP_DECR_REF(it->inp);
1299	}
1300	if (it->inp == NULL) {
1301		/* iterator is complete */
1302done_with_iterator:
1303		SCTP_ITERATOR_UNLOCK();
1304		SCTP_INP_INFO_RUNLOCK();
1305		if (it->function_atend != NULL) {
1306			(*it->function_atend) (it->pointer, it->val);
1307		}
1308		SCTP_FREE(it, SCTP_M_ITER);
1309		return;
1310	}
1311select_a_new_ep:
1312	if (first_in) {
1313		first_in = 0;
1314	} else {
1315		SCTP_INP_RLOCK(it->inp);
1316	}
1317	while (((it->pcb_flags) &&
1318		((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1319	       ((it->pcb_features) &&
1320		((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1321		/* endpoint flags or features don't match, so keep looking */
1322		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1323			SCTP_INP_RUNLOCK(it->inp);
1324			goto done_with_iterator;
1325		}
1326		tinp = it->inp;
1327		it->inp = LIST_NEXT(it->inp, sctp_list);
1328		SCTP_INP_RUNLOCK(tinp);
1329		if (it->inp == NULL) {
1330			goto done_with_iterator;
1331		}
1332		SCTP_INP_RLOCK(it->inp);
1333	}
1334	/* now go through each assoc which is in the desired state */
1335	if (it->done_current_ep == 0) {
1336		if (it->function_inp != NULL)
1337			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
1338		it->done_current_ep = 1;
1339	}
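	/*
	 * For the pre-send event, meets_pseudo_cumack is reused to carry the
	 * peer's receive window instead.
	 */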
1340	if (it->stcb == NULL) {
1341		/* run the per instance function */
1342		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1343	}
1344	if ((inp_skip) || it->stcb == NULL) {
1345		if (it->function_inp_end != NULL) {
1346			inp_skip = (*it->function_inp_end)(it->inp,
1347							   it->pointer,
1348							   it->val);
1349		}
1350		SCTP_INP_RUNLOCK(it->inp);
1351		goto no_stcb;
1352	}
1353	while (it->stcb) {
1354		SCTP_TCB_LOCK(it->stcb);
1355		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1356			/* not in the right state... keep looking */
1357			SCTP_TCB_UNLOCK(it->stcb);
1358			goto next_assoc;
1359		}
1360		/* see if we have hit the limit for this pass of the iterator loop */
1361		iteration_count++;
1362		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1363			/* Pause to let others grab the lock */
1364			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1365			SCTP_TCB_UNLOCK(it->stcb);
1366			SCTP_INP_INCR_REF(it->inp);
1367			SCTP_INP_RUNLOCK(it->inp);
1368			SCTP_ITERATOR_UNLOCK();
1369			SCTP_INP_INFO_RUNLOCK();
1370			SCTP_INP_INFO_RLOCK();
1371			SCTP_ITERATOR_LOCK();
1372			if (sctp_it_ctl.iterator_flags) {
1373				/* We won't be staying here */
1374				SCTP_INP_DECR_REF(it->inp);
1375				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1376#if !defined(__FreeBSD__)
1377				if (sctp_it_ctl.iterator_flags &
1378				   SCTP_ITERATOR_MUST_EXIT) {
1379					goto done_with_iterator;
1380				}
1381#endif
1382				if (sctp_it_ctl.iterator_flags &
1383				   SCTP_ITERATOR_STOP_CUR_IT) {
1384					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1385					goto done_with_iterator;
1386				}
1387				if (sctp_it_ctl.iterator_flags &
1388				   SCTP_ITERATOR_STOP_CUR_INP) {
1389					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1390					goto no_stcb;
1391				}
1392				/* If we reach here, we hit an unknown flag */
1393				SCTP_PRINTF("Unknown it ctl flag %x\n",
1394					    sctp_it_ctl.iterator_flags);
1395				sctp_it_ctl.iterator_flags = 0;
1396			}
1397			SCTP_INP_RLOCK(it->inp);
1398			SCTP_INP_DECR_REF(it->inp);
1399			SCTP_TCB_LOCK(it->stcb);
1400			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1401			iteration_count = 0;
1402		}
1403
1404		/* run function on this one */
1405		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
1406
1407		/*
1408		 * we lie here, it really needs to have its own type but
1409		 * first I must verify that this won't affect things :-0
1410		 */
1411		if (it->no_chunk_output == 0)
1412			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1413
1414		SCTP_TCB_UNLOCK(it->stcb);
1415	next_assoc:
1416		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1417		if (it->stcb == NULL) {
1418			/* Run last function */
1419			if (it->function_inp_end != NULL) {
1420				inp_skip = (*it->function_inp_end)(it->inp,
1421								   it->pointer,
1422								   it->val);
1423			}
1424		}
1425	}
1426	SCTP_INP_RUNLOCK(it->inp);
1427 no_stcb:
1428	/* done with all assocs on this endpoint, move on to next endpoint */
1429	it->done_current_ep = 0;
1430	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1431		it->inp = NULL;
1432	} else {
1433		it->inp = LIST_NEXT(it->inp, sctp_list);
1434	}
1435	if (it->inp == NULL) {
1436		goto done_with_iterator;
1437	}
1438	goto select_a_new_ep;
1439}
1440
1441void
1442sctp_iterator_worker(void)
1443{
1444	struct sctp_iterator *it, *nit;
1445
1446	/* This function is called with the WQ lock in place */
1447
1448	sctp_it_ctl.iterator_running = 1;
1449	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1450		sctp_it_ctl.cur_it = it;
1451		/* now lets work on this one */
1452		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1453		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1454#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1455		CURVNET_SET(it->vn);
1456#endif
1457		sctp_iterator_work(it);
1458		sctp_it_ctl.cur_it = NULL;
1459#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1460		CURVNET_RESTORE();
1461#endif
1462		SCTP_IPI_ITERATOR_WQ_LOCK();
1463#if !defined(__FreeBSD__)
1464		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1465			break;
1466		}
1467#endif
1468	        /*sa_ignore FREED_MEMORY*/
1469	}
1470	sctp_it_ctl.iterator_running = 0;
1471	return;
1472}
1473
1474
1475static void
1476sctp_handle_addr_wq(void)
1477{
1478	/* deal with the ADDR wq from the rtsock calls */
1479	struct sctp_laddr *wi, *nwi;
1480	struct sctp_asconf_iterator *asc;
1481
1482	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1483		    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1484	if (asc == NULL) {
1485		/* Try later, no memory */
1486		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1487				 (struct sctp_inpcb *)NULL,
1488				 (struct sctp_tcb *)NULL,
1489				 (struct sctp_nets *)NULL);
1490		return;
1491	}
1492	LIST_INIT(&asc->list_of_work);
1493	asc->cnt = 0;
1494
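	/*
	 * Move all pending work items onto a private list while holding the
	 * address work-queue lock, then hand the whole batch to the iterator
	 * with the lock released.
	 */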
1495	SCTP_WQ_ADDR_LOCK();
1496	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1497		LIST_REMOVE(wi, sctp_nxt_addr);
1498		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1499		asc->cnt++;
1500	}
1501	SCTP_WQ_ADDR_UNLOCK();
1502
1503	if (asc->cnt == 0) {
1504		SCTP_FREE(asc, SCTP_M_ASC_IT);
1505	} else {
1506		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1507					     sctp_asconf_iterator_stcb,
1508					     NULL, /* No ep end for boundall */
1509					     SCTP_PCB_FLAGS_BOUNDALL,
1510					     SCTP_PCB_ANY_FEATURES,
1511					     SCTP_ASOC_ANY_STATE,
1512					     (void *)asc, 0,
1513					     sctp_asconf_iterator_end, NULL, 0);
1514	}
1515}
1516
1517void
1518sctp_timeout_handler(void *t)
1519{
1520	struct sctp_inpcb *inp;
1521	struct sctp_tcb *stcb;
1522	struct sctp_nets *net;
1523	struct sctp_timer *tmr;
1524#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1525	struct socket *so;
1526#endif
1527	int did_output, type;
1528
1529	tmr = (struct sctp_timer *)t;
1530	inp = (struct sctp_inpcb *)tmr->ep;
1531	stcb = (struct sctp_tcb *)tmr->tcb;
1532	net = (struct sctp_nets *)tmr->net;
1533#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1534	CURVNET_SET((struct vnet *)tmr->vnet);
1535#endif
1536	did_output = 1;
1537
1538#ifdef SCTP_AUDITING_ENABLED
1539	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1540	sctp_auditing(3, inp, stcb, net);
1541#endif
1542
1543	/* sanity checks... */
1544	if (tmr->self != (void *)tmr) {
1545		/*
1546		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1547		 *             (void *)tmr);
1548		 */
1549#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1550		CURVNET_RESTORE();
1551#endif
1552		return;
1553	}
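	/*
	 * The 0xa00x values written to stopped_from below record how far the
	 * sanity checks got before an early return.
	 */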
1554	tmr->stopped_from = 0xa001;
1555	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1556		/*
1557		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1558		 * tmr->type);
1559		 */
1560#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1561		CURVNET_RESTORE();
1562#endif
1563		return;
1564	}
1565	tmr->stopped_from = 0xa002;
1566	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1567#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1568		CURVNET_RESTORE();
1569#endif
1570		return;
1571	}
1572	/* record progress and save the timer type */
1573	tmr->stopped_from = 0xa003;
1574	type = tmr->type;
1575	if (inp) {
1576		SCTP_INP_INCR_REF(inp);
1577		if ((inp->sctp_socket == NULL) &&
1578		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1579		     (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1580		     (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1581		     (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1582		     (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1583		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1584		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1585		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1586		     (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1587			) {
1588			SCTP_INP_DECR_REF(inp);
1589#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1590			CURVNET_RESTORE();
1591#endif
1592			return;
1593		}
1594	}
1595	tmr->stopped_from = 0xa004;
1596	if (stcb) {
1597		atomic_add_int(&stcb->asoc.refcnt, 1);
1598		if (stcb->asoc.state == 0) {
1599			atomic_add_int(&stcb->asoc.refcnt, -1);
1600			if (inp) {
1601				SCTP_INP_DECR_REF(inp);
1602			}
1603#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1604			CURVNET_RESTORE();
1605#endif
1606			return;
1607		}
1608	}
1609	tmr->stopped_from = 0xa005;
1610	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1611	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1612		if (inp) {
1613			SCTP_INP_DECR_REF(inp);
1614		}
1615		if (stcb) {
1616			atomic_add_int(&stcb->asoc.refcnt, -1);
1617		}
1618#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1619		CURVNET_RESTORE();
1620#endif
1621		return;
1622	}
1623	tmr->stopped_from = 0xa006;
1624
1625	if (stcb) {
1626		SCTP_TCB_LOCK(stcb);
1627		atomic_add_int(&stcb->asoc.refcnt, -1);
1628		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1629		    ((stcb->asoc.state == 0) ||
1630		     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1631			SCTP_TCB_UNLOCK(stcb);
1632			if (inp) {
1633				SCTP_INP_DECR_REF(inp);
1634			}
1635#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1636			CURVNET_RESTORE();
1637#endif
1638			return;
1639		}
1640	}
1641	/* record in stopped_from which timeout (t-o) occurred */
1642	tmr->stopped_from = tmr->type;
1643
1644	/* mark as being serviced now */
1645	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1646		/*
1647		 * Callout has been rescheduled.
1648		 */
1649		goto get_out;
1650	}
1651	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1652		/*
1653		 * Not active, so no action.
1654		 */
1655		goto get_out;
1656	}
1657	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1658
1659	/* call the handler for the appropriate timer type */
1660	switch (tmr->type) {
1661	case SCTP_TIMER_TYPE_ZERO_COPY:
1662		if (inp == NULL) {
1663			break;
1664		}
1665		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1666			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1667		}
1668		break;
1669	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1670		if (inp == NULL) {
1671			break;
1672		}
1673		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1674			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1675		}
1676		break;
1677	case SCTP_TIMER_TYPE_ADDR_WQ:
1678		sctp_handle_addr_wq();
1679		break;
1680	case SCTP_TIMER_TYPE_SEND:
1681		if ((stcb == NULL) || (inp == NULL)) {
1682			break;
1683		}
1684		SCTP_STAT_INCR(sctps_timodata);
1685		stcb->asoc.timodata++;
1686		stcb->asoc.num_send_timers_up--;
1687		if (stcb->asoc.num_send_timers_up < 0) {
1688			stcb->asoc.num_send_timers_up = 0;
1689		}
1690		SCTP_TCB_LOCK_ASSERT(stcb);
1691		if (sctp_t3rxt_timer(inp, stcb, net)) {
1692			/* no need to unlock the tcb, it's gone */
1694			goto out_decr;
1695		}
1696		SCTP_TCB_LOCK_ASSERT(stcb);
1697#ifdef SCTP_AUDITING_ENABLED
1698		sctp_auditing(4, inp, stcb, net);
1699#endif
1700		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1701		if ((stcb->asoc.num_send_timers_up == 0) &&
1702		    (stcb->asoc.sent_queue_cnt > 0)) {
1703			struct sctp_tmit_chunk *chk;
1704
1705			/*
1706			 * Safeguard: if there are chunks on the sent queue
1707			 * somewhere but no timers running, something is
1708			 * wrong... so we start a timer on the first chunk
1709			 * on the sent queue on whatever net it is sent to.
1710			 */
1711			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1712			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1713			    chk->whoTo);
1714		}
1715		break;
1716	case SCTP_TIMER_TYPE_INIT:
1717		if ((stcb == NULL) || (inp == NULL)) {
1718			break;
1719		}
1720		SCTP_STAT_INCR(sctps_timoinit);
1721		stcb->asoc.timoinit++;
1722		if (sctp_t1init_timer(inp, stcb, net)) {
1723			/* no need to unlock the tcb, it's gone */
1724			goto out_decr;
1725		}
1726		/* We do output but not here */
1727		did_output = 0;
1728		break;
1729	case SCTP_TIMER_TYPE_RECV:
1730		if ((stcb == NULL) || (inp == NULL)) {
1731			break;
1732		}
1733		SCTP_STAT_INCR(sctps_timosack);
1734		stcb->asoc.timosack++;
1735		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1736#ifdef SCTP_AUDITING_ENABLED
1737		sctp_auditing(4, inp, stcb, net);
1738#endif
1739		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1740		break;
1741	case SCTP_TIMER_TYPE_SHUTDOWN:
1742		if ((stcb == NULL) || (inp == NULL)) {
1743			break;
1744		}
1745		if (sctp_shutdown_timer(inp, stcb, net)) {
1746			/* no need to unlock the tcb, it's gone */
1747			goto out_decr;
1748		}
1749		SCTP_STAT_INCR(sctps_timoshutdown);
1750		stcb->asoc.timoshutdown++;
1751#ifdef SCTP_AUDITING_ENABLED
1752		sctp_auditing(4, inp, stcb, net);
1753#endif
1754		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1755		break;
1756	case SCTP_TIMER_TYPE_HEARTBEAT:
1757		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1758			break;
1759		}
1760		SCTP_STAT_INCR(sctps_timoheartbeat);
1761		stcb->asoc.timoheartbeat++;
1762		if (sctp_heartbeat_timer(inp, stcb, net)) {
1763			/* no need to unlock the tcb, it's gone */
1764			goto out_decr;
1765		}
1766#ifdef SCTP_AUDITING_ENABLED
1767		sctp_auditing(4, inp, stcb, net);
1768#endif
1769		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1770			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1771			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1772		}
1773		break;
1774	case SCTP_TIMER_TYPE_COOKIE:
1775		if ((stcb == NULL) || (inp == NULL)) {
1776			break;
1777		}
1778
1779		if (sctp_cookie_timer(inp, stcb, net)) {
1780			/* no need to unlock on tcb, it's gone */
1781			goto out_decr;
1782		}
1783		SCTP_STAT_INCR(sctps_timocookie);
1784		stcb->asoc.timocookie++;
1785#ifdef SCTP_AUDITING_ENABLED
1786		sctp_auditing(4, inp, stcb, net);
1787#endif
1788		/*
1789		 * We treat the cookie timer pretty much the same as the T3
1790		 * timer with respect to the "from" value passed to chunk_output.
1791		 */
1792		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1793		break;
1794	case SCTP_TIMER_TYPE_NEWCOOKIE:
1795		{
1796			struct timeval tv;
1797			int i, secret;
1798			if (inp == NULL) {
1799				break;
1800			}
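			/*
			 * Rotate the endpoint's cookie secret: record when the
			 * secret changed, advance the current secret number
			 * (wrapping around), refill that slot with fresh random
			 * words, and restart the timer.
			 */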
1801			SCTP_STAT_INCR(sctps_timosecret);
1802			(void)SCTP_GETTIME_TIMEVAL(&tv);
1803			SCTP_INP_WLOCK(inp);
1804			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1805			inp->sctp_ep.last_secret_number =
1806			    inp->sctp_ep.current_secret_number;
1807			inp->sctp_ep.current_secret_number++;
1808			if (inp->sctp_ep.current_secret_number >=
1809			    SCTP_HOW_MANY_SECRETS) {
1810				inp->sctp_ep.current_secret_number = 0;
1811			}
1812			secret = (int)inp->sctp_ep.current_secret_number;
1813			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1814				inp->sctp_ep.secret_key[secret][i] =
1815				    sctp_select_initial_TSN(&inp->sctp_ep);
1816			}
1817			SCTP_INP_WUNLOCK(inp);
1818			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1819		}
1820		did_output = 0;
1821		break;
1822	case SCTP_TIMER_TYPE_PATHMTURAISE:
1823		if ((stcb == NULL) || (inp == NULL)) {
1824			break;
1825		}
1826		SCTP_STAT_INCR(sctps_timopathmtu);
1827		sctp_pathmtu_timer(inp, stcb, net);
1828		did_output = 0;
1829		break;
1830	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1831		if ((stcb == NULL) || (inp == NULL)) {
1832			break;
1833		}
1834		if (sctp_shutdownack_timer(inp, stcb, net)) {
1835			/* no need to unlock on tcb, it's gone */
1836			goto out_decr;
1837		}
1838		SCTP_STAT_INCR(sctps_timoshutdownack);
1839		stcb->asoc.timoshutdownack++;
1840#ifdef SCTP_AUDITING_ENABLED
1841		sctp_auditing(4, inp, stcb, net);
1842#endif
1843		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1844		break;
1845	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1846		if ((stcb == NULL) || (inp == NULL)) {
1847			break;
1848		}
1849		SCTP_STAT_INCR(sctps_timoshutdownguard);
1850		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
1851		/* no need to unlock on tcb, it's gone */
1852		goto out_decr;
1853
1854	case SCTP_TIMER_TYPE_STRRESET:
1855		if ((stcb == NULL) || (inp == NULL)) {
1856			break;
1857		}
1858		if (sctp_strreset_timer(inp, stcb, net)) {
1859			/* no need to unlock on tcb, it's gone */
1860			goto out_decr;
1861		}
1862		SCTP_STAT_INCR(sctps_timostrmrst);
1863		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1864		break;
1865	case SCTP_TIMER_TYPE_ASCONF:
1866		if ((stcb == NULL) || (inp == NULL)) {
1867			break;
1868		}
1869		if (sctp_asconf_timer(inp, stcb, net)) {
1870			/* no need to unlock on tcb, it's gone */
1871			goto out_decr;
1872		}
1873		SCTP_STAT_INCR(sctps_timoasconf);
1874#ifdef SCTP_AUDITING_ENABLED
1875		sctp_auditing(4, inp, stcb, net);
1876#endif
1877		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1878		break;
1879	case SCTP_TIMER_TYPE_PRIM_DELETED:
1880		if ((stcb == NULL) || (inp == NULL)) {
1881			break;
1882		}
1883		sctp_delete_prim_timer(inp, stcb, net);
1884		SCTP_STAT_INCR(sctps_timodelprim);
1885		break;
1886
1887	case SCTP_TIMER_TYPE_AUTOCLOSE:
1888		if ((stcb == NULL) || (inp == NULL)) {
1889			break;
1890		}
1891		SCTP_STAT_INCR(sctps_timoautoclose);
1892		sctp_autoclose_timer(inp, stcb, net);
1893		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1894		did_output = 0;
1895		break;
1896	case SCTP_TIMER_TYPE_ASOCKILL:
1897		if ((stcb == NULL) || (inp == NULL)) {
1898			break;
1899		}
1900		SCTP_STAT_INCR(sctps_timoassockill);
1901		/* Can we free it yet? */
1902		SCTP_INP_DECR_REF(inp);
1903		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_1);
1904#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1905		so = SCTP_INP_SO(inp);
1906		atomic_add_int(&stcb->asoc.refcnt, 1);
1907		SCTP_TCB_UNLOCK(stcb);
1908		SCTP_SOCKET_LOCK(so, 1);
1909		SCTP_TCB_LOCK(stcb);
1910		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1911#endif
1912		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_2);
1913#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1914		SCTP_SOCKET_UNLOCK(so, 1);
1915#endif
1916		/*
1917		 * sctp_free_assoc() always unlocks (or destroys) the lock, so
1918		 * prevent a duplicate unlock or an unlock of a freed mutex.
1919		 */
1920		stcb = NULL;
1921		goto out_no_decr;
1922	case SCTP_TIMER_TYPE_INPKILL:
1923		SCTP_STAT_INCR(sctps_timoinpkill);
1924		if (inp == NULL) {
1925			break;
1926		}
1927		/*
1928		 * special case, take away our increment since WE are the
1929		 * killer
1930		 */
1931		SCTP_INP_DECR_REF(inp);
1932		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_3);
1933#if defined(__APPLE__)
1934		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
1935#endif
1936		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1937				SCTP_CALLED_FROM_INPKILL_TIMER);
1938#if defined(__APPLE__)
1939		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
1940#endif
1941		inp = NULL;
1942		goto out_no_decr;
1943	default:
1944		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1945			tmr->type);
1946		break;
1947	}
1948#ifdef SCTP_AUDITING_ENABLED
1949	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1950	if (inp)
1951		sctp_auditing(5, inp, stcb, net);
1952#endif
1953	if ((did_output) && stcb) {
1954		/*
1955		 * Now we need to clean up the control chunk chain if an
1956		 * ECNE is on it. It must be marked as UNSENT again so the
1957		 * next call will continue to send it until we get a CWR to
1958		 * remove it. It is, however, unlikely that we will find an
1959		 * ECN echo on the chain.
1960		 */
1961		sctp_fix_ecn_echo(&stcb->asoc);
1962	}
1963get_out:
1964	if (stcb) {
1965		SCTP_TCB_UNLOCK(stcb);
1966	}
1967
1968out_decr:
1969	if (inp) {
1970		SCTP_INP_DECR_REF(inp);
1971	}
1972
1973out_no_decr:
1974	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1975			  type);
1976#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1977	CURVNET_RESTORE();
1978#endif
1979}
1980
1981void
1982sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1983    struct sctp_nets *net)
1984{
1985	uint32_t to_ticks;
1986	struct sctp_timer *tmr;
1987
1988	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1989		return;
1990
1991	tmr = NULL;
1992	if (stcb) {
1993		SCTP_TCB_LOCK_ASSERT(stcb);
1994	}
1995	switch (t_type) {
1996	case SCTP_TIMER_TYPE_ZERO_COPY:
1997		tmr = &inp->sctp_ep.zero_copy_timer;
1998		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1999		break;
2000	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2001		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2002		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
2003		break;
2004	case SCTP_TIMER_TYPE_ADDR_WQ:
2005		/* Only 1 tick away :-) */
2006		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2007		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2008		break;
2009	case SCTP_TIMER_TYPE_SEND:
2010		/* Here we use the RTO timer */
2011		{
2012			int rto_val;
2013
2014			if ((stcb == NULL) || (net == NULL)) {
2015				return;
2016			}
2017			tmr = &net->rxt_timer;
2018			if (net->RTO == 0) {
2019				rto_val = stcb->asoc.initial_rto;
2020			} else {
2021				rto_val = net->RTO;
2022			}
2023			to_ticks = MSEC_TO_TICKS(rto_val);
2024		}
2025		break;
2026	case SCTP_TIMER_TYPE_INIT:
2027		/*
2028		 * Here we use the INIT timer default, usually about 1
2029		 * minute.
2030		 */
2031		if ((stcb == NULL) || (net == NULL)) {
2032			return;
2033		}
2034		tmr = &net->rxt_timer;
2035		if (net->RTO == 0) {
2036			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2037		} else {
2038			to_ticks = MSEC_TO_TICKS(net->RTO);
2039		}
2040		break;
2041	case SCTP_TIMER_TYPE_RECV:
2042		/*
2043		 * Here we use the Delayed-Ack timer value from the inp,
2044		 * usually about 200ms.
2045		 */
2046		if (stcb == NULL) {
2047			return;
2048		}
2049		tmr = &stcb->asoc.dack_timer;
2050		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2051		break;
2052	case SCTP_TIMER_TYPE_SHUTDOWN:
2053		/* Here we use the RTO of the destination. */
2054		if ((stcb == NULL) || (net == NULL)) {
2055			return;
2056		}
2057		if (net->RTO == 0) {
2058			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2059		} else {
2060			to_ticks = MSEC_TO_TICKS(net->RTO);
2061		}
2062		tmr = &net->rxt_timer;
2063		break;
2064	case SCTP_TIMER_TYPE_HEARTBEAT:
2065		/*
2066		 * The net is used here so that we can add in the RTO, even
2067		 * though we use a different timer. We also add the HB delay
2068		 * PLUS a random jitter.
2069		 */
2070		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2071			return;
2072		} else {
2073			uint32_t rndval;
2074			uint32_t jitter;
2075
2076			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2077			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2078				return;
2079			}
2080			if (net->RTO == 0) {
2081				to_ticks = stcb->asoc.initial_rto;
2082			} else {
2083				to_ticks = net->RTO;
2084			}
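			/*
			 * Pick a random jitter in [0, RTO) and fold it around
			 * RTO/2, so that the resulting timeout falls roughly in
			 * [RTO/2, 3*RTO/2), i.e. the RTO with +/- 50% jitter.
			 */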
2085			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2086			jitter = rndval % to_ticks;
2087			if (jitter >= (to_ticks >> 1)) {
2088				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2089			} else {
2090				to_ticks = to_ticks - jitter;
2091			}
2092			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2093			    !(net->dest_state & SCTP_ADDR_PF)) {
2094				to_ticks += net->heart_beat_delay;
2095			}
2096			/*
2097			 * Now we must convert to_ticks, which is currently
2098			 * in ms, to ticks.
2099			 */
2100			to_ticks = MSEC_TO_TICKS(to_ticks);
2101			tmr = &net->hb_timer;
2102		}
2103		break;
2104	case SCTP_TIMER_TYPE_COOKIE:
2105		/*
2106		 * Here we can use the RTO timer from the network since one
2107		 * RTT was complete. If a retransmission happened then we
2108		 * will be using the initial RTO value.
2109		 */
2110		if ((stcb == NULL) || (net == NULL)) {
2111			return;
2112		}
2113		if (net->RTO == 0) {
2114			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2115		} else {
2116			to_ticks = MSEC_TO_TICKS(net->RTO);
2117		}
2118		tmr = &net->rxt_timer;
2119		break;
2120	case SCTP_TIMER_TYPE_NEWCOOKIE:
2121		/*
2122		 * Nothing needed but the endpoint here; usually about 60
2123		 * minutes.
2124		 */
2125		if (inp == NULL) {
2126			return;
2127		}
2128		tmr = &inp->sctp_ep.signature_change;
2129		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2130		break;
2131	case SCTP_TIMER_TYPE_ASOCKILL:
2132		if (stcb == NULL) {
2133			return;
2134		}
2135		tmr = &stcb->asoc.strreset_timer;
2136		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2137		break;
2138	case SCTP_TIMER_TYPE_INPKILL:
2139		/*
2140		 * The inp is set up to die. We re-use the signature_change
2141		 * timer since that has stopped and we are in the GONE
2142		 * state.
2143		 */
2144		if (inp == NULL) {
2145			return;
2146		}
2147		tmr = &inp->sctp_ep.signature_change;
2148		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2149		break;
2150	case SCTP_TIMER_TYPE_PATHMTURAISE:
2151		/*
2152		 * Here we use the value found in the EP for PMTU, usually
2153		 * about 10 minutes.
2154		 */
2155		if ((stcb == NULL) || (inp == NULL)) {
2156			return;
2157		}
2158		if (net == NULL) {
2159			return;
2160		}
2161		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2162			return;
2163		}
2164		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2165		tmr = &net->pmtu_timer;
2166		break;
2167	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2168		/* Here we use the RTO of the destination */
2169		if ((stcb == NULL) || (net == NULL)) {
2170			return;
2171		}
2172		if (net->RTO == 0) {
2173			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2174		} else {
2175			to_ticks = MSEC_TO_TICKS(net->RTO);
2176		}
2177		tmr = &net->rxt_timer;
2178		break;
2179	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2180		/*
2181		 * Here we use the endpoint's shutdown guard timer, usually
2182		 * about 3 minutes.
2183		 */
2184		if ((inp == NULL) || (stcb == NULL)) {
2185			return;
2186		}
2187		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2188		tmr = &stcb->asoc.shut_guard_timer;
2189		break;
2190	case SCTP_TIMER_TYPE_STRRESET:
2191		/*
2192		 * Here the timer comes from the stcb but its value is from
2193		 * the net's RTO.
2194		 */
2195		if ((stcb == NULL) || (net == NULL)) {
2196			return;
2197		}
2198		if (net->RTO == 0) {
2199			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2200		} else {
2201			to_ticks = MSEC_TO_TICKS(net->RTO);
2202		}
2203		tmr = &stcb->asoc.strreset_timer;
2204		break;
2205	case SCTP_TIMER_TYPE_ASCONF:
2206		/*
2207		 * Here the timer comes from the stcb but its value is from
2208		 * the net's RTO.
2209		 */
2210		if ((stcb == NULL) || (net == NULL)) {
2211			return;
2212		}
2213		if (net->RTO == 0) {
2214			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2215		} else {
2216			to_ticks = MSEC_TO_TICKS(net->RTO);
2217		}
2218		tmr = &stcb->asoc.asconf_timer;
2219		break;
2220	case SCTP_TIMER_TYPE_PRIM_DELETED:
2221		if ((stcb == NULL) || (net != NULL)) {
2222			return;
2223		}
2224		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2225		tmr = &stcb->asoc.delete_prim_timer;
2226		break;
2227	case SCTP_TIMER_TYPE_AUTOCLOSE:
2228		if (stcb == NULL) {
2229			return;
2230		}
2231		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2232			/*
2233			 * Really an error since stcb is NOT set to
2234			 * autoclose
2235			 */
2236			return;
2237		}
2238		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2239		tmr = &stcb->asoc.autoclose_timer;
2240		break;
2241	default:
2242		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2243			__FUNCTION__, t_type);
2244		return;
2246	}
2247	if ((to_ticks <= 0) || (tmr == NULL)) {
2248		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2249			__FUNCTION__, t_type, to_ticks, (void *)tmr);
2250		return;
2251	}
2252	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2253		/*
2254		 * We do NOT allow the timer to already be running; if it is,
2255		 * we leave the current one up unchanged.
2256		 */
2257		return;
2258	}
2259	/* At this point we can proceed */
2260	if (t_type == SCTP_TIMER_TYPE_SEND) {
2261		stcb->asoc.num_send_timers_up++;
2262	}
2263	tmr->stopped_from = 0;
2264	tmr->type = t_type;
2265	tmr->ep = (void *)inp;
2266	tmr->tcb = (void *)stcb;
2267	tmr->net = (void *)net;
2268	tmr->self = (void *)tmr;
2269#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
2270	tmr->vnet = (void *)curvnet;
2271#endif
2272#ifndef __Panda__
2273	tmr->ticks = sctp_get_tick_count();
2274#endif
2275	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2276	return;
2277}
2278
2279void
2280sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2281    struct sctp_nets *net, uint32_t from)
2282{
2283	struct sctp_timer *tmr;
2284
2285	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2286	    (inp == NULL))
2287		return;
2288
2289	tmr = NULL;
2290	if (stcb) {
2291		SCTP_TCB_LOCK_ASSERT(stcb);
2292	}
2293	switch (t_type) {
2294	case SCTP_TIMER_TYPE_ZERO_COPY:
2295		tmr = &inp->sctp_ep.zero_copy_timer;
2296		break;
2297	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2298		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2299		break;
2300	case SCTP_TIMER_TYPE_ADDR_WQ:
2301		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2302		break;
2303	case SCTP_TIMER_TYPE_SEND:
2304		if ((stcb == NULL) || (net == NULL)) {
2305			return;
2306		}
2307		tmr = &net->rxt_timer;
2308		break;
2309	case SCTP_TIMER_TYPE_INIT:
2310		if ((stcb == NULL) || (net == NULL)) {
2311			return;
2312		}
2313		tmr = &net->rxt_timer;
2314		break;
2315	case SCTP_TIMER_TYPE_RECV:
2316		if (stcb == NULL) {
2317			return;
2318		}
2319		tmr = &stcb->asoc.dack_timer;
2320		break;
2321	case SCTP_TIMER_TYPE_SHUTDOWN:
2322		if ((stcb == NULL) || (net == NULL)) {
2323			return;
2324		}
2325		tmr = &net->rxt_timer;
2326		break;
2327	case SCTP_TIMER_TYPE_HEARTBEAT:
2328		if ((stcb == NULL) || (net == NULL)) {
2329			return;
2330		}
2331		tmr = &net->hb_timer;
2332		break;
2333	case SCTP_TIMER_TYPE_COOKIE:
2334		if ((stcb == NULL) || (net == NULL)) {
2335			return;
2336		}
2337		tmr = &net->rxt_timer;
2338		break;
2339	case SCTP_TIMER_TYPE_NEWCOOKIE:
2340		/* nothing needed but the endpoint here */
2341		tmr = &inp->sctp_ep.signature_change;
2342		/*
2343		 * We re-use the newcookie timer for the INP kill timer. We
2344		 * must ensure that we do not kill it by accident.
2345		 */
2346		break;
2347	case SCTP_TIMER_TYPE_ASOCKILL:
2348		/*
2349		 * Stop the asoc kill timer.
2350		 */
2351		if (stcb == NULL) {
2352			return;
2353		}
2354		tmr = &stcb->asoc.strreset_timer;
2355		break;
2356
2357	case SCTP_TIMER_TYPE_INPKILL:
2358		/*
2359		 * The inp is set up to die. We re-use the signature_change
2360		 * timer since that has stopped and we are in the GONE
2361		 * state.
2362		 */
2363		tmr = &inp->sctp_ep.signature_change;
2364		break;
2365	case SCTP_TIMER_TYPE_PATHMTURAISE:
2366		if ((stcb == NULL) || (net == NULL)) {
2367			return;
2368		}
2369		tmr = &net->pmtu_timer;
2370		break;
2371	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2372		if ((stcb == NULL) || (net == NULL)) {
2373			return;
2374		}
2375		tmr = &net->rxt_timer;
2376		break;
2377	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2378		if (stcb == NULL) {
2379			return;
2380		}
2381		tmr = &stcb->asoc.shut_guard_timer;
2382		break;
2383	case SCTP_TIMER_TYPE_STRRESET:
2384		if (stcb == NULL) {
2385			return;
2386		}
2387		tmr = &stcb->asoc.strreset_timer;
2388		break;
2389	case SCTP_TIMER_TYPE_ASCONF:
2390		if (stcb == NULL) {
2391			return;
2392		}
2393		tmr = &stcb->asoc.asconf_timer;
2394		break;
2395	case SCTP_TIMER_TYPE_PRIM_DELETED:
2396		if (stcb == NULL) {
2397			return;
2398		}
2399		tmr = &stcb->asoc.delete_prim_timer;
2400		break;
2401	case SCTP_TIMER_TYPE_AUTOCLOSE:
2402		if (stcb == NULL) {
2403			return;
2404		}
2405		tmr = &stcb->asoc.autoclose_timer;
2406		break;
2407	default:
2408		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2409			__FUNCTION__, t_type);
2410		break;
2411	}
2412	if (tmr == NULL) {
2413		return;
2414	}
2415	if ((tmr->type != t_type) && tmr->type) {
2416		/*
2417		 * OK, we have a timer that is under joint use, for example
2418		 * the cookie timer shared with the SEND timer. We are
2419		 * therefore NOT running the timer that the caller wants
2420		 * stopped, so just return.
2421		 */
2422		return;
2423	}
2424	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2425		stcb->asoc.num_send_timers_up--;
2426		if (stcb->asoc.num_send_timers_up < 0) {
2427			stcb->asoc.num_send_timers_up = 0;
2428		}
2429	}
2430	tmr->self = NULL;
2431	tmr->stopped_from = from;
2432	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2433	return;
2434}
2435
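/*
 * Return the total number of data bytes in the mbuf chain 'm'.
 */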
2436uint32_t
2437sctp_calculate_len(struct mbuf *m)
2438{
2439	uint32_t tlen = 0;
2440	struct mbuf *at;
2441
2442	at = m;
2443	while (at) {
2444		tlen += SCTP_BUF_LEN(at);
2445		at = SCTP_BUF_NEXT(at);
2446	}
2447	return (tlen);
2448}
2449
2450void
2451sctp_mtu_size_reset(struct sctp_inpcb *inp,
2452    struct sctp_association *asoc, uint32_t mtu)
2453{
2454	/*
2455	 * Reset the P-MTU size on this association. This involves changing
2456	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2457	 * to allow the DF flag to be cleared.
2458	 */
2459	struct sctp_tmit_chunk *chk;
2460	unsigned int eff_mtu, ovh;
2461
2462	asoc->smallest_mtu = mtu;
2463	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2464		ovh = SCTP_MIN_OVERHEAD;
2465	} else {
2466		ovh = SCTP_MIN_V4_OVERHEAD;
2467	}
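	/*
	 * Any chunk whose send size exceeds the new MTU less the per-packet
	 * overhead must be marked so that fragmentation (clearing DF) is
	 * allowed.
	 */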
2468	eff_mtu = mtu - ovh;
2469	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2470		if (chk->send_size > eff_mtu) {
2471			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2472		}
2473	}
2474	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2475		if (chk->send_size > eff_mtu) {
2476			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2477		}
2478	}
2479}
2480
2481
2482/*
2483 * Given an association and the starting time of the current RTT period,
2484 * return the RTO in number of msecs. net should point to the current network.
2485 */
2486
2487uint32_t
2488sctp_calculate_rto(struct sctp_tcb *stcb,
2489		   struct sctp_association *asoc,
2490		   struct sctp_nets *net,
2491		   struct timeval *told,
2492		   int safe, int rtt_from_sack)
2493{
2494	/*-
2495	 * given an association and the starting time of the current RTT
2496	 * period (in *told), return the RTO in number of msecs.
2497	 */
2498	int32_t rtt; /* RTT in ms */
2499	uint32_t new_rto;
2500	int first_measure = 0;
2501	struct timeval now, then, *old;
2502
2503	/* Copy it out for sparc64 */
2504	if (safe == sctp_align_unsafe_makecopy) {
2505		old = &then;
2506		memcpy(&then, told, sizeof(struct timeval));
2507	} else if (safe == sctp_align_safe_nocopy) {
2508		old = told;
2509	} else {
2510		/* error */
2511		SCTP_PRINTF("Huh, bad rto calc call\n");
2512		return (0);
2513	}
2514	/************************/
2515	/* 1. calculate new RTT */
2516	/************************/
2517	/* get the current time */
2518	if (stcb->asoc.use_precise_time) {
2519		(void)SCTP_GETPTIME_TIMEVAL(&now);
2520	} else {
2521		(void)SCTP_GETTIME_TIMEVAL(&now);
2522	}
2523	timevalsub(&now, old);
2524	/* store the current RTT in us */
2525	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2526	           (uint64_t)now.tv_usec;
2527	/* compute RTT in ms */
2528	rtt = net->rtt / 1000;
2529	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2530		/* Tell the CC module that a new update has just occurred from a sack */
2531		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
2532	}
2533	/* Do we need to determine the LAN type? We do this only
2534	 * on SACKs, i.e. when the RTT is determined from data, not
2535	 * from non-data (HB/INIT->INIT-ACK).
2536	 */
2537	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2538	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2539		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2540			net->lan_type = SCTP_LAN_INTERNET;
2541		} else {
2542			net->lan_type = SCTP_LAN_LOCAL;
2543		}
2544	}
2545
2546	/***************************/
2547	/* 2. update RTTVAR & SRTT */
2548	/***************************/
2549	/*-
2550	 * Compute the scaled average lastsa and the
2551	 * scaled variance lastsv as described in van Jacobson's
2552	 * paper "Congestion Avoidance and Control", Annex A.
2553	 *
2554	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2555	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2556	 */
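	/*
	 * In unscaled terms the block below performs the classic update
	 *   SRTT   += (RTT - SRTT) / 2^SCTP_RTT_SHIFT
	 *   RTTVAR += (|RTT - SRTT| - RTTVAR) / 2^SCTP_RTT_VAR_SHIFT
	 * carried out directly on the scaled values lastsa and lastsv.
	 */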
2557	if (net->RTO_measured) {
2558		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2559		net->lastsa += rtt;
2560		if (rtt < 0) {
2561			rtt = -rtt;
2562		}
2563		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2564		net->lastsv += rtt;
2565		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2566			rto_logging(net, SCTP_LOG_RTTVAR);
2567		}
2568	} else {
2569		/* First RTO measurement */
2570		net->RTO_measured = 1;
2571		first_measure = 1;
2572		net->lastsa = rtt << SCTP_RTT_SHIFT;
2573		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2574		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2575			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2576		}
2577	}
2578	if (net->lastsv == 0) {
2579		net->lastsv = SCTP_CLOCK_GRANULARITY;
2580	}
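	/*
	 * RTO = SRTT + the scaled variance term. Assuming the usual
	 * SCTP_RTT_VAR_SHIFT of 2, this is RTO = SRTT + 4 * RTTVAR as in
	 * RFC 4960, Section 6.3.1.
	 */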
2581	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2582	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2583	    (stcb->asoc.sat_network_lockout == 0)) {
2584		stcb->asoc.sat_network = 1;
2585	} else if ((!first_measure) && stcb->asoc.sat_network) {
2586		stcb->asoc.sat_network = 0;
2587		stcb->asoc.sat_network_lockout = 1;
2588	}
2589	/* bound it, per C6/C7 in Section 5.3.1 */
2590	if (new_rto < stcb->asoc.minrto) {
2591		new_rto = stcb->asoc.minrto;
2592	}
2593	if (new_rto > stcb->asoc.maxrto) {
2594		new_rto = stcb->asoc.maxrto;
2595	}
2596	/* we are now returning the RTO */
2597	return (new_rto);
2598}
2599
2600/*
2601 * Return a pointer to a contiguous piece of data from the given mbuf chain
2602 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2603 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2604 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2605 */
2606caddr_t
2607sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2608{
2609	uint32_t count;
2610	uint8_t *ptr;
2611
2612	ptr = in_ptr;
2613	if ((off < 0) || (len <= 0))
2614		return (NULL);
2615
2616	/* find the desired start location */
2617	while ((m != NULL) && (off > 0)) {
2618		if (off < SCTP_BUF_LEN(m))
2619			break;
2620		off -= SCTP_BUF_LEN(m);
2621		m = SCTP_BUF_NEXT(m);
2622	}
2623	if (m == NULL)
2624		return (NULL);
2625
2626	/* is the current mbuf large enough (eg. contiguous)? */
2627	if ((SCTP_BUF_LEN(m) - off) >= len) {
2628		return (mtod(m, caddr_t) + off);
2629	} else {
2630		/* else, it spans more than one mbuf, so save a temp copy... */
2631		while ((m != NULL) && (len > 0)) {
2632			count = min(SCTP_BUF_LEN(m) - off, len);
2633			bcopy(mtod(m, caddr_t) + off, ptr, count);
2634			len -= count;
2635			ptr += count;
2636			off = 0;
2637			m = SCTP_BUF_NEXT(m);
2638		}
2639		if ((m == NULL) && (len > 0))
2640			return (NULL);
2641		else
2642			return ((caddr_t)in_ptr);
2643	}
2644}
2645
2646
2647
2648struct sctp_paramhdr *
2649sctp_get_next_param(struct mbuf *m,
2650    int offset,
2651    struct sctp_paramhdr *pull,
2652    int pull_limit)
2653{
2654	/* This just provides a typed signature to Peter's Pull routine */
2655	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2656	    (uint8_t *) pull));
2657}
2658
2659
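/*
 * Append 'padlen' (at most 3) zero bytes after the data in 'm' to pad a
 * chunk to a 4-byte boundary. If 'm' has no trailing space, a new mbuf is
 * allocated and chained after it. Returns the mbuf that holds the padding,
 * or NULL on failure.
 */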
2660struct mbuf *
2661sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2662{
2663	struct mbuf *m_last;
2664	caddr_t dp;
2665
2666	if (padlen > 3) {
2667		return (NULL);
2668	}
2669	if (padlen <= M_TRAILINGSPACE(m)) {
2670		/*
2671		 * The easy way. We hope the majority of the time we hit
2672		 * here :)
2673		 */
2674		m_last = m;
2675	} else {
2676		/* Hard way we must grow the mbuf chain */
2677		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2678		if (m_last == NULL) {
2679			return (NULL);
2680		}
2681		SCTP_BUF_LEN(m_last) = 0;
2682		SCTP_BUF_NEXT(m_last) = NULL;
2683		SCTP_BUF_NEXT(m) = m_last;
2684	}
2685	dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
2686	SCTP_BUF_LEN(m_last) += padlen;
2687	memset(dp, 0, padlen);
2688	return (m_last);
2689}
2690
2691struct mbuf *
2692sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2693{
2694	/* find the last mbuf in chain and pad it */
2695	struct mbuf *m_at;
2696
2697	if (last_mbuf != NULL) {
2698		return (sctp_add_pad_tombuf(last_mbuf, padval));
2699	} else {
2700		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2701			if (SCTP_BUF_NEXT(m_at) == NULL) {
2702				return (sctp_add_pad_tombuf(m_at, padval));
2703			}
2704		}
2705	}
2706	return (NULL);
2707}
2708
2709static void
2710sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2711    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2712#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2713    SCTP_UNUSED
2714#endif
2715    )
2716{
2717	struct mbuf *m_notify;
2718	struct sctp_assoc_change *sac;
2719	struct sctp_queued_to_read *control;
2720	size_t notif_len, abort_len;
2721	unsigned int i;
2722#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2723	struct socket *so;
2724#endif
2725
2726	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2727		notif_len = sizeof(struct sctp_assoc_change);
2728		if (abort != NULL) {
2729			abort_len = ntohs(abort->ch.chunk_length);
2730		} else {
2731			abort_len = 0;
2732		}
2733		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2734			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2735		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2736			notif_len += abort_len;
2737		}
2738		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2739		if (m_notify == NULL) {
2740			/* Retry with smaller value. */
2741			notif_len = sizeof(struct sctp_assoc_change);
2742			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2743			if (m_notify == NULL) {
2744				goto set_error;
2745			}
2746		}
2747		SCTP_BUF_NEXT(m_notify) = NULL;
2748		sac = mtod(m_notify, struct sctp_assoc_change *);
2749		memset(sac, 0, notif_len);
2750		sac->sac_type = SCTP_ASSOC_CHANGE;
2751		sac->sac_flags = 0;
2752		sac->sac_length = sizeof(struct sctp_assoc_change);
2753		sac->sac_state = state;
2754		sac->sac_error = error;
2755		/* XXX verify these stream counts */
2756		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2757		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2758		sac->sac_assoc_id = sctp_get_associd(stcb);
2759		if (notif_len > sizeof(struct sctp_assoc_change)) {
2760			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
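				/*
				 * Append one byte per supported feature to
				 * sac_info and account for it in sac_length.
				 */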
2761				i = 0;
2762				if (stcb->asoc.prsctp_supported == 1) {
2763					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2764				}
2765				if (stcb->asoc.auth_supported == 1) {
2766					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2767				}
2768				if (stcb->asoc.asconf_supported == 1) {
2769					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2770				}
2771				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2772				if (stcb->asoc.reconfig_supported == 1) {
2773					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2774				}
2775				sac->sac_length += i;
2776			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2777				memcpy(sac->sac_info, abort, abort_len);
2778				sac->sac_length += abort_len;
2779			}
2780		}
2781		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2782		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2783		                                 0, 0, stcb->asoc.context, 0, 0, 0,
2784		                                 m_notify);
2785		if (control != NULL) {
2786			control->length = SCTP_BUF_LEN(m_notify);
2787			/* not that we need this */
2788			control->tail_mbuf = m_notify;
2789			control->spec_flags = M_NOTIFICATION;
2790			sctp_add_to_readq(stcb->sctp_ep, stcb,
2791			                  control,
2792			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2793			                  so_locked);
2794		} else {
2795			sctp_m_freem(m_notify);
2796		}
2797	}
2798	/*
2799	 * For 1-to-1 style sockets, we send up an error when an ABORT
2800	 * comes in.
2801	 */
2802set_error:
2803	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2804	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2805	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2806		SOCK_LOCK(stcb->sctp_socket);
2807		if (from_peer) {
2808			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2809				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2810				stcb->sctp_socket->so_error = ECONNREFUSED;
2811			} else {
2812				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2813				stcb->sctp_socket->so_error = ECONNRESET;
2814			}
2815		} else {
2816			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2817			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2818				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2819				stcb->sctp_socket->so_error = ETIMEDOUT;
2820			} else {
2821				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2822				stcb->sctp_socket->so_error = ECONNABORTED;
2823			}
2824		}
2825	}
2826	/* Wake ANY sleepers */
2827#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2828	so = SCTP_INP_SO(stcb->sctp_ep);
2829	if (!so_locked) {
2830		atomic_add_int(&stcb->asoc.refcnt, 1);
2831		SCTP_TCB_UNLOCK(stcb);
2832		SCTP_SOCKET_LOCK(so, 1);
2833		SCTP_TCB_LOCK(stcb);
2834		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2835		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2836			SCTP_SOCKET_UNLOCK(so, 1);
2837			return;
2838		}
2839	}
2840#endif
2841	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2842	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2843	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2844#if defined(__APPLE__)
2845		socantrcvmore(stcb->sctp_socket);
2846#else
2847		socantrcvmore_locked(stcb->sctp_socket);
2848#endif
2849	}
2850	sorwakeup(stcb->sctp_socket);
2851	sowwakeup(stcb->sctp_socket);
2852#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2853	if (!so_locked) {
2854		SCTP_SOCKET_UNLOCK(so, 1);
2855	}
2856#endif
2857}
2858
2859static void
2860sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2861    struct sockaddr *sa, uint32_t error)
2862{
2863	struct mbuf *m_notify;
2864	struct sctp_paddr_change *spc;
2865	struct sctp_queued_to_read *control;
2866
2867	if ((stcb == NULL) ||
2868	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2869		/* event not enabled */
2870		return;
2871	}
2872	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2873	if (m_notify == NULL)
2874		return;
2875	SCTP_BUF_LEN(m_notify) = 0;
2876	spc = mtod(m_notify, struct sctp_paddr_change *);
2877	memset(spc, 0, sizeof(struct sctp_paddr_change));
2878	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2879	spc->spc_flags = 0;
2880	spc->spc_length = sizeof(struct sctp_paddr_change);
2881	switch (sa->sa_family) {
2882#ifdef INET
2883	case AF_INET:
2884		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2885		break;
2886#endif
2887#ifdef INET6
2888	case AF_INET6:
2889	{
2890#ifdef SCTP_EMBEDDED_V6_SCOPE
2891		struct sockaddr_in6 *sin6;
2892#endif /* SCTP_EMBEDDED_V6_SCOPE */
2893		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2894
2895#ifdef SCTP_EMBEDDED_V6_SCOPE
2896		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2897		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2898			if (sin6->sin6_scope_id == 0) {
2899				/* recover scope_id for user */
2900#ifdef SCTP_KAME
2901				(void)sa6_recoverscope(sin6);
2902#else
2903				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
2904						       NULL);
2905#endif
2906			} else {
2907				/* clear embedded scope_id for user */
2908				in6_clearscope(&sin6->sin6_addr);
2909			}
2910		}
2911#endif /* SCTP_EMBEDDED_V6_SCOPE */
2912		break;
2913	}
2914#endif
2915#if defined(__Userspace__)
2916	case AF_CONN:
2917		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
2918		break;
2919#endif
2920	default:
2921		/* TSNH */
2922		break;
2923	}
2924	spc->spc_state = state;
2925	spc->spc_error = error;
2926	spc->spc_assoc_id = sctp_get_associd(stcb);
2927
2928	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2929	SCTP_BUF_NEXT(m_notify) = NULL;
2930
2931	/* append to socket */
2932	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2933	                                 0, 0, stcb->asoc.context, 0, 0, 0,
2934	                                 m_notify);
2935	if (control == NULL) {
2936		/* no memory */
2937		sctp_m_freem(m_notify);
2938		return;
2939	}
2940	control->length = SCTP_BUF_LEN(m_notify);
2941	control->spec_flags = M_NOTIFICATION;
2942	/* not that we need this */
2943	control->tail_mbuf = m_notify;
2944	sctp_add_to_readq(stcb->sctp_ep, stcb,
2945	                  control,
2946	                  &stcb->sctp_socket->so_rcv, 1,
2947	                  SCTP_READ_LOCK_NOT_HELD,
2948	                  SCTP_SO_NOT_LOCKED);
2949}
2950
2951
2952static void
2953sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2954    struct sctp_tmit_chunk *chk, int so_locked
2955#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2956    SCTP_UNUSED
2957#endif
2958    )
2959{
2960	struct mbuf *m_notify;
2961	struct sctp_send_failed *ssf;
2962	struct sctp_send_failed_event *ssfe;
2963	struct sctp_queued_to_read *control;
2964	int length;
2965
2966	if ((stcb == NULL) ||
2967	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2968	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2969		/* event not enabled */
2970		return;
2971	}
2972
2973	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2974		length = sizeof(struct sctp_send_failed_event);
2975	} else {
2976		length = sizeof(struct sctp_send_failed);
2977	}
2978	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2979	if (m_notify == NULL)
2980		/* no space left */
2981		return;
2982	SCTP_BUF_LEN(m_notify) = 0;
2983	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2984		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2985		memset(ssfe, 0, length);
2986		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2987		if (sent) {
2988			ssfe->ssfe_flags = SCTP_DATA_SENT;
2989		} else {
2990			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2991		}
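		/*
		 * The reported length is the notification header plus the
		 * user data: the chunk's send size less the DATA chunk
		 * header that is trimmed off below.
		 */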
2992		length += chk->send_size;
2993		length -= sizeof(struct sctp_data_chunk);
2994		ssfe->ssfe_length = length;
2995		ssfe->ssfe_error = error;
2996		/* not exactly what the user sent in, but should be close :) */
2997		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2998		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2999		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
3000		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3001		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3002		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3003		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3004	} else {
3005		ssf = mtod(m_notify, struct sctp_send_failed *);
3006		memset(ssf, 0, length);
3007		ssf->ssf_type = SCTP_SEND_FAILED;
3008		if (sent) {
3009			ssf->ssf_flags = SCTP_DATA_SENT;
3010		} else {
3011			ssf->ssf_flags = SCTP_DATA_UNSENT;
3012		}
3013		length += chk->send_size;
3014		length -= sizeof(struct sctp_data_chunk);
3015		ssf->ssf_length = length;
3016		ssf->ssf_error = error;
3017		/* not exactly what the user sent in, but should be close :) */
3018		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3019		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3020		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3021		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3022		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3023		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3024		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3025		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3026		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3027	}
3028	if (chk->data) {
3029		/*
3030		 * trim off the sctp chunk header (it should
3031		 * be there)
3032		 */
3033		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3034			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3035			sctp_mbuf_crush(chk->data);
3036			chk->send_size -= sizeof(struct sctp_data_chunk);
3037		}
3038	}
3039	SCTP_BUF_NEXT(m_notify) = chk->data;
3040	/* Steal off the mbuf */
3041	chk->data = NULL;
3042	/*
3043	 * For this case, we check the actual socket buffer, since the assoc
3044	 * is going away and we don't want to overfill the socket buffer for
3045	 * a non-reader.
3046	 */
3047	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3048		sctp_m_freem(m_notify);
3049		return;
3050	}
3051	/* append to socket */
3052	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3053	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3054	                                 m_notify);
3055	if (control == NULL) {
3056		/* no memory */
3057		sctp_m_freem(m_notify);
3058		return;
3059	}
3060	control->spec_flags = M_NOTIFICATION;
3061	sctp_add_to_readq(stcb->sctp_ep, stcb,
3062	                  control,
3063	                  &stcb->sctp_socket->so_rcv, 1,
3064	                  SCTP_READ_LOCK_NOT_HELD,
3065	                  so_locked);
3066}
3067
3068
3069static void
3070sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3071			 struct sctp_stream_queue_pending *sp, int so_locked
3072#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3073                         SCTP_UNUSED
3074#endif
3075                         )
3076{
3077	struct mbuf *m_notify;
3078	struct sctp_send_failed *ssf;
3079	struct sctp_send_failed_event *ssfe;
3080	struct sctp_queued_to_read *control;
3081	int length;
3082
3083	if ((stcb == NULL) ||
3084	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3085	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3086		/* event not enabled */
3087		return;
3088	}
3089	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3090		length = sizeof(struct sctp_send_failed_event);
3091	} else {
3092		length = sizeof(struct sctp_send_failed);
3093	}
3094	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3095	if (m_notify == NULL) {
3096		/* no space left */
3097		return;
3098	}
3099	SCTP_BUF_LEN(m_notify) = 0;
3100	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3101		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3102		memset(ssfe, 0, length);
3103		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3104		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3105		length += sp->length;
3106		ssfe->ssfe_length = length;
3107		ssfe->ssfe_error = error;
3108		/* not exactly what the user sent in, but should be close :) */
3109		ssfe->ssfe_info.snd_sid = sp->stream;
3110		if (sp->some_taken) {
3111			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3112		} else {
3113			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3114		}
3115		ssfe->ssfe_info.snd_ppid = sp->ppid;
3116		ssfe->ssfe_info.snd_context = sp->context;
3117		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3118		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3119		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3120	} else {
3121		ssf = mtod(m_notify, struct sctp_send_failed *);
3122		memset(ssf, 0, length);
3123		ssf->ssf_type = SCTP_SEND_FAILED;
3124		ssf->ssf_flags = SCTP_DATA_UNSENT;
3125		length += sp->length;
3126		ssf->ssf_length = length;
3127		ssf->ssf_error = error;
3128		/* not exactly what the user sent in, but should be close :) */
3129		ssf->ssf_info.sinfo_stream = sp->stream;
3130		ssf->ssf_info.sinfo_ssn = 0;
3131		if (sp->some_taken) {
3132			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3133		} else {
3134			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3135		}
3136		ssf->ssf_info.sinfo_ppid = sp->ppid;
3137		ssf->ssf_info.sinfo_context = sp->context;
3138		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3139		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3140		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3141	}
3142	SCTP_BUF_NEXT(m_notify) = sp->data;
3143
3144	/* Steal off the mbuf */
3145	sp->data = NULL;
3146	/*
3147	 * For this case, we check the actual socket buffer, since the assoc
3148	 * is going away and we don't want to overfill the socket buffer for
3149	 * a non-reader.
3150	 */
3151	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3152		sctp_m_freem(m_notify);
3153		return;
3154	}
3155	/* append to socket */
3156	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3157	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3158	                                 m_notify);
3159	if (control == NULL) {
3160		/* no memory */
3161		sctp_m_freem(m_notify);
3162		return;
3163	}
3164	control->spec_flags = M_NOTIFICATION;
3165	sctp_add_to_readq(stcb->sctp_ep, stcb,
3166	    control,
3167	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3168}
3169
3170
3171
3172static void
3173sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3174{
3175	struct mbuf *m_notify;
3176	struct sctp_adaptation_event *sai;
3177	struct sctp_queued_to_read *control;
3178
3179	if ((stcb == NULL) ||
3180	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3181		/* event not enabled */
3182		return;
3183	}
3184
3185	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3186	if (m_notify == NULL)
3187		/* no space left */
3188		return;
3189	SCTP_BUF_LEN(m_notify) = 0;
3190	sai = mtod(m_notify, struct sctp_adaptation_event *);
3191	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3192	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3193	sai->sai_flags = 0;
3194	sai->sai_length = sizeof(struct sctp_adaptation_event);
3195	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3196	sai->sai_assoc_id = sctp_get_associd(stcb);
3197
3198	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3199	SCTP_BUF_NEXT(m_notify) = NULL;
3200
3201	/* append to socket */
3202	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3203	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3204	                                 m_notify);
3205	if (control == NULL) {
3206		/* no memory */
3207		sctp_m_freem(m_notify);
3208		return;
3209	}
3210	control->length = SCTP_BUF_LEN(m_notify);
3211	control->spec_flags = M_NOTIFICATION;
3212	/* not that we need this */
3213	control->tail_mbuf = m_notify;
3214	sctp_add_to_readq(stcb->sctp_ep, stcb,
3215	    control,
3216	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3217}
3218
3219/* This must always be called with the read-queue of the INP LOCKED. */
3220static void
3221sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3222					uint32_t val, int so_locked
3223#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3224                             SCTP_UNUSED
3225#endif
3226                                        )
3227{
3228	struct mbuf *m_notify;
3229	struct sctp_pdapi_event *pdapi;
3230	struct sctp_queued_to_read *control;
3231	struct sockbuf *sb;
3232
3233	if ((stcb == NULL) ||
3234	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3235		/* event not enabled */
3236		return;
3237	}
3238	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3239		return;
3240	}
3241
3242	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3243	if (m_notify == NULL)
3244		/* no space left */
3245		return;
3246	SCTP_BUF_LEN(m_notify) = 0;
3247	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3248	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3249	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3250	pdapi->pdapi_flags = 0;
3251	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3252	pdapi->pdapi_indication = error;
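	/*
	 * Unpack 'val': the upper 16 bits carry the stream id, the lower
	 * 16 bits the stream sequence number.
	 */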
3253	pdapi->pdapi_stream = (val >> 16);
3254	pdapi->pdapi_seq = (val & 0x0000ffff);
3255	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3256
3257	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3258	SCTP_BUF_NEXT(m_notify) = NULL;
3259	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3260					 0, 0, stcb->asoc.context, 0, 0, 0,
3261					 m_notify);
3262	if (control == NULL) {
3263		/* no memory */
3264		sctp_m_freem(m_notify);
3265		return;
3266	}
3267	control->spec_flags = M_NOTIFICATION;
3268	control->length = SCTP_BUF_LEN(m_notify);
3269	/* not that we need this */
3270	control->tail_mbuf = m_notify;
3271	control->held_length = 0;
3272	control->length = 0;
3273	sb = &stcb->sctp_socket->so_rcv;
3274	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3275		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3276	}
3277	sctp_sballoc(stcb, sb, m_notify);
3278	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3279		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
3280	}
3281	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3282	control->end_added = 1;
3283	if (stcb->asoc.control_pdapi)
3284		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi,  control, next);
3285	else {
3286		/* we really should not see this case */
3287		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3288	}
3289	if (stcb->sctp_ep && stcb->sctp_socket) {
3290		/* This should always be the case */
3291#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3292		struct socket *so;
3293
3294		so = SCTP_INP_SO(stcb->sctp_ep);
3295		if (!so_locked) {
3296			atomic_add_int(&stcb->asoc.refcnt, 1);
3297			SCTP_TCB_UNLOCK(stcb);
3298			SCTP_SOCKET_LOCK(so, 1);
3299			SCTP_TCB_LOCK(stcb);
3300			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3301			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3302				SCTP_SOCKET_UNLOCK(so, 1);
3303				return;
3304			}
3305		}
3306#endif
3307		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3308#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3309		if (!so_locked) {
3310			SCTP_SOCKET_UNLOCK(so, 1);
3311		}
3312#endif
3313	}
3314}
3315
3316static void
3317sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3318{
3319	struct mbuf *m_notify;
3320	struct sctp_shutdown_event *sse;
3321	struct sctp_queued_to_read *control;
3322
3323	/*
3324	 * For TCP model AND UDP connected sockets we will send an error up
3325	 * when a SHUTDOWN completes.
3326	 */
3327	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3328	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3329		/* mark socket closed for read/write and wakeup! */
3330#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3331		struct socket *so;
3332
3333		so = SCTP_INP_SO(stcb->sctp_ep);
3334		atomic_add_int(&stcb->asoc.refcnt, 1);
3335		SCTP_TCB_UNLOCK(stcb);
3336		SCTP_SOCKET_LOCK(so, 1);
3337		SCTP_TCB_LOCK(stcb);
3338		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3339		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3340			SCTP_SOCKET_UNLOCK(so, 1);
3341			return;
3342		}
3343#endif
3344		socantsendmore(stcb->sctp_socket);
3345#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3346		SCTP_SOCKET_UNLOCK(so, 1);
3347#endif
3348	}
3349	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3350		/* event not enabled */
3351		return;
3352	}
3353
3354	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3355	if (m_notify == NULL)
3356		/* no space left */
3357		return;
3358	sse = mtod(m_notify, struct sctp_shutdown_event *);
3359	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3360	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3361	sse->sse_flags = 0;
3362	sse->sse_length = sizeof(struct sctp_shutdown_event);
3363	sse->sse_assoc_id = sctp_get_associd(stcb);
3364
3365	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3366	SCTP_BUF_NEXT(m_notify) = NULL;
3367
3368	/* append to socket */
3369	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3370	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3371	                                 m_notify);
3372	if (control == NULL) {
3373		/* no memory */
3374		sctp_m_freem(m_notify);
3375		return;
3376	}
3377	control->spec_flags = M_NOTIFICATION;
3378	control->length = SCTP_BUF_LEN(m_notify);
3379	/* not that we need this */
3380	control->tail_mbuf = m_notify;
3381	sctp_add_to_readq(stcb->sctp_ep, stcb,
3382	    control,
3383	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3384}
3385
3386static void
3387sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3388                             int so_locked
3389#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3390                             SCTP_UNUSED
3391#endif
3392                             )
3393{
3394	struct mbuf *m_notify;
3395	struct sctp_sender_dry_event *event;
3396	struct sctp_queued_to_read *control;
3397
3398	if ((stcb == NULL) ||
3399	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3400		/* event not enabled */
3401		return;
3402	}
3403
3404	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3405	if (m_notify == NULL) {
3406		/* no space left */
3407		return;
3408	}
3409	SCTP_BUF_LEN(m_notify) = 0;
3410	event = mtod(m_notify, struct sctp_sender_dry_event *);
3411	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3412	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3413	event->sender_dry_flags = 0;
3414	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3415	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3416
3417	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3418	SCTP_BUF_NEXT(m_notify) = NULL;
3419
3420	/* append to socket */
3421	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3422	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3423	                                 m_notify);
3424	if (control == NULL) {
3425		/* no memory */
3426		sctp_m_freem(m_notify);
3427		return;
3428	}
3429	control->length = SCTP_BUF_LEN(m_notify);
3430	control->spec_flags = M_NOTIFICATION;
3431	/* not that we need this */
3432	control->tail_mbuf = m_notify;
3433	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3434	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3435}
3436
3437
3438void
3439sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3440{
3441	struct mbuf *m_notify;
3442	struct sctp_queued_to_read *control;
3443	struct sctp_stream_change_event *stradd;
3444
3445	if ((stcb == NULL) ||
3446	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3447		/* event not enabled */
3448		return;
3449	}
3450	if ((stcb->asoc.peer_req_out) && flag) {
3451		/* Peer made the request, don't tell the local user */
3452		stcb->asoc.peer_req_out = 0;
3453		return;
3454	}
3455	stcb->asoc.peer_req_out = 0;
3456	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3457	if (m_notify == NULL)
3458		/* no space left */
3459		return;
3460	SCTP_BUF_LEN(m_notify) = 0;
3461	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3462	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3463	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3464	stradd->strchange_flags = flag;
3465	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3466	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3467	stradd->strchange_instrms = numberin;
3468	stradd->strchange_outstrms = numberout;
3469	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3470	SCTP_BUF_NEXT(m_notify) = NULL;
3471	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3472		/* no space */
3473		sctp_m_freem(m_notify);
3474		return;
3475	}
3476	/* append to socket */
3477	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3478	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3479	                                 m_notify);
3480	if (control == NULL) {
3481		/* no memory */
3482		sctp_m_freem(m_notify);
3483		return;
3484	}
3485	control->spec_flags = M_NOTIFICATION;
3486	control->length = SCTP_BUF_LEN(m_notify);
3487	/* not that we need this */
3488	control->tail_mbuf = m_notify;
3489	sctp_add_to_readq(stcb->sctp_ep, stcb,
3490	    control,
3491	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3492}
3493
3494void
3495sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3496{
3497	struct mbuf *m_notify;
3498	struct sctp_queued_to_read *control;
3499	struct sctp_assoc_reset_event *strasoc;
3500
3501	if ((stcb == NULL) ||
3502	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3503		/* event not enabled */
3504		return;
3505	}
3506	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3507	if (m_notify == NULL)
3508		/* no space left */
3509		return;
3510	SCTP_BUF_LEN(m_notify) = 0;
3511	strasoc = mtod(m_notify, struct sctp_assoc_reset_event  *);
3512	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3513	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3514	strasoc->assocreset_flags = flag;
3515	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3516	strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
3517	strasoc->assocreset_local_tsn = sending_tsn;
3518	strasoc->assocreset_remote_tsn = recv_tsn;
3519	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3520	SCTP_BUF_NEXT(m_notify) = NULL;
3521	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3522		/* no space */
3523		sctp_m_freem(m_notify);
3524		return;
3525	}
3526	/* append to socket */
3527	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3528	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3529	                                 m_notify);
3530	if (control == NULL) {
3531		/* no memory */
3532		sctp_m_freem(m_notify);
3533		return;
3534	}
3535	control->spec_flags = M_NOTIFICATION;
3536	control->length = SCTP_BUF_LEN(m_notify);
3537	/* not that we need this */
3538	control->tail_mbuf = m_notify;
3539	sctp_add_to_readq(stcb->sctp_ep, stcb,
3540	    control,
3541	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3542}
3543
3544
3545
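/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected stream
 * ids (converted from network byte order), provided the stream reset event
 * is enabled on the endpoint.
 */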
3546static void
3547sctp_notify_stream_reset(struct sctp_tcb *stcb,
3548    int number_entries, uint16_t * list, int flag)
3549{
3550	struct mbuf *m_notify;
3551	struct sctp_queued_to_read *control;
3552	struct sctp_stream_reset_event *strreset;
3553	int len;
3554
3555	if ((stcb == NULL) ||
3556	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3557		/* event not enabled */
3558		return;
3559	}
3560
3561	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3562	if (m_notify == NULL)
3563		/* no space left */
3564		return;
3565	SCTP_BUF_LEN(m_notify) = 0;
3566	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3567	if (len > M_TRAILINGSPACE(m_notify)) {
3568		/* never enough room */
3569		sctp_m_freem(m_notify);
3570		return;
3571	}
3572	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3573	memset(strreset, 0, len);
3574	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3575	strreset->strreset_flags = flag;
3576	strreset->strreset_length = len;
3577	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3578	if (number_entries) {
3579		int i;
3580
3581		for (i = 0; i < number_entries; i++) {
3582			strreset->strreset_stream_list[i] = ntohs(list[i]);
3583		}
3584	}
3585	SCTP_BUF_LEN(m_notify) = len;
3586	SCTP_BUF_NEXT(m_notify) = NULL;
3587	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3588		/* no space */
3589		sctp_m_freem(m_notify);
3590		return;
3591	}
3592	/* append to socket */
3593	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3594	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3595	                                 m_notify);
3596	if (control == NULL) {
3597		/* no memory */
3598		sctp_m_freem(m_notify);
3599		return;
3600	}
3601	control->spec_flags = M_NOTIFICATION;
3602	control->length = SCTP_BUF_LEN(m_notify);
3603	/* not that we need this */
3604	control->tail_mbuf = m_notify;
3605	sctp_add_to_readq(stcb->sctp_ep, stcb,
3606	                  control,
3607	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3608}
3609
3610
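/*
 * Queue an SCTP_REMOTE_ERROR notification for an operational ERROR chunk
 * received from the peer.  The offending chunk is copied into the
 * notification when a large enough mbuf can be allocated; otherwise only
 * the fixed-size header is reported.
 */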
3611static void
3612sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3613{
3614	struct mbuf *m_notify;
3615	struct sctp_remote_error *sre;
3616	struct sctp_queued_to_read *control;
3617	size_t notif_len, chunk_len;
3618
3619	if ((stcb == NULL) ||
3620	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3621		return;
3622	}
3623	if (chunk != NULL) {
3624		chunk_len = ntohs(chunk->ch.chunk_length);
3625	} else {
3626		chunk_len = 0;
3627	}
3628	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3629	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3630	if (m_notify == NULL) {
3631		/* Retry with smaller value. */
3632		notif_len = sizeof(struct sctp_remote_error);
3633		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3634		if (m_notify == NULL) {
3635			return;
3636		}
3637	}
3638	SCTP_BUF_NEXT(m_notify) = NULL;
3639	sre = mtod(m_notify, struct sctp_remote_error *);
3640	memset(sre, 0, notif_len);
3641	sre->sre_type = SCTP_REMOTE_ERROR;
3642	sre->sre_flags = 0;
3643	sre->sre_length = sizeof(struct sctp_remote_error);
3644	sre->sre_error = error;
3645	sre->sre_assoc_id = sctp_get_associd(stcb);
3646	if (notif_len > sizeof(struct sctp_remote_error)) {
3647		memcpy(sre->sre_data, chunk, chunk_len);
3648		sre->sre_length += chunk_len;
3649	}
3650	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3651	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3652	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3653	                                 m_notify);
3654	if (control != NULL) {
3655		control->length = SCTP_BUF_LEN(m_notify);
3656		/* not that we need this */
3657		control->tail_mbuf = m_notify;
3658		control->spec_flags = M_NOTIFICATION;
3659		sctp_add_to_readq(stcb->sctp_ep, stcb,
3660		                  control,
3661		                  &stcb->sctp_socket->so_rcv, 1,
3662				  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3663	} else {
3664		sctp_m_freem(m_notify);
3665	}
3666}
3667
3668
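/*
 * Central notification dispatcher: map an internal SCTP_NOTIFY_* code onto
 * the specific notification builder above.  Nothing is reported once the
 * socket is gone, closed or can no longer receive, and interface
 * up/down/confirmed events are suppressed while the association is still in
 * COOKIE_WAIT or COOKIE_ECHOED.
 */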
3669void
3670sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3671    uint32_t error, void *data, int so_locked
3672#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3673    SCTP_UNUSED
3674#endif
3675    )
3676{
3677	if ((stcb == NULL) ||
3678	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3679	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3680	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3681		/* If the socket is gone we are out of here */
3682		return;
3683	}
3684#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
3685	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3686#else
3687	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
3688#endif
3689		return;
3690	}
3691#if defined(__APPLE__)
3692	if (so_locked) {
3693		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3694	} else {
3695		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3696	}
3697#endif
3698	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3699	    (stcb->asoc.state &  SCTP_STATE_COOKIE_ECHOED)) {
3700		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3701		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3702		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3703			/* Don't report these in front states */
3704			return;
3705		}
3706	}
3707	switch (notification) {
3708	case SCTP_NOTIFY_ASSOC_UP:
3709		if (stcb->asoc.assoc_up_sent == 0) {
3710			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3711			stcb->asoc.assoc_up_sent = 1;
3712		}
3713		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3714			sctp_notify_adaptation_layer(stcb);
3715		}
3716		if (stcb->asoc.auth_supported == 0) {
3717			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3718			                NULL, so_locked);
3719		}
3720		break;
3721	case SCTP_NOTIFY_ASSOC_DOWN:
3722		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3723#if defined(__Userspace__)
3724		if (stcb->sctp_ep->recv_callback) {
3725			if (stcb->sctp_socket) {
3726				union sctp_sockstore addr;
3727				struct sctp_rcvinfo rcv;
3728
3729				memset(&addr, 0, sizeof(union sctp_sockstore));
3730				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
3731				atomic_add_int(&stcb->asoc.refcnt, 1);
3732				SCTP_TCB_UNLOCK(stcb);
3733				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
3734				SCTP_TCB_LOCK(stcb);
3735				atomic_subtract_int(&stcb->asoc.refcnt, 1);
3736			}
3737		}
3738#endif
3739		break;
3740	case SCTP_NOTIFY_INTERFACE_DOWN:
3741		{
3742			struct sctp_nets *net;
3743
3744			net = (struct sctp_nets *)data;
3745			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3746			    (struct sockaddr *)&net->ro._l_addr, error);
3747			break;
3748		}
3749	case SCTP_NOTIFY_INTERFACE_UP:
3750		{
3751			struct sctp_nets *net;
3752
3753			net = (struct sctp_nets *)data;
3754			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3755			    (struct sockaddr *)&net->ro._l_addr, error);
3756			break;
3757		}
3758	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3759		{
3760			struct sctp_nets *net;
3761
3762			net = (struct sctp_nets *)data;
3763			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3764			    (struct sockaddr *)&net->ro._l_addr, error);
3765			break;
3766		}
3767	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3768		sctp_notify_send_failed2(stcb, error,
3769		                         (struct sctp_stream_queue_pending *)data, so_locked);
3770		break;
3771	case SCTP_NOTIFY_SENT_DG_FAIL:
3772		sctp_notify_send_failed(stcb, 1, error,
3773		    (struct sctp_tmit_chunk *)data, so_locked);
3774		break;
3775	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3776		sctp_notify_send_failed(stcb, 0, error,
3777		                        (struct sctp_tmit_chunk *)data, so_locked);
3778		break;
3779	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3780		{
3781			uint32_t val;
3782
3783			val = *((uint32_t *)data);
3784			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3785			break;
3786		}
3787	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3788		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3789		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3790			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3791		} else {
3792			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3793		}
3794		break;
3795	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3796		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3797		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3798			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3799		} else {
3800			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3801		}
3802		break;
3803	case SCTP_NOTIFY_ASSOC_RESTART:
3804		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3805		if (stcb->asoc.auth_supported == 0) {
3806			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3807			                NULL, so_locked);
3808		}
3809		break;
3810	case SCTP_NOTIFY_STR_RESET_SEND:
3811		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3812		break;
3813	case SCTP_NOTIFY_STR_RESET_RECV:
3814		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3815		break;
3816	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3817		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3818		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
3819		break;
3820	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3821		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3822		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
3823		break;
3824	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3825		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3826		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
3827		break;
3828	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3829		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3830		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
3831		break;
3832	case SCTP_NOTIFY_ASCONF_ADD_IP:
3833		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3834		    error);
3835		break;
3836	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3837		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3838		                             error);
3839		break;
3840	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3841		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3842		                             error);
3843		break;
3844	case SCTP_NOTIFY_PEER_SHUTDOWN:
3845		sctp_notify_shutdown_event(stcb);
3846		break;
3847	case SCTP_NOTIFY_AUTH_NEW_KEY:
3848		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3849		                           (uint16_t)(uintptr_t)data,
3850		                           so_locked);
3851		break;
3852	case SCTP_NOTIFY_AUTH_FREE_KEY:
3853		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3854		                           (uint16_t)(uintptr_t)data,
3855		                           so_locked);
3856		break;
3857	case SCTP_NOTIFY_NO_PEER_AUTH:
3858		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3859		                           (uint16_t)(uintptr_t)data,
3860		                           so_locked);
3861		break;
3862	case SCTP_NOTIFY_SENDER_DRY:
3863		sctp_notify_sender_dry_event(stcb, so_locked);
3864		break;
3865	case SCTP_NOTIFY_REMOTE_ERROR:
3866		sctp_notify_remote_error(stcb, error, data);
3867		break;
3868	default:
3869		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3870			__FUNCTION__, notification, notification);
3871		break;
3872	}			/* end switch */
3873}
3874
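/*
 * Fail all outbound data: walk the sent queue, the send queue and every
 * stream's output queue, notify the ULP of each sent/unsent chunk with the
 * given error and free the data.  The send lock is taken here unless the
 * caller indicates that it already holds it.
 */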
3875void
3876sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3877#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3878    SCTP_UNUSED
3879#endif
3880    )
3881{
3882	struct sctp_association *asoc;
3883	struct sctp_stream_out *outs;
3884	struct sctp_tmit_chunk *chk, *nchk;
3885	struct sctp_stream_queue_pending *sp, *nsp;
3886	int i;
3887
3888	if (stcb == NULL) {
3889		return;
3890	}
3891	asoc = &stcb->asoc;
3892	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3893		/* already being freed */
3894		return;
3895	}
3896#if defined(__APPLE__)
3897	if (so_locked) {
3898		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3899	} else {
3900		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3901	}
3902#endif
3903	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3904	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3905	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3906		return;
3907	}
3908	/* now go through all the gunk, freeing chunks */
3909	if (holds_lock == 0) {
3910		SCTP_TCB_SEND_LOCK(stcb);
3911	}
3912	/* sent queue SHOULD be empty */
3913	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3914		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3915		asoc->sent_queue_cnt--;
3916		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3917			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3918				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3919#ifdef INVARIANTS
3920			} else {
3921				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3922#endif
3923			}
3924		}
3925		if (chk->data != NULL) {
3926			sctp_free_bufspace(stcb, asoc, chk, 1);
3927			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3928			                error, chk, so_locked);
3929			if (chk->data) {
3930				sctp_m_freem(chk->data);
3931				chk->data = NULL;
3932			}
3933		}
3934		sctp_free_a_chunk(stcb, chk, so_locked);
3935		/*sa_ignore FREED_MEMORY*/
3936	}
3937	/* pending send queue SHOULD be empty */
3938	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3939		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3940		asoc->send_queue_cnt--;
3941		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3942			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3943#ifdef INVARIANTS
3944		} else {
3945			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3946#endif
3947		}
3948		if (chk->data != NULL) {
3949			sctp_free_bufspace(stcb, asoc, chk, 1);
3950			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3951			                error, chk, so_locked);
3952			if (chk->data) {
3953				sctp_m_freem(chk->data);
3954				chk->data = NULL;
3955			}
3956		}
3957		sctp_free_a_chunk(stcb, chk, so_locked);
3958		/*sa_ignore FREED_MEMORY*/
3959	}
3960	for (i = 0; i < asoc->streamoutcnt; i++) {
3961		/* For each stream */
3962		outs = &asoc->strmout[i];
3963		/* clean up any sends there */
3964		asoc->locked_on_sending = NULL;
3965		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3966			asoc->stream_queue_cnt--;
3967			TAILQ_REMOVE(&outs->outqueue, sp, next);
3968			sctp_free_spbufspace(stcb, asoc, sp);
3969			if (sp->data) {
3970				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3971						error, (void *)sp, so_locked);
3972				if (sp->data) {
3973					sctp_m_freem(sp->data);
3974					sp->data = NULL;
3975					sp->tail_mbuf = NULL;
3976					sp->length = 0;
3977				}
3978			}
3979			if (sp->net) {
3980				sctp_free_remote_addr(sp->net);
3981				sp->net = NULL;
3982			}
3983			/* Free the chunk */
3984			sctp_free_a_strmoq(stcb, sp, so_locked);
3985			/*sa_ignore FREED_MEMORY*/
3986		}
3987	}
3988
3989	if (holds_lock == 0) {
3990		SCTP_TCB_SEND_UNLOCK(stcb);
3991	}
3992}
3993
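/*
 * Report that the association was aborted: flag TCP-style sockets as
 * WAS_ABORTED, report all outbound data as failed and raise either a remote
 * or a local abort notification depending on from_peer.
 */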
3994void
3995sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3996			struct sctp_abort_chunk *abort, int so_locked
3997#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3998    SCTP_UNUSED
3999#endif
4000    )
4001{
4002	if (stcb == NULL) {
4003		return;
4004	}
4005#if defined(__APPLE__)
4006	if (so_locked) {
4007		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4008	} else {
4009		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4010	}
4011#endif
4012	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4013	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4014	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4015		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4016	}
4017	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4018	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4019	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4020		return;
4021	}
4022	/* Tell them we lost the asoc */
4023	sctp_report_all_outbound(stcb, error, 1, so_locked);
4024	if (from_peer) {
4025		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4026	} else {
4027		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4028	}
4029}
4030
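/*
 * Abort an association in response to the given packet: notify the ULP if
 * we have a TCB, send an ABORT (using the peer's vtag when known) and free
 * the association.
 */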
4031void
4032sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4033                       struct mbuf *m, int iphlen,
4034                       struct sockaddr *src, struct sockaddr *dst,
4035                       struct sctphdr *sh, struct mbuf *op_err,
4036#if defined(__FreeBSD__)
4037                       uint8_t use_mflowid, uint32_t mflowid,
4038#endif
4039                       uint32_t vrf_id, uint16_t port)
4040{
4041	uint32_t vtag;
4042#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4043	struct socket *so;
4044#endif
4045
4046	vtag = 0;
4047	if (stcb != NULL) {
4048		/* We have a TCB to abort, send notification too */
4049		vtag = stcb->asoc.peer_vtag;
4050		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4051		/* get the assoc vrf id and table id */
4052		vrf_id = stcb->asoc.vrf_id;
4053		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4054	}
4055	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4056#if defined(__FreeBSD__)
4057	                use_mflowid, mflowid,
4058#endif
4059	                vrf_id, port);
4060	if (stcb != NULL) {
4061		/* Ok, now lets free it */
4062#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4063		so = SCTP_INP_SO(inp);
4064		atomic_add_int(&stcb->asoc.refcnt, 1);
4065		SCTP_TCB_UNLOCK(stcb);
4066		SCTP_SOCKET_LOCK(so, 1);
4067		SCTP_TCB_LOCK(stcb);
4068		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4069#endif
4070		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4071		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4072		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4073			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4074		}
4075		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_4);
4076#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4077		SCTP_SOCKET_UNLOCK(so, 1);
4078#endif
4079	}
4080}
4081#ifdef SCTP_ASOCLOG_OF_TSNS
4082void
4083sctp_print_out_track_log(struct sctp_tcb *stcb)
4084{
4085#ifdef NOSIY_PRINTS
4086	int i;
4087	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4088	SCTP_PRINTF("IN bound TSN log-aaa\n");
4089	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4090		SCTP_PRINTF("None rcvd\n");
4091		goto none_in;
4092	}
4093	if (stcb->asoc.tsn_in_wrapped) {
4094		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4095			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4096				    stcb->asoc.in_tsnlog[i].tsn,
4097				    stcb->asoc.in_tsnlog[i].strm,
4098				    stcb->asoc.in_tsnlog[i].seq,
4099				    stcb->asoc.in_tsnlog[i].flgs,
4100				    stcb->asoc.in_tsnlog[i].sz);
4101		}
4102	}
4103	if (stcb->asoc.tsn_in_at) {
4104		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4105			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4106				    stcb->asoc.in_tsnlog[i].tsn,
4107				    stcb->asoc.in_tsnlog[i].strm,
4108				    stcb->asoc.in_tsnlog[i].seq,
4109				    stcb->asoc.in_tsnlog[i].flgs,
4110				    stcb->asoc.in_tsnlog[i].sz);
4111		}
4112	}
4113 none_in:
4114	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4115	if ((stcb->asoc.tsn_out_at == 0) &&
4116	    (stcb->asoc.tsn_out_wrapped == 0)) {
4117		SCTP_PRINTF("None sent\n");
4118	}
4119	if (stcb->asoc.tsn_out_wrapped) {
4120		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4121			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4122				    stcb->asoc.out_tsnlog[i].tsn,
4123				    stcb->asoc.out_tsnlog[i].strm,
4124				    stcb->asoc.out_tsnlog[i].seq,
4125				    stcb->asoc.out_tsnlog[i].flgs,
4126				    stcb->asoc.out_tsnlog[i].sz);
4127		}
4128	}
4129	if (stcb->asoc.tsn_out_at) {
4130		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4131			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4132				    stcb->asoc.out_tsnlog[i].tsn,
4133				    stcb->asoc.out_tsnlog[i].strm,
4134				    stcb->asoc.out_tsnlog[i].seq,
4135				    stcb->asoc.out_tsnlog[i].flgs,
4136				    stcb->asoc.out_tsnlog[i].sz);
4137		}
4138	}
4139#endif
4140}
4141#endif
4142
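/*
 * Abort an existing association: notify the ULP unless the socket is gone,
 * send an ABORT chunk carrying op_err to the peer, update the statistics
 * and free the association.  Without a TCB, only a lingering endpoint that
 * has no associations left is torn down.
 */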
4143void
4144sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4145                          struct mbuf *op_err,
4146                          int so_locked
4147#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4148                          SCTP_UNUSED
4149#endif
4150)
4151{
4152#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4153	struct socket *so;
4154#endif
4155
4156#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4157	so = SCTP_INP_SO(inp);
4158#endif
4159#if defined(__APPLE__)
4160	if (so_locked) {
4161		sctp_lock_assert(SCTP_INP_SO(inp));
4162	} else {
4163		sctp_unlock_assert(SCTP_INP_SO(inp));
4164	}
4165#endif
4166	if (stcb == NULL) {
4167		/* Got to have a TCB */
4168		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4169			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4170#if defined(__APPLE__)
4171				if (!so_locked) {
4172					SCTP_SOCKET_LOCK(so, 1);
4173				}
4174#endif
4175				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4176						SCTP_CALLED_DIRECTLY_NOCMPSET);
4177#if defined(__APPLE__)
4178				if (!so_locked) {
4179					SCTP_SOCKET_UNLOCK(so, 1);
4180				}
4181#endif
4182			}
4183		}
4184		return;
4185	} else {
4186		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4187	}
4188	/* notify the ulp */
4189	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4190		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4191	}
4192	/* notify the peer */
4193	sctp_send_abort_tcb(stcb, op_err, so_locked);
4194	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4195	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4196	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4197		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4198	}
4199	/* now free the asoc */
4200#ifdef SCTP_ASOCLOG_OF_TSNS
4201	sctp_print_out_track_log(stcb);
4202#endif
4203#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4204	if (!so_locked) {
4205		atomic_add_int(&stcb->asoc.refcnt, 1);
4206		SCTP_TCB_UNLOCK(stcb);
4207		SCTP_SOCKET_LOCK(so, 1);
4208		SCTP_TCB_LOCK(stcb);
4209		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4210	}
4211#endif
4212	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_5);
4213#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4214	if (!so_locked) {
4215		SCTP_SOCKET_UNLOCK(so, 1);
4216	}
4217#endif
4218}
4219
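/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association (cf. RFC 4960, Section 8.4): answer a SHUTDOWN ACK with a
 * SHUTDOWN COMPLETE, ignore ABORT, SHUTDOWN COMPLETE and PACKET DROPPED
 * chunks, and otherwise send an ABORT unless the sctp_blackhole sysctl
 * suppresses it.
 */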
4220void
4221sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4222                 struct sockaddr *src, struct sockaddr *dst,
4223                 struct sctphdr *sh, struct sctp_inpcb *inp,
4224                 struct mbuf *cause,
4225#if defined(__FreeBSD__)
4226                 uint8_t use_mflowid, uint32_t mflowid,
4227#endif
4228                 uint32_t vrf_id, uint16_t port)
4229{
4230	struct sctp_chunkhdr *ch, chunk_buf;
4231	unsigned int chk_length;
4232	int contains_init_chunk;
4233
4234	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4235	/* Generate a TO address for future reference */
4236	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4237		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4238#if defined(__APPLE__)
4239			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
4240#endif
4241			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4242					SCTP_CALLED_DIRECTLY_NOCMPSET);
4243#if defined(__APPLE__)
4244			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
4245#endif
4246		}
4247	}
4248	contains_init_chunk = 0;
4249	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4250	    sizeof(*ch), (uint8_t *) & chunk_buf);
4251	while (ch != NULL) {
4252		chk_length = ntohs(ch->chunk_length);
4253		if (chk_length < sizeof(*ch)) {
4254			/* break to abort land */
4255			break;
4256		}
4257		switch (ch->chunk_type) {
4258		case SCTP_INIT:
4259			contains_init_chunk = 1;
4260			break;
4261		case SCTP_PACKET_DROPPED:
4262			/* we don't respond to pkt-dropped */
4263			return;
4264		case SCTP_ABORT_ASSOCIATION:
4265			/* we don't respond with an ABORT to an ABORT */
4266			return;
4267		case SCTP_SHUTDOWN_COMPLETE:
4268			/*
4269			 * we ignore it since we are not waiting for it and
4270			 * peer is gone
4271			 */
4272			return;
4273		case SCTP_SHUTDOWN_ACK:
4274			sctp_send_shutdown_complete2(src, dst, sh,
4275#if defined(__FreeBSD__)
4276			                             use_mflowid, mflowid,
4277#endif
4278			                             vrf_id, port);
4279			return;
4280		default:
4281			break;
4282		}
4283		offset += SCTP_SIZE32(chk_length);
4284		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4285		    sizeof(*ch), (uint8_t *) & chunk_buf);
4286	}
4287	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4288	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4289	     (contains_init_chunk == 0))) {
4290		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4291#if defined(__FreeBSD__)
4292		                use_mflowid, mflowid,
4293#endif
4294		                vrf_id, port);
4295	}
4296}
4297
4298/*
4299 * check the inbound datagram to make sure there is not an abort inside it,
4300 * if there is return 1, else return 0.
4301 */
4302int
4303sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4304{
4305	struct sctp_chunkhdr *ch;
4306	struct sctp_init_chunk *init_chk, chunk_buf;
4307	int offset;
4308	unsigned int chk_length;
4309
4310	offset = iphlen + sizeof(struct sctphdr);
4311	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4312	    (uint8_t *) & chunk_buf);
4313	while (ch != NULL) {
4314		chk_length = ntohs(ch->chunk_length);
4315		if (chk_length < sizeof(*ch)) {
4316			/* packet is probably corrupt */
4317			break;
4318		}
4319		/* we seem to be ok, is it an abort? */
4320		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4321			/* yep, tell them */
4322			return (1);
4323		}
4324		if (ch->chunk_type == SCTP_INITIATION) {
4325			/* need to update the Vtag */
4326			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4327			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4328			if (init_chk != NULL) {
4329				*vtagfill = ntohl(init_chk->init.initiate_tag);
4330			}
4331		}
4332		/* Nope, move to the next chunk */
4333		offset += SCTP_SIZE32(chk_length);
4334		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4335		    sizeof(*ch), (uint8_t *) & chunk_buf);
4336	}
4337	return (0);
4338}
4339
4340/*
4341 * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4342 * set (i.e. it's 0), so we create this function to compare link-local scopes
4343 */
4344#ifdef INET6
4345uint32_t
4346sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4347{
4348#if defined(__Userspace__)
4349    /*__Userspace__ Returning 1 here always */
4350#endif
4351#if defined(SCTP_EMBEDDED_V6_SCOPE)
4352	struct sockaddr_in6 a, b;
4353
4354	/* save copies */
4355	a = *addr1;
4356	b = *addr2;
4357
4358	if (a.sin6_scope_id == 0)
4359#ifdef SCTP_KAME
4360		if (sa6_recoverscope(&a)) {
4361#else
4362		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
4363#endif				/* SCTP_KAME */
4364			/* can't get scope, so can't match */
4365			return (0);
4366		}
4367	if (b.sin6_scope_id == 0)
4368#ifdef SCTP_KAME
4369		if (sa6_recoverscope(&b)) {
4370#else
4371		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
4372#endif				/* SCTP_KAME */
4373			/* can't get scope, so can't match */
4374			return (0);
4375		}
4376	if (a.sin6_scope_id != b.sin6_scope_id)
4377		return (0);
4378#else
4379	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
4380		return (0);
4381#endif /* SCTP_EMBEDDED_V6_SCOPE */
4382
4383	return (1);
4384}
4385
4386#if defined(SCTP_EMBEDDED_V6_SCOPE)
4387/*
4388 * returns a sockaddr_in6 with embedded scope recovered and removed
4389 */
4390struct sockaddr_in6 *
4391sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4392{
4393	/* check and strip embedded scope junk */
4394	if (addr->sin6_family == AF_INET6) {
4395		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4396			if (addr->sin6_scope_id == 0) {
4397				*store = *addr;
4398#ifdef SCTP_KAME
4399				if (!sa6_recoverscope(store)) {
4400#else
4401				if (!in6_recoverscope(store, &store->sin6_addr,
4402				    NULL)) {
4403#endif /* SCTP_KAME */
4404					/* use the recovered scope */
4405					addr = store;
4406				}
4407			} else {
4408				/* else, return the original "to" addr */
4409				in6_clearscope(&addr->sin6_addr);
4410			}
4411		}
4412	}
4413	return (addr);
4414}
4415#endif /* SCTP_EMBEDDED_V6_SCOPE */
4416#endif
4417
4418/*
4419 * Are the two addresses the same?  This is currently a "scopeless" check;
4420 * it returns 1 if they are the same, 0 if not.
4421 */
4422int
4423sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4424{
4425
4426	/* must be valid */
4427	if (sa1 == NULL || sa2 == NULL)
4428		return (0);
4429
4430	/* must be the same family */
4431	if (sa1->sa_family != sa2->sa_family)
4432		return (0);
4433
4434	switch (sa1->sa_family) {
4435#ifdef INET6
4436	case AF_INET6:
4437	{
4438		/* IPv6 addresses */
4439		struct sockaddr_in6 *sin6_1, *sin6_2;
4440
4441		sin6_1 = (struct sockaddr_in6 *)sa1;
4442		sin6_2 = (struct sockaddr_in6 *)sa2;
4443		return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4444		    sin6_2));
4445	}
4446#endif
4447#ifdef INET
4448	case AF_INET:
4449	{
4450		/* IPv4 addresses */
4451		struct sockaddr_in *sin_1, *sin_2;
4452
4453		sin_1 = (struct sockaddr_in *)sa1;
4454		sin_2 = (struct sockaddr_in *)sa2;
4455		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4456	}
4457#endif
4458#if defined(__Userspace__)
4459	case AF_CONN:
4460	{
4461		struct sockaddr_conn *sconn_1, *sconn_2;
4462
4463		sconn_1 = (struct sockaddr_conn *)sa1;
4464		sconn_2 = (struct sockaddr_conn *)sa2;
4465		return (sconn_1->sconn_addr == sconn_2->sconn_addr);
4466	}
4467#endif
4468	default:
4469		/* we don't do these... */
4470		return (0);
4471	}
4472}
4473
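/*
 * Print the given sockaddr (IPv4, IPv6 or, for the userspace stack,
 * AF_CONN) in a human readable form for debugging.
 */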
4474void
4475sctp_print_address(struct sockaddr *sa)
4476{
4477#ifdef INET6
4478#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4479	char ip6buf[INET6_ADDRSTRLEN];
4480#endif
4481#endif
4482
4483	switch (sa->sa_family) {
4484#ifdef INET6
4485	case AF_INET6:
4486	{
4487		struct sockaddr_in6 *sin6;
4488
4489		sin6 = (struct sockaddr_in6 *)sa;
4490#if defined(__Userspace__)
4491		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
4492			    ntohs(sin6->sin6_addr.s6_addr16[0]),
4493			    ntohs(sin6->sin6_addr.s6_addr16[1]),
4494			    ntohs(sin6->sin6_addr.s6_addr16[2]),
4495			    ntohs(sin6->sin6_addr.s6_addr16[3]),
4496			    ntohs(sin6->sin6_addr.s6_addr16[4]),
4497			    ntohs(sin6->sin6_addr.s6_addr16[5]),
4498			    ntohs(sin6->sin6_addr.s6_addr16[6]),
4499			    ntohs(sin6->sin6_addr.s6_addr16[7]),
4500			    ntohs(sin6->sin6_port),
4501			    sin6->sin6_scope_id);
4502#else
4503#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4504		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4505			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4506			    ntohs(sin6->sin6_port),
4507			    sin6->sin6_scope_id);
4508#else
4509		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4510			    ip6_sprintf(&sin6->sin6_addr),
4511			    ntohs(sin6->sin6_port),
4512			    sin6->sin6_scope_id);
4513#endif
4514#endif
4515		break;
4516	}
4517#endif
4518#ifdef INET
4519	case AF_INET:
4520	{
4521		struct sockaddr_in *sin;
4522		unsigned char *p;
4523
4524		sin = (struct sockaddr_in *)sa;
4525		p = (unsigned char *)&sin->sin_addr;
4526		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4527			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4528		break;
4529	}
4530#endif
4531#if defined(__Userspace__)
4532	case AF_CONN:
4533	{
4534		struct sockaddr_conn *sconn;
4535
4536		sconn = (struct sockaddr_conn *)sa;
4537		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
4538		break;
4539	}
4540#endif
4541	default:
4542		SCTP_PRINTF("?\n");
4543		break;
4544	}
4545}
4546
4547void
4548sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4549    struct sctp_inpcb *new_inp,
4550    struct sctp_tcb *stcb,
4551    int waitflags)
4552{
4553	/*
4554	 * go through our old INP and pull off any control structures that
4555	 * belong to stcb and move them to the new inp.
4556	 */
4557	struct socket *old_so, *new_so;
4558	struct sctp_queued_to_read *control, *nctl;
4559	struct sctp_readhead tmp_queue;
4560	struct mbuf *m;
4561	int error = 0;
4562
4563	old_so = old_inp->sctp_socket;
4564	new_so = new_inp->sctp_socket;
4565	TAILQ_INIT(&tmp_queue);
4566#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4567	SOCKBUF_LOCK(&(old_so->so_rcv));
4568#endif
4569#if defined(__FreeBSD__) || defined(__APPLE__)
4570	error = sblock(&old_so->so_rcv, waitflags);
4571#endif
4572#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4573	SOCKBUF_UNLOCK(&(old_so->so_rcv));
4574#endif
4575	if (error) {
4576		/* Gak, can't get sblock, we have a problem.
4577		 * data will be left stranded.. and we
4578		 * don't dare look at it since the
4579		 * other thread may be reading something.
4580		 * Oh well, it's a screwed up app that does
4581		 * a peeloff OR an accept while reading
4582		 * from the main socket... actually it's
4583		 * only the peeloff() case, since I think
4584		 * read will fail on a listening socket..
4585		 */
4586		return;
4587	}
4588	/* lock the socket buffers */
4589	SCTP_INP_READ_LOCK(old_inp);
4590	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4591		/* Pull off all for our target stcb */
4592		if (control->stcb == stcb) {
4593			/* remove it we want it */
4594			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4595			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4596			m = control->data;
4597			while (m) {
4598				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4599					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
4600				}
4601				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4602				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4603					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4604				}
4605				m = SCTP_BUF_NEXT(m);
4606			}
4607		}
4608	}
4609	SCTP_INP_READ_UNLOCK(old_inp);
4610	/* Remove the sb-lock on the old socket */
4611#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4612	SOCKBUF_LOCK(&(old_so->so_rcv));
4613#endif
4614#if defined(__APPLE__)
4615	sbunlock(&old_so->so_rcv, 1);
4616#endif
4617
4618#if defined(__FreeBSD__)
4619	sbunlock(&old_so->so_rcv);
4620#endif
4621#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4622	SOCKBUF_UNLOCK(&(old_so->so_rcv));
4623#endif
4624	/* Now we move them over to the new socket buffer */
4625	SCTP_INP_READ_LOCK(new_inp);
4626	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4627		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4628		m = control->data;
4629		while (m) {
4630			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4631				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4632			}
4633			sctp_sballoc(stcb, &new_so->so_rcv, m);
4634			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4635				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4636			}
4637			m = SCTP_BUF_NEXT(m);
4638		}
4639	}
4640	SCTP_INP_READ_UNLOCK(new_inp);
4641}
4642
4643void
4644sctp_add_to_readq(struct sctp_inpcb *inp,
4645    struct sctp_tcb *stcb,
4646    struct sctp_queued_to_read *control,
4647    struct sockbuf *sb,
4648    int end,
4649    int inp_read_lock_held,
4650    int so_locked
4651#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4652    SCTP_UNUSED
4653#endif
4654    )
4655{
4656	/*
4657	 * Here we must place the control on the end of the socket read
4658	 * queue AND increment sb_cc so that select will work properly on
4659	 * read.
4660	 */
4661	struct mbuf *m, *prev = NULL;
4662
4663	if (inp == NULL) {
4664		/* Gak, TSNH!! */
4665#ifdef INVARIANTS
4666		panic("Gak, inp NULL on add_to_readq");
4667#endif
4668		return;
4669	}
4670#if defined(__APPLE__)
4671	if (so_locked) {
4672		sctp_lock_assert(SCTP_INP_SO(inp));
4673	} else {
4674		sctp_unlock_assert(SCTP_INP_SO(inp));
4675	}
4676#endif
4677	if (inp_read_lock_held == 0)
4678		SCTP_INP_READ_LOCK(inp);
4679	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4680		sctp_free_remote_addr(control->whoFrom);
4681		if (control->data) {
4682			sctp_m_freem(control->data);
4683			control->data = NULL;
4684		}
4685		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4686		if (inp_read_lock_held == 0)
4687			SCTP_INP_READ_UNLOCK(inp);
4688		return;
4689	}
4690	if (!(control->spec_flags & M_NOTIFICATION)) {
4691		atomic_add_int(&inp->total_recvs, 1);
4692		if (!control->do_not_ref_stcb) {
4693			atomic_add_int(&stcb->total_recvs, 1);
4694		}
4695	}
4696	m = control->data;
4697	control->held_length = 0;
4698	control->length = 0;
4699	while (m) {
4700		if (SCTP_BUF_LEN(m) == 0) {
4701			/* Skip mbufs with NO length */
4702			if (prev == NULL) {
4703				/* First one */
4704				control->data = sctp_m_free(m);
4705				m = control->data;
4706			} else {
4707				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4708				m = SCTP_BUF_NEXT(prev);
4709			}
4710			if (m == NULL) {
4711				control->tail_mbuf = prev;
4712			}
4713			continue;
4714		}
4715		prev = m;
4716		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4717			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4718		}
4719		sctp_sballoc(stcb, sb, m);
4720		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4721			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4722		}
4723		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4724		m = SCTP_BUF_NEXT(m);
4725	}
4726	if (prev != NULL) {
4727		control->tail_mbuf = prev;
4728	} else {
4729		/* Everything got collapsed out?? */
4730		sctp_free_remote_addr(control->whoFrom);
4731		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4732		if (inp_read_lock_held == 0)
4733			SCTP_INP_READ_UNLOCK(inp);
4734		return;
4735	}
4736	if (end) {
4737		control->end_added = 1;
4738	}
4739#if defined(__Userspace__)
4740	if (inp->recv_callback) {
4741		if (inp_read_lock_held == 0)
4742			SCTP_INP_READ_UNLOCK(inp);
4743		if (control->end_added == 1) {
4744			struct socket *so;
4745			struct mbuf *m;
4746			char *buffer;
4747			struct sctp_rcvinfo rcv;
4748			union sctp_sockstore addr;
4749			int flags;
4750
4751			if ((buffer = malloc(control->length)) == NULL) {
4752				return;
4753			}
4754			so = stcb->sctp_socket;
4755			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4756				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4757			}
4758			atomic_add_int(&stcb->asoc.refcnt, 1);
4759			SCTP_TCB_UNLOCK(stcb);
4760			m_copydata(control->data, 0, control->length, buffer);
4761			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4762			rcv.rcv_sid = control->sinfo_stream;
4763			rcv.rcv_ssn = control->sinfo_ssn;
4764			rcv.rcv_flags = control->sinfo_flags;
4765			rcv.rcv_ppid = control->sinfo_ppid;
4766			rcv.rcv_tsn = control->sinfo_tsn;
4767			rcv.rcv_cumtsn = control->sinfo_cumtsn;
4768			rcv.rcv_context = control->sinfo_context;
4769			rcv.rcv_assoc_id = control->sinfo_assoc_id;
4770			memset(&addr, 0, sizeof(union sctp_sockstore));
4771			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4772#ifdef INET
4773			case AF_INET:
4774				addr.sin = control->whoFrom->ro._l_addr.sin;
4775				break;
4776#endif
4777#ifdef INET6
4778			case AF_INET6:
4779				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
4780				break;
4781#endif
4782			case AF_CONN:
4783				addr.sconn = control->whoFrom->ro._l_addr.sconn;
4784				break;
4785			default:
4786				addr.sa = control->whoFrom->ro._l_addr.sa;
4787				break;
4788			}
4789			flags = MSG_EOR;
4790			if (control->spec_flags & M_NOTIFICATION) {
4791				flags |= MSG_NOTIFICATION;
4792			}
4793			inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
4794			SCTP_TCB_LOCK(stcb);
4795			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4796			sctp_free_remote_addr(control->whoFrom);
4797			control->whoFrom = NULL;
4798			sctp_m_freem(control->data);
4799			control->data = NULL;
4800			control->length = 0;
4801			sctp_free_a_readq(stcb, control);
4802		}
4803		return;
4804	}
4805#endif
4806	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4807	if (inp_read_lock_held == 0)
4808		SCTP_INP_READ_UNLOCK(inp);
4809	if (inp && inp->sctp_socket) {
4810		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4811			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4812		} else {
4813#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4814			struct socket *so;
4815
4816			so = SCTP_INP_SO(inp);
4817			if (!so_locked) {
4818				if (stcb) {
4819					atomic_add_int(&stcb->asoc.refcnt, 1);
4820					SCTP_TCB_UNLOCK(stcb);
4821				}
4822				SCTP_SOCKET_LOCK(so, 1);
4823				if (stcb) {
4824					SCTP_TCB_LOCK(stcb);
4825					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4826				}
4827				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4828					SCTP_SOCKET_UNLOCK(so, 1);
4829					return;
4830				}
4831			}
4832#endif
4833			sctp_sorwakeup(inp, inp->sctp_socket);
4834#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4835			if (!so_locked) {
4836				SCTP_SOCKET_UNLOCK(so, 1);
4837			}
4838#endif
4839		}
4840	}
4841}
4842
4843
4844int
4845sctp_append_to_readq(struct sctp_inpcb *inp,
4846    struct sctp_tcb *stcb,
4847    struct sctp_queued_to_read *control,
4848    struct mbuf *m,
4849    int end,
4850    int ctls_cumack,
4851    struct sockbuf *sb)
4852{
4853	/*
4854	 * A partial delivery API event is underway. OR we are appending on
4855	 * the reassembly queue.
4856	 *
4857	 * If PDAPI this means we need to add m to the end of the data.
4858	 * Increase the length in the control AND increment the sb_cc.
4859	 * Otherwise sb is NULL and all we need to do is put it at the end
4860	 * of the mbuf chain.
4861	 */
4862	int len = 0;
4863	struct mbuf *mm, *tail = NULL, *prev = NULL;
4864
4865	if (inp) {
4866		SCTP_INP_READ_LOCK(inp);
4867	}
4868	if (control == NULL) {
4869	get_out:
4870		if (inp) {
4871			SCTP_INP_READ_UNLOCK(inp);
4872		}
4873		return (-1);
4874	}
4875	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4876		SCTP_INP_READ_UNLOCK(inp);
4877		return (0);
4878	}
4879	if (control->end_added) {
4880		/* huh this one is complete? */
4881		goto get_out;
4882	}
4883	mm = m;
4884	if (mm == NULL) {
4885		goto get_out;
4886	}
4887
4888	while (mm) {
4889		if (SCTP_BUF_LEN(mm) == 0) {
4890			/* Skip mbufs with NO length */
4891			if (prev == NULL) {
4892				/* First one */
4893				m = sctp_m_free(mm);
4894				mm = m;
4895			} else {
4896				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4897				mm = SCTP_BUF_NEXT(prev);
4898			}
4899			continue;
4900		}
4901		prev = mm;
4902		len += SCTP_BUF_LEN(mm);
4903		if (sb) {
4904			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4905				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4906			}
4907			sctp_sballoc(stcb, sb, mm);
4908			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4909				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4910			}
4911		}
4912		mm = SCTP_BUF_NEXT(mm);
4913	}
4914	if (prev) {
4915		tail = prev;
4916	} else {
4917		/* Really there should always be a prev */
4918		if (m == NULL) {
4919			/* Huh nothing left? */
4920#ifdef INVARIANTS
4921			panic("Nothing left to add?");
4922#else
4923			goto get_out;
4924#endif
4925		}
4926		tail = m;
4927	}
4928	if (control->tail_mbuf) {
4929		/* append */
4930		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4931		control->tail_mbuf = tail;
4932	} else {
4933		/* nothing there */
4934#ifdef INVARIANTS
4935		if (control->data != NULL) {
4936			panic("This should NOT happen");
4937		}
4938#endif
4939		control->data = m;
4940		control->tail_mbuf = tail;
4941	}
4942	atomic_add_int(&control->length, len);
4943	if (end) {
4944		/* message is complete */
4945		if (stcb && (control == stcb->asoc.control_pdapi)) {
4946			stcb->asoc.control_pdapi = NULL;
4947		}
4948		control->held_length = 0;
4949		control->end_added = 1;
4950	}
4951	if (stcb == NULL) {
4952		control->do_not_ref_stcb = 1;
4953	}
4954	/*
4955	 * When we are appending in partial delivery, the cum-ack is used
4956	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4957	 * is populated in the outbound sinfo structure from the true cumack
4958	 * if the association exists...
4959	 */
4960	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4961#if defined(__Userspace__)
4962	if (inp->recv_callback) {
4963		uint32_t pd_point, length;
4964
4965		length = control->length;
4966		if (stcb != NULL && stcb->sctp_socket != NULL) {
4967			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
4968			               stcb->sctp_ep->partial_delivery_point);
4969		} else {
4970			pd_point = inp->partial_delivery_point;
4971		}
4972		if ((control->end_added == 1) || (length >= pd_point)) {
4973			struct socket *so;
4974			char *buffer;
4975			struct sctp_rcvinfo rcv;
4976			union sctp_sockstore addr;
4977			int flags;
4978
4979			if ((buffer = malloc(control->length)) == NULL) {
4980				SCTP_INP_READ_UNLOCK(inp);
				return (-1);
4981			}
4982			so = stcb->sctp_socket;
4983			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4984				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4985			}
4986			m_copydata(control->data, 0, control->length, buffer);
4987			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4988			rcv.rcv_sid = control->sinfo_stream;
4989			rcv.rcv_ssn = control->sinfo_ssn;
4990			rcv.rcv_flags = control->sinfo_flags;
4991			rcv.rcv_ppid = control->sinfo_ppid;
4992			rcv.rcv_tsn = control->sinfo_tsn;
4993			rcv.rcv_cumtsn = control->sinfo_cumtsn;
4994			rcv.rcv_context = control->sinfo_context;
4995			rcv.rcv_assoc_id = control->sinfo_assoc_id;
4996			memset(&addr, 0, sizeof(union sctp_sockstore));
4997			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4998#ifdef INET
4999			case AF_INET:
5000				addr.sin = control->whoFrom->ro._l_addr.sin;
5001				break;
5002#endif
5003#ifdef INET6
5004			case AF_INET6:
5005				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
5006				break;
5007#endif
5008			case AF_CONN:
5009				addr.sconn = control->whoFrom->ro._l_addr.sconn;
5010				break;
5011			default:
5012				addr.sa = control->whoFrom->ro._l_addr.sa;
5013				break;
5014			}
5015			flags = 0;
5016			if (control->end_added == 1) {
5017				flags |= MSG_EOR;
5018			}
5019			if (control->spec_flags & M_NOTIFICATION) {
5020				flags |= MSG_NOTIFICATION;
5021			}
5022			sctp_m_freem(control->data);
5023			control->data = NULL;
5024			control->tail_mbuf = NULL;
5025			control->length = 0;
5026			if (control->end_added) {
5027				sctp_free_remote_addr(control->whoFrom);
5028				control->whoFrom = NULL;
5029				sctp_free_a_readq(stcb, control);
5030			} else {
5031				control->some_taken = 1;
5032			}
5033			atomic_add_int(&stcb->asoc.refcnt, 1);
5034			SCTP_TCB_UNLOCK(stcb);
5035			inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
5036			SCTP_TCB_LOCK(stcb);
5037			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5038		}
5039		if (inp)
5040			SCTP_INP_READ_UNLOCK(inp);
5041		return (0);
5042	}
5043#endif
5044	if (inp) {
5045		SCTP_INP_READ_UNLOCK(inp);
5046	}
5047	if (inp && inp->sctp_socket) {
5048		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
5049			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
5050		} else {
5051#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5052			struct socket *so;
5053
5054			so = SCTP_INP_SO(inp);
5055			if (stcb) {
5056				atomic_add_int(&stcb->asoc.refcnt, 1);
5057				SCTP_TCB_UNLOCK(stcb);
5058			}
5059			SCTP_SOCKET_LOCK(so, 1);
5060			if (stcb) {
5061				SCTP_TCB_LOCK(stcb);
5062				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5063			}
5064			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5065				SCTP_SOCKET_UNLOCK(so, 1);
5066				return (0);
5067			}
5068#endif
5069			sctp_sorwakeup(inp, inp->sctp_socket);
5070#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5071			SCTP_SOCKET_UNLOCK(so, 1);
5072#endif
5073		}
5074	}
5075	return (0);
5076}
5077
5078
5079
5080/*************HOLD THIS COMMENT FOR PATCH FILE OF
5081 *************ALTERNATE ROUTING CODE
5082 */
5083
5084/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5085 *************ALTERNATE ROUTING CODE
5086 */
5087
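/*
 * Build an error cause consisting of a parameter header with the given
 * cause code followed by the ASCII info string.  Returns NULL if the code
 * is zero, info is NULL or no mbuf could be allocated.
 */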
5088struct mbuf *
5089sctp_generate_cause(uint16_t code, char *info)
5090{
5091	struct mbuf *m;
5092	struct sctp_gen_error_cause *cause;
5093	size_t info_len, len;
5094
5095	if ((code == 0) || (info == NULL)) {
5096		return (NULL);
5097	}
5098	info_len = strlen(info);
5099	len = sizeof(struct sctp_paramhdr) + info_len;
5100	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5101	if (m != NULL) {
5102		SCTP_BUF_LEN(m) = len;
5103		cause = mtod(m, struct sctp_gen_error_cause *);
5104		cause->code = htons(code);
5105		cause->length = htons((uint16_t)len);
5106		memcpy(cause->info, info, info_len);
5107	}
5108	return (m);
5109}
5110
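/*
 * Build a "No User Data" error cause carrying the TSN of the offending
 * DATA chunk.  The TSN is expected to be passed in network byte order.
 */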
5111struct mbuf *
5112sctp_generate_no_user_data_cause(uint32_t tsn)
5113{
5114	struct mbuf *m;
5115	struct sctp_error_no_user_data *no_user_data_cause;
5116	size_t len;
5117
5118	len = sizeof(struct sctp_error_no_user_data);
5119	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5120	if (m != NULL) {
5121		SCTP_BUF_LEN(m) = len;
5122		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5123		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5124		no_user_data_cause->cause.length = htons((uint16_t)len);
5125		no_user_data_cause->tsn = tsn; /* tsn is passed in as NBO */
5126	}
5127	return (m);
5128}
5129
5130#ifdef SCTP_MBCNT_LOGGING
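/*
 * This definition of sctp_free_bufspace() is only compiled when
 * SCTP_MBCNT_LOGGING is defined (a non-logging form is presumably provided
 * elsewhere): release the book-kept output queue space for tp1, log the
 * change when mbcnt logging is enabled and shrink so_snd for TCP-style
 * sockets.
 */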
5131void
5132sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5133    struct sctp_tmit_chunk *tp1, int chk_cnt)
5134{
5135	if (tp1->data == NULL) {
5136		return;
5137	}
5138	asoc->chunks_on_out_queue -= chk_cnt;
5139	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5140		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5141			       asoc->total_output_queue_size,
5142			       tp1->book_size,
5143			       0,
5144			       tp1->mbcnt);
5145	}
5146	if (asoc->total_output_queue_size >= tp1->book_size) {
5147		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5148	} else {
5149		asoc->total_output_queue_size = 0;
5150	}
5151
5152	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5153				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5154		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5155			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5156		} else {
5157			stcb->sctp_socket->so_snd.sb_cc = 0;
5158
5159		}
5160	}
5161}
5162
5163#endif
5164
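/*
 * Abandon a PR-SCTP message: mark every fragment of the message on the sent
 * and send queues as SCTP_FORWARD_TSN_SKIP, notify the ULP and free the
 * data.  If the tail of the message is still on the stream out queue, it is
 * discarded and, when necessary, a LAST_FRAG chunk is synthesized so that a
 * FORWARD-TSN can cover the whole message.  Returns the number of bytes
 * released.
 */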
5165int
5166sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5167			   uint8_t sent, int so_locked
5168#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5169			   SCTP_UNUSED
5170#endif
5171	)
5172{
5173	struct sctp_stream_out *strq;
5174	struct sctp_tmit_chunk *chk = NULL, *tp2;
5175	struct sctp_stream_queue_pending *sp;
5176	uint16_t stream = 0, seq = 0;
5177	uint8_t foundeom = 0;
5178	int ret_sz = 0;
5179	int notdone;
5180	int do_wakeup_routine = 0;
5181
5182#if defined(__APPLE__)
5183	if (so_locked) {
5184		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
5185	} else {
5186		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
5187	}
5188#endif
5189	stream = tp1->rec.data.stream_number;
5190	seq = tp1->rec.data.stream_seq;
5191	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5192		stcb->asoc.abandoned_sent[0]++;
5193		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5194		stcb->asoc.strmout[stream].abandoned_sent[0]++;
5195#if defined(SCTP_DETAILED_STR_STATS)
5196		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5197#endif
5198	} else {
5199		stcb->asoc.abandoned_unsent[0]++;
5200		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5201		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
5202#if defined(SCTP_DETAILED_STR_STATS)
5203		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5204#endif
5205	}
5206	do {
5207		ret_sz += tp1->book_size;
5208		if (tp1->data != NULL) {
5209			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5210				sctp_flight_size_decrease(tp1);
5211				sctp_total_flight_decrease(stcb, tp1);
5212			}
5213			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5214			stcb->asoc.peers_rwnd += tp1->send_size;
5215			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5216			if (sent) {
5217				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5218			} else {
5219				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5220			}
5221			if (tp1->data) {
5222				sctp_m_freem(tp1->data);
5223				tp1->data = NULL;
5224			}
5225			do_wakeup_routine = 1;
5226			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5227				stcb->asoc.sent_queue_cnt_removeable--;
5228			}
5229		}
5230		tp1->sent = SCTP_FORWARD_TSN_SKIP;
5231		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5232		    SCTP_DATA_NOT_FRAG) {
5233			/* not frag'ed, we are done */
5234			notdone = 0;
5235			foundeom = 1;
5236		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5237			/* end of frag, we are done */
5238			notdone = 0;
5239			foundeom = 1;
5240		} else {
5241			/*
5242			 * Its a begin or middle piece, we must mark all of
5243			 * it
5244			 */
5245			notdone = 1;
5246			tp1 = TAILQ_NEXT(tp1, sctp_next);
5247		}
5248	} while (tp1 && notdone);
5249	if (foundeom == 0) {
5250		/*
5251		 * The multi-part message was scattered across the send and
5252		 * sent queue.
5253		 */
5254		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5255			if ((tp1->rec.data.stream_number != stream) ||
5256			    (tp1->rec.data.stream_seq != seq)) {
5257				break;
5258			}
5259			/* Save to chk in case we have some on the stream out
5260			 * queue. If so, and we have an un-transmitted one,
5261			 * we don't have to fudge the TSN.
5262			 */
5263			chk = tp1;
5264			ret_sz += tp1->book_size;
5265			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5266			if (sent) {
5267				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5268			} else {
5269				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5270			}
5271			if (tp1->data) {
5272				sctp_m_freem(tp1->data);
5273				tp1->data = NULL;
5274			}
5275			/* No flight involved here; book the size to 0 */
5276			tp1->book_size = 0;
5277			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5278				foundeom = 1;
5279			}
5280			do_wakeup_routine = 1;
5281			tp1->sent = SCTP_FORWARD_TSN_SKIP;
5282			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5283			/* on to the sent queue so we can wait for it to be passed by. */
5284			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5285					  sctp_next);
5286			stcb->asoc.send_queue_cnt--;
5287			stcb->asoc.sent_queue_cnt++;
5288		}
5289	}
5290	if (foundeom == 0) {
5291		/*
5292		 * Still no eom found. That means there
5293		 * is stuff left on the stream out queue.. yuck.
5294		 */
5295		SCTP_TCB_SEND_LOCK(stcb);
5296		strq = &stcb->asoc.strmout[stream];
5297		sp = TAILQ_FIRST(&strq->outqueue);
5298		if (sp != NULL) {
5299			sp->discard_rest = 1;
5300			/*
5301			 * We may need to put a chunk on the
5302			 * queue that holds the TSN that
5303			 * would have been sent with the LAST
5304			 * bit.
5305			 */
5306			if (chk == NULL) {
5307				/* Yep, we have to */
5308				sctp_alloc_a_chunk(stcb, chk);
5309				if (chk == NULL) {
					/* We are hosed. All we can
					 * do is nothing, which will
					 * cause an abort if the peer
					 * is paying attention.
					 */
5315					goto oh_well;
5316				}
5317				memset(chk, 0, sizeof(*chk));
5318				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
5319				chk->sent = SCTP_FORWARD_TSN_SKIP;
5320				chk->asoc = &stcb->asoc;
5321				chk->rec.data.stream_seq = strq->next_sequence_send;
5322				chk->rec.data.stream_number = sp->stream;
5323				chk->rec.data.payloadtype = sp->ppid;
5324				chk->rec.data.context = sp->context;
5325				chk->flags = sp->act_flags;
5326				if (sp->net)
5327					chk->whoTo = sp->net;
5328				else
5329					chk->whoTo = stcb->asoc.primary_destination;
5330				atomic_add_int(&chk->whoTo->ref_count, 1);
5331#if defined(__FreeBSD__) || defined(__Panda__)
5332				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5333#else
5334				chk->rec.data.TSN_seq = stcb->asoc.sending_seq++;
5335#endif
5336				stcb->asoc.pr_sctp_cnt++;
5337				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5338				stcb->asoc.sent_queue_cnt++;
5339				stcb->asoc.pr_sctp_cnt++;
5340			} else {
5341				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5342			}
5343			strq->next_sequence_send++;
5344		oh_well:
5345			if (sp->data) {
				/* Pull any data to free up the SB and
				 * allow the sender to "add more" while
				 * we throw this away :-)
				 */
5350				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5351				ret_sz += sp->length;
5352				do_wakeup_routine = 1;
5353				sp->some_taken = 1;
5354				sctp_m_freem(sp->data);
5355				sp->data = NULL;
5356				sp->tail_mbuf = NULL;
5357				sp->length = 0;
5358			}
5359		}
5360		SCTP_TCB_SEND_UNLOCK(stcb);
5361	}
5362	if (do_wakeup_routine) {
5363#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5364		struct socket *so;
5365
5366		so = SCTP_INP_SO(stcb->sctp_ep);
5367		if (!so_locked) {
5368			atomic_add_int(&stcb->asoc.refcnt, 1);
5369			SCTP_TCB_UNLOCK(stcb);
5370			SCTP_SOCKET_LOCK(so, 1);
5371			SCTP_TCB_LOCK(stcb);
5372			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5373			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5374				/* assoc was freed while we were unlocked */
5375				SCTP_SOCKET_UNLOCK(so, 1);
5376				return (ret_sz);
5377			}
5378		}
5379#endif
5380		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5381#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5382		if (!so_locked) {
5383			SCTP_SOCKET_UNLOCK(so, 1);
5384		}
5385#endif
5386	}
5387	return (ret_sz);
5388}
5389
5390/*
5391 * checks to see if the given address, sa, is one that is currently known by
5392 * the kernel note: can't distinguish the same address on multiple interfaces
5393 * and doesn't handle multiple addresses with different zone/scope id's note:
5394 * ifa_ifwithaddr() compares the entire sockaddr struct
5395 */
5396struct sctp_ifa *
5397sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5398		    int holds_lock)
5399{
5400	struct sctp_laddr *laddr;
5401
5402	if (holds_lock == 0) {
5403		SCTP_INP_RLOCK(inp);
5404	}
5405
5406	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5407		if (laddr->ifa == NULL)
5408			continue;
5409		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5410			continue;
5411#ifdef INET
5412		if (addr->sa_family == AF_INET) {
5413			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5414			    laddr->ifa->address.sin.sin_addr.s_addr) {
5415				/* found him. */
5416				if (holds_lock == 0) {
5417					SCTP_INP_RUNLOCK(inp);
5418				}
5419				return (laddr->ifa);
5421			}
5422		}
5423#endif
5424#ifdef INET6
5425		if (addr->sa_family == AF_INET6) {
5426			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5427						 &laddr->ifa->address.sin6)) {
5428				/* found him. */
5429				if (holds_lock == 0) {
5430					SCTP_INP_RUNLOCK(inp);
5431				}
5432				return (laddr->ifa);
5434			}
5435		}
5436#endif
5437#if defined(__Userspace__)
5438		if (addr->sa_family == AF_CONN) {
5439			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5440				/* found him. */
5441				if (holds_lock == 0) {
5442					SCTP_INP_RUNLOCK(inp);
5443				}
5444				return (laddr->ifa);
5446			}
5447		}
5448#endif
5449	}
5450	if (holds_lock == 0) {
5451		SCTP_INP_RUNLOCK(inp);
5452	}
5453	return (NULL);
5454}
5455
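/*
 * Compute a small hash value for an address: the 32-bit words of the
 * address are summed and then folded by xor-ing the upper 16 bits into
 * the lower 16 bits (e.g. for an IPv4 address a, the result is
 * a ^ (a >> 16)). The caller masks the result with vrf_addr_hashmark to
 * select a bucket.
 */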
5456uint32_t
5457sctp_get_ifa_hash_val(struct sockaddr *addr)
5458{
5459	switch (addr->sa_family) {
5460#ifdef INET
5461	case AF_INET:
5462	{
5463		struct sockaddr_in *sin;
5464
5465		sin = (struct sockaddr_in *)addr;
5466		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5467	}
5468#endif
5469#ifdef INET6
5470	case AF_INET6:
5471	{
5472		struct sockaddr_in6 *sin6;
5473		uint32_t hash_of_addr;
5474
5475		sin6 = (struct sockaddr_in6 *)addr;
5476#if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
5477		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5478				sin6->sin6_addr.s6_addr32[1] +
5479				sin6->sin6_addr.s6_addr32[2] +
5480				sin6->sin6_addr.s6_addr32[3]);
5481#else
5482		hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
5483				((uint32_t *)&sin6->sin6_addr)[1] +
5484				((uint32_t *)&sin6->sin6_addr)[2] +
5485				((uint32_t *)&sin6->sin6_addr)[3]);
5486#endif
5487		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5488		return (hash_of_addr);
5489	}
5490#endif
5491#if defined(__Userspace__)
5492	case AF_CONN:
5493	{
5494		struct sockaddr_conn *sconn;
5495		uintptr_t temp;
5496
5497		sconn = (struct sockaddr_conn *)addr;
5498		temp = (uintptr_t)sconn->sconn_addr;
5499		return ((uint32_t)(temp ^ (temp >> 16)));
5500	}
5501#endif
5502	default:
5503		break;
5504	}
5505	return (0);
5506}
5507
5508struct sctp_ifa *
5509sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5510{
5511	struct sctp_ifa *sctp_ifap;
5512	struct sctp_vrf *vrf;
5513	struct sctp_ifalist *hash_head;
5514	uint32_t hash_of_addr;
5515
5516	if (holds_lock == 0)
5517		SCTP_IPI_ADDR_RLOCK();
5518
5519	vrf = sctp_find_vrf(vrf_id);
5520	if (vrf == NULL) {
5521	stage_right:
5522		if (holds_lock == 0)
5523			SCTP_IPI_ADDR_RUNLOCK();
5524		return (NULL);
5525	}
5526
5527	hash_of_addr = sctp_get_ifa_hash_val(addr);
5528
5529	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5530	if (hash_head == NULL) {
5531		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5532			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5533			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5534		sctp_print_address(addr);
5535		SCTP_PRINTF("No such bucket for address\n");
5536		if (holds_lock == 0)
5537			SCTP_IPI_ADDR_RUNLOCK();
5538
5539		return (NULL);
5540	}
5541	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5542		if (sctp_ifap == NULL) {
5543#ifdef INVARIANTS
5544			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
5546#else
5547			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5548			goto stage_right;
5549#endif
5550		}
5551		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5552			continue;
5553#ifdef INET
5554		if (addr->sa_family == AF_INET) {
5555			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5556			    sctp_ifap->address.sin.sin_addr.s_addr) {
5557				/* found him. */
5558				if (holds_lock == 0)
5559					SCTP_IPI_ADDR_RUNLOCK();
5560				return (sctp_ifap);
5562			}
5563		}
5564#endif
5565#ifdef INET6
5566		if (addr->sa_family == AF_INET6) {
5567			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5568						 &sctp_ifap->address.sin6)) {
5569				/* found him. */
5570				if (holds_lock == 0)
5571					SCTP_IPI_ADDR_RUNLOCK();
5572				return (sctp_ifap);
5574			}
5575		}
5576#endif
5577#if defined(__Userspace__)
5578		if (addr->sa_family == AF_CONN) {
5579			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
5580				/* found him. */
5581				if (holds_lock == 0)
5582					SCTP_IPI_ADDR_RUNLOCK();
5583				return (sctp_ifap);
5585			}
5586		}
5587#endif
5588	}
5589	if (holds_lock == 0)
5590		SCTP_IPI_ADDR_RUNLOCK();
5591	return (NULL);
5592}
5593
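/*
 * Called from the receive path after the user has pulled data off the
 * socket. If the receive window has opened up by at least rwnd_req bytes
 * since the last report, send a window-update SACK and kick the output
 * path; otherwise just remember how much has been freed so far.
 */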
5594static void
5595sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5596	       uint32_t rwnd_req)
5597{
5598	/* User pulled some data, do we need a rwnd update? */
5599	int r_unlocked = 0;
5600	uint32_t dif, rwnd;
5601	struct socket *so = NULL;
5602
5603	if (stcb == NULL)
5604		return;
5605
5606	atomic_add_int(&stcb->asoc.refcnt, 1);
5607
5608	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5609				SCTP_STATE_SHUTDOWN_RECEIVED |
5610				SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: if we are being freed, no update is needed. */
5612		goto no_lock;
5613	}
5614	SCTP_INP_INCR_REF(stcb->sctp_ep);
5615	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5616	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5617		goto out;
5618	}
5619	so = stcb->sctp_socket;
5620	if (so == NULL) {
5621		goto out;
5622	}
5623	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to warrant a look? */
5625	*freed_so_far = 0;
	/* Yep, it's worth a look and the lock overhead. */
5627
5628	/* Figure out what the rwnd would be */
5629	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5630	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5631		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5632	} else {
5633		dif = 0;
5634	}
5635	if (dif >= rwnd_req) {
5636		if (hold_rlock) {
5637			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5638			r_unlocked = 1;
5639		}
5640		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5641			/*
5642			 * One last check before we allow the guy possibly
5643			 * to get in. There is a race, where the guy has not
5644			 * reached the gate. In that case
5645			 */
5646			goto out;
5647		}
5648		SCTP_TCB_LOCK(stcb);
5649		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5650			/* No reports here */
5651			SCTP_TCB_UNLOCK(stcb);
5652			goto out;
5653		}
5654		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5655		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5656
5657		sctp_chunk_output(stcb->sctp_ep, stcb,
5658				  SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5659		/* make sure no timer is running */
5660		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_6);
5661		SCTP_TCB_UNLOCK(stcb);
5662	} else {
5663		/* Update how much we have pending */
5664		stcb->freed_by_sorcv_sincelast = dif;
5665	}
5666 out:
5667	if (so && r_unlocked && hold_rlock) {
5668		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5669	}
5670
5671	SCTP_INP_DECR_REF(stcb->sctp_ep);
5672 no_lock:
5673	atomic_add_int(&stcb->asoc.refcnt, -1);
5674	return;
5675}
5676
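/*
 * Core receive routine, shared by the soreceive hook below. A minimal
 * sketch of a call that copies data straight into a uio and skips sinfo
 * handling (the local variable names are illustrative only):
 *
 *	flags = 0;
 *	error = sctp_sorecvmsg(so, uio, NULL, NULL, 0, &flags, NULL, 0);
 *
 * With mp == NULL the data is copied out via uiomove(); with a non-NULL
 * mp the mbuf chain itself is handed back to the caller.
 */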
5677int
5678sctp_sorecvmsg(struct socket *so,
5679    struct uio *uio,
5680    struct mbuf **mp,
5681    struct sockaddr *from,
5682    int fromlen,
5683    int *msg_flags,
5684    struct sctp_sndrcvinfo *sinfo,
5685    int filling_sinfo)
5686{
5687	/*
5688	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5689	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5690	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5691	 * On the way out we may send out any combination of:
5692	 * MSG_NOTIFICATION MSG_EOR
5693	 *
5694	 */
5695	struct sctp_inpcb *inp = NULL;
5696	int my_len = 0;
5697	int cp_len = 0, error = 0;
5698	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5699	struct mbuf *m = NULL;
5700	struct sctp_tcb *stcb = NULL;
5701	int wakeup_read_socket = 0;
5702	int freecnt_applied = 0;
5703	int out_flags = 0, in_flags = 0;
5704	int block_allowed = 1;
5705	uint32_t freed_so_far = 0;
5706	uint32_t copied_so_far = 0;
5707	int in_eeor_mode = 0;
5708	int no_rcv_needed = 0;
5709	uint32_t rwnd_req = 0;
5710	int hold_sblock = 0;
5711	int hold_rlock = 0;
5712	int slen = 0;
5713	uint32_t held_length = 0;
5714#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5715	int sockbuf_lock = 0;
5716#endif
5717
5718	if (uio == NULL) {
5719		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5720		return (EINVAL);
5721	}
5722
5723	if (msg_flags) {
5724		in_flags = *msg_flags;
5725		if (in_flags & MSG_PEEK)
5726			SCTP_STAT_INCR(sctps_read_peeks);
5727	} else {
5728		in_flags = 0;
5729	}
5730#if defined(__APPLE__)
5731#if defined(APPLE_LEOPARD)
5732	slen = uio->uio_resid;
5733#else
5734	slen = uio_resid(uio);
5735#endif
5736#else
5737	slen = uio->uio_resid;
5738#endif
5739
5740	/* Pull in and set up our int flags */
5741	if (in_flags & MSG_OOB) {
		/* Out-of-band data is NOT supported */
5743		return (EOPNOTSUPP);
5744	}
5745	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5746		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5747		return (EINVAL);
5748	}
5749	if ((in_flags & (MSG_DONTWAIT
5750#if defined(__FreeBSD__) && __FreeBSD_version > 500000
5751			 | MSG_NBIO
5752#endif
5753		     )) ||
5754	    SCTP_SO_IS_NBIO(so)) {
5755		block_allowed = 0;
5756	}
5757	/* setup the endpoint */
5758	inp = (struct sctp_inpcb *)so->so_pcb;
5759	if (inp == NULL) {
5760		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5761		return (EFAULT);
5762	}
5763	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least an MTU's worth */
5765	if (rwnd_req < SCTP_MIN_RWND)
5766		rwnd_req = SCTP_MIN_RWND;
5767	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5768	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5769#if defined(__APPLE__)
5770#if defined(APPLE_LEOPARD)
5771		sctp_misc_ints(SCTP_SORECV_ENTER,
5772			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5773#else
5774		sctp_misc_ints(SCTP_SORECV_ENTER,
5775			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
5776#endif
5777#else
5778		sctp_misc_ints(SCTP_SORECV_ENTER,
5779			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5780#endif
5781	}
5782#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5783	SOCKBUF_LOCK(&so->so_rcv);
5784	hold_sblock = 1;
5785#endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5787#if defined(__APPLE__)
5788#if defined(APPLE_LEOPARD)
5789		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5790			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5791#else
5792		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5793			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
5794#endif
5795#else
5796		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5797			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5798#endif
5799	}
5800
5801#if defined(__APPLE__)
5802	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5803#endif
5804
5805#if defined(__FreeBSD__)
5806	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5807#endif
5808	if (error) {
5809		goto release_unlocked;
5810	}
5811#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
	sockbuf_lock = 1;
5813#endif
5814 restart:
5815#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5816	if (hold_sblock == 0) {
5817		SOCKBUF_LOCK(&so->so_rcv);
5818		hold_sblock = 1;
5819	}
5820#endif
5821#if defined(__APPLE__)
5822	sbunlock(&so->so_rcv, 1);
5823#endif
5824
5825#if defined(__FreeBSD__) && __FreeBSD_version < 700000
5826	sbunlock(&so->so_rcv);
5827#endif
5828
5829 restart_nosblocks:
5830	if (hold_sblock == 0) {
5831		SOCKBUF_LOCK(&so->so_rcv);
5832		hold_sblock = 1;
5833	}
5834	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5835	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5836		goto out;
5837	}
5838#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
5839	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5840#else
5841	if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5842#endif
5843		if (so->so_error) {
5844			error = so->so_error;
5845			if ((in_flags & MSG_PEEK) == 0)
5846				so->so_error = 0;
5847			goto out;
5848		} else {
5849			if (so->so_rcv.sb_cc == 0) {
5850				/* indicate EOF */
5851				error = 0;
5852				goto out;
5853			}
5854		}
5855	}
5856	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5857		/* we need to wait for data */
5858		if ((so->so_rcv.sb_cc == 0) &&
5859		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5860		     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5861			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/* For the active open side clear flags for
				 * re-use; passive open is blocked by connect.
				 */
5865				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5866					/* You were aborted, passive side always hits here */
5867					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5868					error = ECONNRESET;
5869				}
5870				so->so_state &= ~(SS_ISCONNECTING |
5871						  SS_ISDISCONNECTING |
5872						  SS_ISCONFIRMING |
5873						  SS_ISCONNECTED);
5874				if (error == 0) {
5875					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5876						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5877						error = ENOTCONN;
5878					}
5879				}
5880				goto out;
5881			}
5882		}
5883		error = sbwait(&so->so_rcv);
5884		if (error) {
5885			goto out;
5886		}
5887		held_length = 0;
5888		goto restart_nosblocks;
5889	} else if (so->so_rcv.sb_cc == 0) {
5890		if (so->so_error) {
5891			error = so->so_error;
5892			if ((in_flags & MSG_PEEK) == 0)
5893				so->so_error = 0;
5894		} else {
5895			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5896			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5897				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/* For the active open side clear flags
					 * for re-use; passive open is blocked
					 * by connect.
					 */
5901					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5902						/* You were aborted, passive side always hits here */
5903						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5904						error = ECONNRESET;
5905					}
5906					so->so_state &= ~(SS_ISCONNECTING |
5907							  SS_ISDISCONNECTING |
5908							  SS_ISCONFIRMING |
5909							  SS_ISCONNECTED);
5910					if (error == 0) {
5911						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5912							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5913							error = ENOTCONN;
5914						}
5915					}
5916					goto out;
5917				}
5918			}
5919			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5920			error = EWOULDBLOCK;
5921		}
5922		goto out;
5923	}
5924	if (hold_sblock == 1) {
5925		SOCKBUF_UNLOCK(&so->so_rcv);
5926		hold_sblock = 0;
5927	}
5928#if defined(__APPLE__)
5929	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5930#endif
5931#if defined(__FreeBSD__) && __FreeBSD_version < 700000
5932	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
5933#endif
5934	/* we possibly have data we can read */
5935	/*sa_ignore FREED_MEMORY*/
5936	control = TAILQ_FIRST(&inp->read_queue);
5937	if (control == NULL) {
		/* This could happen when the appender did the
		 * increment but has not yet done the tailq insert
		 * onto the read_queue.
		 */
5942		if (hold_rlock == 0) {
5943			SCTP_INP_READ_LOCK(inp);
5944		}
5945		control = TAILQ_FIRST(&inp->read_queue);
5946		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5947#ifdef INVARIANTS
5948			panic("Huh, its non zero and nothing on control?");
5949#endif
5950			so->so_rcv.sb_cc = 0;
5951		}
5952		SCTP_INP_READ_UNLOCK(inp);
5953		hold_rlock = 0;
5954		goto restart;
5955	}
5956
5957	if ((control->length == 0) &&
5958	    (control->do_not_ref_stcb)) {
		/* Clean-up code for freeing an assoc that left behind a
		 * pdapi - maybe a peer in EEOR mode that just closed after
		 * sending and never indicated an EOR.
		 */
5963		if (hold_rlock == 0) {
5964			hold_rlock = 1;
5965			SCTP_INP_READ_LOCK(inp);
5966		}
5967		control->held_length = 0;
5968		if (control->data) {
			/* Hmm, there is data here; fix up the length. */
5970			struct mbuf *m_tmp;
5971			int cnt = 0;
5972			m_tmp = control->data;
5973			while (m_tmp) {
5974				cnt += SCTP_BUF_LEN(m_tmp);
5975				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5976					control->tail_mbuf = m_tmp;
5977					control->end_added = 1;
5978				}
5979				m_tmp = SCTP_BUF_NEXT(m_tmp);
5980			}
5981			control->length = cnt;
5982		} else {
5983			/* remove it */
5984			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5986			sctp_free_remote_addr(control->whoFrom);
5987			sctp_free_a_readq(stcb, control);
5988		}
5989		if (hold_rlock) {
5990			hold_rlock = 0;
5991			SCTP_INP_READ_UNLOCK(inp);
5992		}
5993		goto restart;
5994	}
5995	if ((control->length == 0) &&
5996	    (control->end_added == 1)) {
5997		/* Do we also need to check for (control->pdapi_aborted == 1)? */
5998		if (hold_rlock == 0) {
5999			hold_rlock = 1;
6000			SCTP_INP_READ_LOCK(inp);
6001		}
6002		TAILQ_REMOVE(&inp->read_queue, control, next);
6003		if (control->data) {
6004#ifdef INVARIANTS
6005			panic("control->data not null but control->length == 0");
6006#else
6007			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
6008			sctp_m_freem(control->data);
6009			control->data = NULL;
6010#endif
6011		}
6012		if (control->aux_data) {
			sctp_m_free(control->aux_data);
6014			control->aux_data = NULL;
6015		}
6016		sctp_free_remote_addr(control->whoFrom);
6017		sctp_free_a_readq(stcb, control);
6018		if (hold_rlock) {
6019			hold_rlock = 0;
6020			SCTP_INP_READ_UNLOCK(inp);
6021		}
6022		goto restart;
6023	}
6024	if (control->length == 0) {
6025		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
6026		    (filling_sinfo)) {
			/* find a more suitable one than this */
6028			ctl = TAILQ_NEXT(control, next);
6029			while (ctl) {
6030				if ((ctl->stcb != control->stcb) && (ctl->length) &&
6031				    (ctl->some_taken ||
6032				     (ctl->spec_flags & M_NOTIFICATION) ||
6033				     ((ctl->do_not_ref_stcb == 0) &&
6034				      (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
6035					) {
6036					/*-
6037					 * If we have a different TCB next, and there is data
6038					 * present. If we have already taken some (pdapi), OR we can
6039					 * ref the tcb and no delivery as started on this stream, we
6040					 * take it. Note we allow a notification on a different
6041					 * assoc to be delivered..
6042					 */
6043					control = ctl;
6044					goto found_one;
6045				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
6046					   (ctl->length) &&
6047					   ((ctl->some_taken) ||
6048					    ((ctl->do_not_ref_stcb == 0) &&
6049					     ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
6050					     (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
6051					/*-
6052					 * If we have the same tcb, and there is data present, and we
6053					 * have the strm interleave feature present. Then if we have
6054					 * taken some (pdapi) or we can refer to tht tcb AND we have
6055					 * not started a delivery for this stream, we can take it.
6056					 * Note we do NOT allow a notificaiton on the same assoc to
6057					 * be delivered.
6058					 */
6059					control = ctl;
6060					goto found_one;
6061				}
6062				ctl = TAILQ_NEXT(ctl, next);
6063			}
6064		}
6065		/*
6066		 * if we reach here, not suitable replacement is available
6067		 * <or> fragment interleave is NOT on. So stuff the sb_cc
6068		 * into the our held count, and its time to sleep again.
6069		 */
6070		held_length = so->so_rcv.sb_cc;
6071		control->held_length = so->so_rcv.sb_cc;
6072		goto restart;
6073	}
6074	/* Clear the held length since there is something to read */
6075	control->held_length = 0;
6076	if (hold_rlock) {
6077		SCTP_INP_READ_UNLOCK(inp);
6078		hold_rlock = 0;
6079	}
6080 found_one:
6081	/*
6082	 * If we reach here, control has a some data for us to read off.
6083	 * Note that stcb COULD be NULL.
6084	 */
6085	control->some_taken++;
6086	if (hold_sblock) {
6087		SOCKBUF_UNLOCK(&so->so_rcv);
6088		hold_sblock = 0;
6089	}
6090	stcb = control->stcb;
6091	if (stcb) {
6092		if ((control->do_not_ref_stcb == 0) &&
6093		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6094			if (freecnt_applied == 0)
6095				stcb = NULL;
6096		} else if (control->do_not_ref_stcb == 0) {
6097			/* you can't free it on me please */
6098			/*
6099			 * The lock on the socket buffer protects us so the
6100			 * free code will stop. But since we used the socketbuf
6101			 * lock and the sender uses the tcb_lock to increment,
6102			 * we need to use the atomic add to the refcnt
6103			 */
6104			if (freecnt_applied) {
6105#ifdef INVARIANTS
6106				panic("refcnt already incremented");
6107#else
6108				SCTP_PRINTF("refcnt already incremented?\n");
6109#endif
6110			} else {
6111				atomic_add_int(&stcb->asoc.refcnt, 1);
6112				freecnt_applied = 1;
6113			}
6114			/*
6115			 * Setup to remember how much we have not yet told
6116			 * the peer our rwnd has opened up. Note we grab
6117			 * the value from the tcb from last time.
6118			 * Note too that sack sending clears this when a sack
6119			 * is sent, which is fine. Once we hit the rwnd_req,
6120			 * we then will go to the sctp_user_rcvd() that will
6121			 * not lock until it KNOWs it MUST send a WUP-SACK.
6122			 */
6123			freed_so_far = stcb->freed_by_sorcv_sincelast;
6124			stcb->freed_by_sorcv_sincelast = 0;
6125		}
6126        }
6127	if (stcb &&
6128	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
6129	    control->do_not_ref_stcb == 0) {
6130		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6131	}
6132
	/* First let's get off the sinfo and sockaddr info */
6134	if ((sinfo) && filling_sinfo) {
6135		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
6136		nxt = TAILQ_NEXT(control, next);
6137		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6138		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6139			struct sctp_extrcvinfo *s_extra;
6140			s_extra = (struct sctp_extrcvinfo *)sinfo;
6141			if ((nxt) &&
6142			    (nxt->length)) {
6143				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6144				if (nxt->sinfo_flags & SCTP_UNORDERED) {
6145					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6146				}
6147				if (nxt->spec_flags & M_NOTIFICATION) {
6148					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6149				}
6150				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
6151				s_extra->sreinfo_next_length = nxt->length;
6152				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
6153				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
6154				if (nxt->tail_mbuf != NULL) {
6155					if (nxt->end_added) {
6156						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6157					}
6158				}
6159			} else {
				/* We explicitly zero this, since the memcpy
				 * picked up some other things beyond the older
				 * sinfo_ fields that are on the control
				 * structure :-D
				 */
6164				nxt = NULL;
6165				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6166				s_extra->sreinfo_next_aid = 0;
6167				s_extra->sreinfo_next_length = 0;
6168				s_extra->sreinfo_next_ppid = 0;
6169				s_extra->sreinfo_next_stream = 0;
6170			}
6171		}
6172		/*
6173		 * update off the real current cum-ack, if we have an stcb.
6174		 */
6175		if ((control->do_not_ref_stcb == 0) && stcb)
6176			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6177		/*
6178		 * mask off the high bits, we keep the actual chunk bits in
6179		 * there.
6180		 */
6181		sinfo->sinfo_flags &= 0x00ff;
6182		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6183			sinfo->sinfo_flags |= SCTP_UNORDERED;
6184		}
6185	}
6186#ifdef SCTP_ASOCLOG_OF_TSNS
6187	{
6188		int index, newindex;
6189		struct sctp_pcbtsn_rlog *entry;
6190		do {
6191			index = inp->readlog_index;
6192			newindex = index + 1;
6193			if (newindex >= SCTP_READ_LOG_SIZE) {
6194				newindex = 0;
6195			}
6196		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6197		entry = &inp->readlog[index];
6198		entry->vtag = control->sinfo_assoc_id;
6199		entry->strm = control->sinfo_stream;
6200		entry->seq = control->sinfo_ssn;
6201		entry->sz = control->length;
6202		entry->flgs = control->sinfo_flags;
6203	}
6204#endif
6205	if (fromlen && from) {
6206#ifdef HAVE_SA_LEN
6207		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
6208#endif
6209		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6210#ifdef INET6
6211			case AF_INET6:
6212#ifndef HAVE_SA_LEN
6213				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in6));
6214#endif
6215				((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
6216				break;
6217#endif
6218#ifdef INET
6219			case AF_INET:
6220#ifndef HAVE_SA_LEN
6221				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in));
6222#endif
6223				((struct sockaddr_in *)from)->sin_port = control->port_from;
6224				break;
6225#endif
6226#if defined(__Userspace__)
6227			case AF_CONN:
6228#ifndef HAVE_SA_LEN
6229				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_conn));
6230#endif
6231				((struct sockaddr_conn *)from)->sconn_port = control->port_from;
6232				break;
6233#endif
6234			default:
6235#ifndef HAVE_SA_LEN
6236				cp_len = min((size_t)fromlen, sizeof(struct sockaddr));
6237#endif
6238				break;
6239		}
6240		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
6241
6242#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
6244		    (from->sa_family == AF_INET) &&
6245		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
6246			struct sockaddr_in *sin;
6247			struct sockaddr_in6 sin6;
6248
6249			sin = (struct sockaddr_in *)from;
6250			bzero(&sin6, sizeof(sin6));
6251			sin6.sin6_family = AF_INET6;
6252#ifdef HAVE_SIN6_LEN
6253			sin6.sin6_len = sizeof(struct sockaddr_in6);
6254#endif
6255#if defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
6256			((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
6257			bcopy(&sin->sin_addr,
6258			      &(((uint32_t *)&sin6.sin6_addr)[3]),
6259			      sizeof(uint32_t));
6260#elif defined(__Windows__)
6261			((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
6262			bcopy(&sin->sin_addr,
6263			      &((uint32_t *)&sin6.sin6_addr)[3],
6264			      sizeof(uint32_t));
6265#else
6266			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
6267			bcopy(&sin->sin_addr,
6268			      &sin6.sin6_addr.s6_addr32[3],
6269			      sizeof(sin6.sin6_addr.s6_addr32[3]));
6270#endif
6271			sin6.sin6_port = sin->sin_port;
6272			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
6273		}
6274#endif
6275#if defined(SCTP_EMBEDDED_V6_SCOPE)
6276#ifdef INET6
6277		{
6278			struct sockaddr_in6 lsa6, *from6;
6279
6280			from6 = (struct sockaddr_in6 *)from;
6281			sctp_recover_scope_mac(from6, (&lsa6));
6282		}
6283#endif
6284#endif
6285	}
6286	/* now copy out what data we can */
6287	if (mp == NULL) {
6288		/* copy out each mbuf in the chain up to length */
6289	get_more_data:
6290		m = control->data;
6291		while (m) {
6292			/* Move out all we can */
6293#if defined(__APPLE__)
6294#if defined(APPLE_LEOPARD)
6295			cp_len = (int)uio->uio_resid;
6296#else
6297			cp_len = (int)uio_resid(uio);
6298#endif
6299#else
6300			cp_len = (int)uio->uio_resid;
6301#endif
6302			my_len = (int)SCTP_BUF_LEN(m);
6303			if (cp_len > my_len) {
6304				/* not enough in this buf */
6305				cp_len = my_len;
6306			}
6307			if (hold_rlock) {
6308				SCTP_INP_READ_UNLOCK(inp);
6309				hold_rlock = 0;
6310			}
6311#if defined(__APPLE__)
6312			SCTP_SOCKET_UNLOCK(so, 0);
6313#endif
6314			if (cp_len > 0)
6315				error = uiomove(mtod(m, char *), cp_len, uio);
6316#if defined(__APPLE__)
6317			SCTP_SOCKET_LOCK(so, 0);
6318#endif
6319			/* re-read */
6320			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6321				goto release;
6322			}
6323
6324			if ((control->do_not_ref_stcb == 0) && stcb &&
6325			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6326				no_rcv_needed = 1;
6327			}
6328			if (error) {
6329				/* error we are out of here */
6330				goto release;
6331			}
6332			if ((SCTP_BUF_NEXT(m) == NULL) &&
6333			    (cp_len >= SCTP_BUF_LEN(m)) &&
6334			    ((control->end_added == 0) ||
6335			     (control->end_added &&
6336			      (TAILQ_NEXT(control, next) == NULL)))
6337				) {
6338				SCTP_INP_READ_LOCK(inp);
6339				hold_rlock = 1;
6340			}
6341			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
6343				    (control->end_added)) {
6344					out_flags |= MSG_EOR;
6345					if ((control->do_not_ref_stcb == 0)  &&
6346					    (control->stcb != NULL) &&
6347					    ((control->spec_flags & M_NOTIFICATION) == 0))
6348						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6349				}
6350				if (control->spec_flags & M_NOTIFICATION) {
6351					out_flags |= MSG_NOTIFICATION;
6352				}
6353				/* we ate up the mbuf */
6354				if (in_flags & MSG_PEEK) {
6355					/* just looking */
6356					m = SCTP_BUF_NEXT(m);
6357					copied_so_far += cp_len;
6358				} else {
6359					/* dispose of the mbuf */
6360					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6361						sctp_sblog(&so->so_rcv,
6362						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6363					}
6364					sctp_sbfree(control, stcb, &so->so_rcv, m);
6365					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6366						sctp_sblog(&so->so_rcv,
6367						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6368					}
6369					copied_so_far += cp_len;
6370					freed_so_far += cp_len;
6371					freed_so_far += MSIZE;
6372					atomic_subtract_int(&control->length, cp_len);
6373					control->data = sctp_m_free(m);
6374					m = control->data;
					/* been through it all; we must hold the sb lock, so it is ok to null the tail */
6376					if (control->data == NULL) {
6377#ifdef INVARIANTS
6378#if !defined(__APPLE__)
6379						if ((control->end_added == 0) ||
6380						    (TAILQ_NEXT(control, next) == NULL)) {
6381							/* If the end is not added, OR the
6382							 * next is NOT null we MUST have the lock.
6383							 */
6384							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6385								panic("Hmm we don't own the lock?");
6386							}
6387						}
6388#endif
6389#endif
6390						control->tail_mbuf = NULL;
6391#ifdef INVARIANTS
6392						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6393							panic("end_added, nothing left and no MSG_EOR");
6394						}
6395#endif
6396					}
6397				}
6398			} else {
6399				/* Do we need to trim the mbuf? */
6400				if (control->spec_flags & M_NOTIFICATION) {
6401					out_flags |= MSG_NOTIFICATION;
6402				}
6403				if ((in_flags & MSG_PEEK) == 0) {
6404					SCTP_BUF_RESV_UF(m, cp_len);
6405					SCTP_BUF_LEN(m) -= cp_len;
6406					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6407						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
6408					}
6409					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6410					if ((control->do_not_ref_stcb == 0) &&
6411					    stcb) {
6412						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6413					}
6414					copied_so_far += cp_len;
6415					freed_so_far += cp_len;
6416					freed_so_far += MSIZE;
6417					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6418						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6419							   SCTP_LOG_SBRESULT, 0);
6420					}
6421					atomic_subtract_int(&control->length, cp_len);
6422				} else {
6423					copied_so_far += cp_len;
6424				}
6425			}
6426#if defined(__APPLE__)
6427#if defined(APPLE_LEOPARD)
6428			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6429#else
6430			if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6431#endif
6432#else
6433			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6434#endif
6435				break;
6436			}
6437			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6438			    (control->do_not_ref_stcb == 0) &&
6439			    (freed_so_far >= rwnd_req)) {
6440				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6441			}
6442		} /* end while(m) */
6443		/*
6444		 * At this point we have looked at it all and we either have
6445		 * a MSG_EOR/or read all the user wants... <OR>
6446		 * control->length == 0.
6447		 */
6448		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6449			/* we are done with this control */
6450			if (control->length == 0) {
6451				if (control->data) {
6452#ifdef INVARIANTS
6453					panic("control->data not null at read eor?");
6454#else
6455					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6456					sctp_m_freem(control->data);
6457					control->data = NULL;
6458#endif
6459				}
6460			done_with_control:
6461				if (TAILQ_NEXT(control, next) == NULL) {
					/* If we don't have a next we need a
					 * lock; if there is a next, the
					 * interrupt is filling ahead of us and
					 * we don't need a lock to remove this
					 * entry (which is the head of the
					 * queue).
					 */
6468					if (hold_rlock == 0) {
6469						SCTP_INP_READ_LOCK(inp);
6470						hold_rlock = 1;
6471					}
6472				}
6473				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
6475				if (control->held_length) {
6476					held_length = 0;
6477					control->held_length = 0;
6478					wakeup_read_socket = 1;
6479				}
6480				if (control->aux_data) {
					sctp_m_free(control->aux_data);
6482					control->aux_data = NULL;
6483				}
6484				no_rcv_needed = control->do_not_ref_stcb;
6485				sctp_free_remote_addr(control->whoFrom);
6486				control->data = NULL;
6487				sctp_free_a_readq(stcb, control);
6488				control = NULL;
6489				if ((freed_so_far >= rwnd_req) &&
6490				    (no_rcv_needed == 0))
6491					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6492
6493			} else {
6494				/*
6495				 * The user did not read all of this
6496				 * message, turn off the returned MSG_EOR
6497				 * since we are leaving more behind on the
6498				 * control to read.
6499				 */
6500#ifdef INVARIANTS
6501				if (control->end_added &&
6502				    (control->data == NULL) &&
6503				    (control->tail_mbuf == NULL)) {
6504					panic("Gak, control->length is corrupt?");
6505				}
6506#endif
6507				no_rcv_needed = control->do_not_ref_stcb;
6508				out_flags &= ~MSG_EOR;
6509			}
6510		}
6511		if (out_flags & MSG_EOR) {
6512			goto release;
6513		}
6514#if defined(__APPLE__)
6515#if defined(APPLE_LEOPARD)
6516		if ((uio->uio_resid == 0) ||
6517#else
6518		if ((uio_resid(uio) == 0) ||
6519#endif
6520#else
6521		if ((uio->uio_resid == 0) ||
6522#endif
6523		    ((in_eeor_mode) &&
6524		     (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
6525			goto release;
6526		}
6527		/*
6528		 * If I hit here the receiver wants more and this message is
6529		 * NOT done (pd-api). So two questions. Can we block? if not
6530		 * we are done. Did the user NOT set MSG_WAITALL?
6531		 */
6532		if (block_allowed == 0) {
6533			goto release;
6534		}
6535		/*
6536		 * We need to wait for more data a few things: - We don't
6537		 * sbunlock() so we don't get someone else reading. - We
6538		 * must be sure to account for the case where what is added
6539		 * is NOT to our control when we wakeup.
6540		 */
6541
6542		/* Do we need to tell the transport a rwnd update might be
6543		 * needed before we go to sleep?
6544		 */
6545		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6546		    ((freed_so_far >= rwnd_req) &&
6547		     (control->do_not_ref_stcb == 0) &&
6548		     (no_rcv_needed == 0))) {
6549			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6550		}
6551	wait_some_more:
6552#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6553		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6554			goto release;
6555		}
6556#else
6557		if (so->so_state & SS_CANTRCVMORE) {
6558			goto release;
6559		}
6560#endif
6561
6562		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6563			goto release;
6564
6565		if (hold_rlock == 1) {
6566			SCTP_INP_READ_UNLOCK(inp);
6567			hold_rlock = 0;
6568		}
6569		if (hold_sblock == 0) {
6570			SOCKBUF_LOCK(&so->so_rcv);
6571			hold_sblock = 1;
6572		}
6573		if ((copied_so_far) && (control->length == 0) &&
6574		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6575			goto release;
6576		}
6577#if defined(__APPLE__)
6578		sbunlock(&so->so_rcv, 1);
6579#endif
6580		if (so->so_rcv.sb_cc <= control->held_length) {
6581			error = sbwait(&so->so_rcv);
6582			if (error) {
6583#if defined(__FreeBSD__)
6584				goto release;
6585#else
6586				goto release_unlocked;
6587#endif
6588			}
6589			control->held_length = 0;
6590		}
6591#if defined(__APPLE__)
6592		error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6593#endif
6594		if (hold_sblock) {
6595			SOCKBUF_UNLOCK(&so->so_rcv);
6596			hold_sblock = 0;
6597		}
6598		if (control->length == 0) {
6599			/* still nothing here */
6600			if (control->end_added == 1) {
				/* he aborted or is done, i.e. did a shutdown */
6602				out_flags |= MSG_EOR;
6603				if (control->pdapi_aborted) {
6604					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6605						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6606
6607					out_flags |= MSG_TRUNC;
6608				} else {
6609					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6610						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6611				}
6612				goto done_with_control;
6613			}
6614			if (so->so_rcv.sb_cc > held_length) {
6615				control->held_length = so->so_rcv.sb_cc;
6616				held_length = 0;
6617			}
6618			goto wait_some_more;
6619		} else if (control->data == NULL) {
6620			/* we must re-sync since data
6621			 * is probably being added
6622			 */
6623			SCTP_INP_READ_LOCK(inp);
6624			if ((control->length > 0) && (control->data == NULL)) {
				/* big trouble: we have the lock and it's corrupt? */
6626#ifdef INVARIANTS
6627				panic ("Impossible data==NULL length !=0");
6628#endif
6629				out_flags |= MSG_EOR;
6630				out_flags |= MSG_TRUNC;
6631				control->length = 0;
6632				SCTP_INP_READ_UNLOCK(inp);
6633				goto done_with_control;
6634			}
6635			SCTP_INP_READ_UNLOCK(inp);
6636			/* We will fall around to get more data */
6637		}
6638		goto get_more_data;
6639	} else {
6640		/*-
6641		 * Give caller back the mbuf chain,
6642		 * store in uio_resid the length
6643		 */
6644		wakeup_read_socket = 0;
6645		if ((control->end_added == 0) ||
6646		    (TAILQ_NEXT(control, next) == NULL)) {
6647			/* Need to get rlock */
6648			if (hold_rlock == 0) {
6649				SCTP_INP_READ_LOCK(inp);
6650				hold_rlock = 1;
6651			}
6652		}
6653		if (control->end_added) {
6654			out_flags |= MSG_EOR;
6655			if ((control->do_not_ref_stcb == 0) &&
6656			    (control->stcb != NULL) &&
6657			    ((control->spec_flags & M_NOTIFICATION) == 0))
6658				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6659		}
6660		if (control->spec_flags & M_NOTIFICATION) {
6661			out_flags |= MSG_NOTIFICATION;
6662		}
6663#if defined(__APPLE__)
6664#if defined(APPLE_LEOPARD)
6665		uio->uio_resid = control->length;
6666#else
6667		uio_setresid(uio, control->length);
6668#endif
6669#else
6670		uio->uio_resid = control->length;
6671#endif
6672		*mp = control->data;
6673		m = control->data;
6674		while (m) {
6675			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6676				sctp_sblog(&so->so_rcv,
6677				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6678			}
6679			sctp_sbfree(control, stcb, &so->so_rcv, m);
6680			freed_so_far += SCTP_BUF_LEN(m);
6681			freed_so_far += MSIZE;
6682			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6683				sctp_sblog(&so->so_rcv,
6684				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6685			}
6686			m = SCTP_BUF_NEXT(m);
6687		}
6688		control->data = control->tail_mbuf = NULL;
6689		control->length = 0;
6690		if (out_flags & MSG_EOR) {
6691			/* Done with this control */
6692			goto done_with_control;
6693		}
6694	}
6695 release:
6696	if (hold_rlock == 1) {
6697		SCTP_INP_READ_UNLOCK(inp);
6698		hold_rlock = 0;
6699	}
6700#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
6701	if (hold_sblock == 0) {
6702		SOCKBUF_LOCK(&so->so_rcv);
6703		hold_sblock = 1;
6704	}
6705#else
6706	if (hold_sblock == 1) {
6707		SOCKBUF_UNLOCK(&so->so_rcv);
6708		hold_sblock = 0;
6709	}
6710#endif
6711#if defined(__APPLE__)
6712	sbunlock(&so->so_rcv, 1);
6713#endif
6714
6715#if defined(__FreeBSD__)
6716	sbunlock(&so->so_rcv);
6717#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6718	sockbuf_lock = 0;
6719#endif
6720#endif
6721
6722 release_unlocked:
6723	if (hold_sblock) {
6724		SOCKBUF_UNLOCK(&so->so_rcv);
6725		hold_sblock = 0;
6726	}
6727	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6728		if ((freed_so_far >= rwnd_req) &&
6729		    (control && (control->do_not_ref_stcb == 0)) &&
6730		    (no_rcv_needed == 0))
6731			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6732	}
6733 out:
6734	if (msg_flags) {
6735		*msg_flags = out_flags;
6736	}
6737	if (((out_flags & MSG_EOR) == 0) &&
6738	    ((in_flags & MSG_PEEK) == 0) &&
6739	    (sinfo) &&
6740	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6741	     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6742		struct sctp_extrcvinfo *s_extra;
6743		s_extra = (struct sctp_extrcvinfo *)sinfo;
6744		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6745	}
6746	if (hold_rlock == 1) {
6747		SCTP_INP_READ_UNLOCK(inp);
6748	}
6749	if (hold_sblock) {
6750		SOCKBUF_UNLOCK(&so->so_rcv);
6751	}
6752#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6753	if (sockbuf_lock) {
6754		sbunlock(&so->so_rcv);
6755	}
6756#endif
6757
6758	if (freecnt_applied) {
6759		/*
6760		 * The lock on the socket buffer protects us so the free
6761		 * code will stop. But since we used the socketbuf lock and
6762		 * the sender uses the tcb_lock to increment, we need to use
6763		 * the atomic add to the refcnt.
6764		 */
6765		if (stcb == NULL) {
6766#ifdef INVARIANTS
6767			panic("stcb for refcnt has gone NULL?");
6768			goto stage_left;
6769#else
6770			goto stage_left;
6771#endif
6772		}
6773		atomic_add_int(&stcb->asoc.refcnt, -1);
6774		/* Save the value back for next time */
6775		stcb->freed_by_sorcv_sincelast = freed_so_far;
6776	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6778		if (stcb) {
6779			sctp_misc_ints(SCTP_SORECV_DONE,
6780				       freed_so_far,
6781#if defined(__APPLE__)
6782#if defined(APPLE_LEOPARD)
6783				       ((uio) ? (slen - uio->uio_resid) : slen),
6784#else
6785				       ((uio) ? (slen - uio_resid(uio)) : slen),
6786#endif
6787#else
6788				       ((uio) ? (slen - uio->uio_resid) : slen),
6789#endif
6790				       stcb->asoc.my_rwnd,
6791				       so->so_rcv.sb_cc);
6792		} else {
6793			sctp_misc_ints(SCTP_SORECV_DONE,
6794				       freed_so_far,
6795#if defined(__APPLE__)
6796#if defined(APPLE_LEOPARD)
6797				       ((uio) ? (slen - uio->uio_resid) : slen),
6798#else
6799				       ((uio) ? (slen - uio_resid(uio)) : slen),
6800#endif
6801#else
6802				       ((uio) ? (slen - uio->uio_resid) : slen),
6803#endif
6804				       0,
6805				       so->so_rcv.sb_cc);
6806		}
6807	}
6808 stage_left:
6809	if (wakeup_read_socket) {
6810		sctp_sorwakeup(inp, so);
6811	}
6812	return (error);
6813}
6814
6815
6816#ifdef SCTP_MBUF_LOGGING
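/*
 * Logging wrappers around m_free()/m_freem(): when mbuf logging is
 * enabled, each mbuf with external storage is recorded before it is
 * released, so the trace buffer can be used to track mbuf usage.
 */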
6817struct mbuf *
6818sctp_m_free(struct mbuf *m)
6819{
6820	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6821		if (SCTP_BUF_IS_EXTENDED(m)) {
6822			sctp_log_mb(m, SCTP_MBUF_IFREE);
6823		}
6824	}
6825	return (m_free(m));
6826}
6827
6828void sctp_m_freem(struct mbuf *mb)
6829{
6830	while (mb != NULL)
6831		mb = sctp_m_free(mb);
6832}
6833
6834#endif
6835
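/*
 * Illustrative call (a sketch only, assuming the stack's default VRF id
 * constant SCTP_DEFAULT_VRFID and a locally bound address in 'sin'):
 *
 *	(void)sctp_dynamic_set_primary((struct sockaddr *)&sin,
 *	                               SCTP_DEFAULT_VRFID);
 *
 * This queues a work item so the iterator asks every association using
 * that local address to request a peer-set-primary.
 */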
6836int
6837sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6838{
	/* Given a local address, for all associations
	 * that hold the address, request a peer-set-primary.
	 */
6842	struct sctp_ifa *ifa;
6843	struct sctp_laddr *wi;
6844
6845	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6846	if (ifa == NULL) {
6847		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6848		return (EADDRNOTAVAIL);
6849	}
6850	/* Now that we have the ifa we must awaken the
6851	 * iterator with this message.
6852	 */
6853	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6854	if (wi == NULL) {
6855		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6856		return (ENOMEM);
6857	}
	/* Now incr the count and init the wi structure */
6859	SCTP_INCR_LADDR_COUNT();
6860	bzero(wi, sizeof(*wi));
6861	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6862	wi->ifa = ifa;
6863	wi->action = SCTP_SET_PRIM_ADDR;
6864	atomic_add_int(&ifa->refcount, 1);
6865
6866	/* Now add it to the work queue */
6867	SCTP_WQ_ADDR_LOCK();
6868	/*
6869	 * Should this really be a tailq? As it is we will process the
6870	 * newest first :-0
6871	 */
6872	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6873	SCTP_WQ_ADDR_UNLOCK();
6874	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6875			 (struct sctp_inpcb *)NULL,
6876			 (struct sctp_tcb *)NULL,
6877			 (struct sctp_nets *)NULL);
6878	return (0);
6879}
6880
6881#if defined(__Userspace__)
6882/* no sctp_soreceive for __Userspace__ now */
6883#endif
6884
6885#if !defined(__Userspace__)
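/*
 * Protocol soreceive entry point: sets up a stack sockaddr buffer and an
 * extended rcvinfo, calls sctp_sorecvmsg(), and then converts the returned
 * sinfo into a control-message mbuf and the source address into a
 * duplicated sockaddr for the caller.
 */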
6886int
6887sctp_soreceive(	struct socket *so,
6888		struct sockaddr **psa,
6889		struct uio *uio,
6890		struct mbuf **mp0,
6891		struct mbuf **controlp,
6892		int *flagsp)
6893{
6894	int error, fromlen;
6895	uint8_t sockbuf[256];
6896	struct sockaddr *from;
6897	struct sctp_extrcvinfo sinfo;
6898	int filling_sinfo = 1;
6899	struct sctp_inpcb *inp;
6900
6901	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pick up the endpoint we are reading from */
6903	if (inp == NULL) {
6904		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6905		return (EINVAL);
6906	}
6907	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6908	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6909	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6910	    (controlp == NULL)) {
6911		/* user does not want the sndrcv ctl */
6912		filling_sinfo = 0;
6913	}
6914	if (psa) {
6915		from = (struct sockaddr *)sockbuf;
6916		fromlen = sizeof(sockbuf);
6917#ifdef HAVE_SA_LEN
6918		from->sa_len = 0;
6919#endif
6920	} else {
6921		from = NULL;
6922		fromlen = 0;
6923	}
6924
6925#if defined(__APPLE__)
6926	SCTP_SOCKET_LOCK(so, 1);
6927#endif
6928	if (filling_sinfo) {
6929		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6930	}
6931	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6932	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6933	if (controlp != NULL) {
6934		/* copy back the sinfo in a CMSG format */
6935		if (filling_sinfo)
6936			*controlp = sctp_build_ctl_nchunk(inp,
6937			                                  (struct sctp_sndrcvinfo *)&sinfo);
6938		else
6939			*controlp = NULL;
6940	}
6941	if (psa) {
6942		/* copy back the address info */
6943#ifdef HAVE_SA_LEN
6944		if (from && from->sa_len) {
6945#else
6946		if (from) {
6947#endif
6948#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6949			*psa = sodupsockaddr(from, M_NOWAIT);
6950#else
6951			*psa = dup_sockaddr(from, mp0 == 0);
6952#endif
6953		} else {
6954			*psa = NULL;
6955		}
6956	}
6957#if defined(__APPLE__)
6958	SCTP_SOCKET_UNLOCK(so, 1);
6959#endif
6960	return (error);
6961}
6962
6963
6964#if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
6965/*
6966 * General routine to allocate a hash table with control of memory flags.
6967 * is in 7.0 and beyond for sure :-)
6968 */
6969void *
6970sctp_hashinit_flags(int elements, struct malloc_type *type,
6971                    u_long *hashmask, int flags)
6972{
6973	long hashsize;
6974	LIST_HEAD(generic, generic) *hashtbl;
6975	int i;
6976
6977
6978	if (elements <= 0) {
6979#ifdef INVARIANTS
6980		panic("hashinit: bad elements");
6981#else
6982		SCTP_PRINTF("hashinit: bad elements?");
6983		elements = 1;
6984#endif
6985	}
6986	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6987		continue;
6988	hashsize >>= 1;
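	/* hashsize is now the largest power of two <= elements
	 * (e.g. elements = 100 gives hashsize = 64).
	 */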
6989	if (flags & HASH_WAITOK)
6990		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
6991	else if (flags & HASH_NOWAIT)
6992		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
6993	else {
6994#ifdef INVARIANTS
6995		panic("flag incorrect in hashinit_flags");
6996#else
6997		return (NULL);
6998#endif
6999	}
7000
7001	/* no memory? */
7002	if (hashtbl == NULL)
7003		return (NULL);
7004
7005	for (i = 0; i < hashsize; i++)
7006		LIST_INIT(&hashtbl[i]);
7007	*hashmask = hashsize - 1;
7008	return (hashtbl);
7009}
7010#endif
7011
7012#else /*  __Userspace__ ifdef above sctp_soreceive */
7013/*
7014 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
7015 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
7016 *__FreeBSD__ must be excluded.
7017 *
7018 */
7019
7020void *
7021sctp_hashinit_flags(int elements, struct malloc_type *type,
7022                    u_long *hashmask, int flags)
7023{
7024	long hashsize;
7025	LIST_HEAD(generic, generic) *hashtbl;
7026	int i;
7027
7028	if (elements <= 0) {
7029		SCTP_PRINTF("hashinit: bad elements?");
7030#ifdef INVARIANTS
7031		return (NULL);
7032#else
7033		elements = 1;
7034#endif
7035	}
7036	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7037		continue;
7038	hashsize >>= 1;
	/* Cannot use MALLOC here because it has to be declared or defined
	 * using MALLOC_DECLARE or MALLOC_DEFINE first.
	 */
7041	if (flags & HASH_WAITOK)
7042		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7043	else if (flags & HASH_NOWAIT)
7044		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7045	else {
7046#ifdef INVARIANTS
7047		SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
7048#endif
7049		return (NULL);
7050	}
7051
7052	/* no memory? */
7053	if (hashtbl == NULL)
7054		return (NULL);
7055
7056	for (i = 0; i < hashsize; i++)
7057		LIST_INIT(&hashtbl[i]);
7058	*hashmask = hashsize - 1;
7059	return (hashtbl);
7060}
7061
7062
7063void
7064sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7065{
7066	LIST_HEAD(generic, generic) *hashtbl, *hp;
7067
7068	hashtbl = vhashtbl;
7069	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7070		if (!LIST_EMPTY(hp)) {
7071			SCTP_PRINTF("hashdestroy: hash not empty.\n");
7072			return;
7073		}
7074	FREE(hashtbl, type);
7075}
7076
7077
7078void
7079sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7080{
7081	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
7082	/*
7083	LIST_ENTRY(type) *start, *temp;
7084	 */
7085	hashtbl = vhashtbl;
7086	/* Apparently temp is not dynamically allocated, so attempts to
7087	   free it results in error.
7088	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7089		if (!LIST_EMPTY(hp)) {
7090			start = LIST_FIRST(hp);
7091			while (start != NULL) {
7092				temp = start;
7093				start = start->le_next;
7094				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
7095				FREE(temp, type);
7096			}
7097		}
7098	 */
7099	FREE(hashtbl, type);
7100}
7101
7102
7103#endif
7104
7105
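/*
 * Walk a packed array of sockaddrs (as handed in by connectx) and add each
 * one as a confirmed remote address of the association. The entries are
 * laid out back to back, so the cursor advances by the size that matches
 * the family of the current entry. Returns the number of addresses added;
 * on failure the association has already been freed and *error is set.
 */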
7106int
7107sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
7108			 int totaddr, int *error)
7109{
7110	int added = 0;
7111	int i;
7112	struct sctp_inpcb *inp;
7113	struct sockaddr *sa;
7114	size_t incr = 0;
7115#ifdef INET
7116	struct sockaddr_in *sin;
7117#endif
7118#ifdef INET6
7119	struct sockaddr_in6 *sin6;
7120#endif
7121
7122	sa = addr;
7123	inp = stcb->sctp_ep;
7124	*error = 0;
7125	for (i = 0; i < totaddr; i++) {
7126		switch (sa->sa_family) {
7127#ifdef INET
7128		case AF_INET:
7129			incr = sizeof(struct sockaddr_in);
7130			sin = (struct sockaddr_in *)sa;
7131			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
7132			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
7133			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
7134				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7135				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
7136				*error = EINVAL;
7137				goto out_now;
7138			}
7139			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7140				/* assoc is gone; no unlock needed */
7141				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7142				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
7143				*error = ENOBUFS;
7144				goto out_now;
7145			}
7146			added++;
7147			break;
7148#endif
7149#ifdef INET6
7150		case AF_INET6:
7151			incr = sizeof(struct sockaddr_in6);
7152			sin6 = (struct sockaddr_in6 *)sa;
7153			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
7154			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7155				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7156				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7157				*error = EINVAL;
7158				goto out_now;
7159			}
7160			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7161				/* assoc is gone; no unlock needed */
7162				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7163				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7164				*error = ENOBUFS;
7165				goto out_now;
7166			}
7167			added++;
7168			break;
7169#endif
7170#if defined(__Userspace__)
7171		case AF_CONN:
7172			incr = sizeof(struct sockaddr_in6);
7173			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7174				/* assoc is gone; no unlock needed */
7175				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7176				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7177				*error = ENOBUFS;
7178				goto out_now;
7179			}
7180			added++;
7181			break;
7182#endif
7183		default:
7184			break;
7185		}
7186		sa = (struct sockaddr *)((caddr_t)sa + incr);
7187	}
7188 out_now:
7189	return (added);
7190}
7191
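/*
 * Walk the packed address list: count IPv4/IPv6 addresses, validate their
 * lengths and probe each one with sctp_findassociation_ep_addr(). If any
 * address already maps to an existing association on this endpoint, that
 * stcb is returned; otherwise NULL. *totaddr is trimmed when an unknown
 * address family or the size limit is reached.
 */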
7192struct sctp_tcb *
7193sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
7194			  int *totaddr, int *num_v4, int *num_v6, int *error,
7195			  int limit, int *bad_addr)
7196{
7197	struct sockaddr *sa;
7198	struct sctp_tcb *stcb = NULL;
7199	size_t incr, at, i;
7200	at = incr = 0;
7201	sa = addr;
7202
7203	*error = *num_v6 = *num_v4 = 0;
7204	/* account and validate addresses */
7205	for (i = 0; i < (size_t)*totaddr; i++) {
7206		switch (sa->sa_family) {
7207#ifdef INET
7208		case AF_INET:
7209			(*num_v4) += 1;
7210			incr = sizeof(struct sockaddr_in);
7211#ifdef HAVE_SA_LEN
7212			if (sa->sa_len != incr) {
7213				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7214				*error = EINVAL;
7215				*bad_addr = 1;
7216				return (NULL);
7217			}
7218#endif
7219			break;
7220#endif
7221#ifdef INET6
7222		case AF_INET6:
7223		{
7224			struct sockaddr_in6 *sin6;
7225
7226			sin6 = (struct sockaddr_in6 *)sa;
7227			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7228				/* Must be non-mapped for connectx */
7229				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7230				*error = EINVAL;
7231				*bad_addr = 1;
7232				return (NULL);
7233			}
7234			(*num_v6) += 1;
7235			incr = sizeof(struct sockaddr_in6);
7236#ifdef HAVE_SA_LEN
7237			if (sa->sa_len != incr) {
7238				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7239				*error = EINVAL;
7240				*bad_addr = 1;
7241				return (NULL);
7242			}
7243#endif
7244			break;
7245		}
7246#endif
7247		default:
7248			*totaddr = i;
7249			/* we are done */
7250			break;
7251		}
7252		if (i == (size_t)*totaddr) {
7253			break;
7254		}
7255		SCTP_INP_INCR_REF(inp);
7256		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
7257		if (stcb != NULL) {
7258			/* Already have or am bringing up an association */
7259			return (stcb);
7260		} else {
7261			SCTP_INP_DECR_REF(inp);
7262		}
7263		if ((at + incr) > (size_t)limit) {
7264			*totaddr = i;
7265			break;
7266		}
7267		at += incr;	/* keep the running length for the limit check */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
7268	}
7269	return ((struct sctp_tcb *)NULL);
7270}
7271
7272/*
7273 * sctp_bindx(ADD) for one address.
7274 * assumes all arguments are valid/checked by caller.
7275 */
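/*
 * Flow: validate the address family, length and scoping restrictions; if the
 * endpoint is still unbound, fall through to sctp_inpcb_bind(); otherwise add
 * the address via sctp_addr_mgmt_ep_sa(). Association-specific binding
 * (assoc_id != 0) is not implemented, see the FIX note below.
 */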
7276void
7277sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
7278		       struct sockaddr *sa, sctp_assoc_t assoc_id,
7279		       uint32_t vrf_id, int *error, void *p)
7280{
7281	struct sockaddr *addr_touse;
7282#ifdef INET6
7283	struct sockaddr_in sin;
7284#endif
7285#ifdef SCTP_MVRF
7286	int i, fnd = 0;
7287#endif
7288
7289	/* see if we're bound all already! */
7290	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7291		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7292		*error = EINVAL;
7293		return;
7294	}
7295#ifdef SCTP_MVRF
7296	/* Is the VRF one we have? */
7297	for (i = 0; i < inp->num_vrfs; i++) {
7298		if (vrf_id == inp->m_vrf_ids[i]) {
7299			fnd = 1;
7300			break;
7301		}
7302	}
7303	if (!fnd) {
7304		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7305		*error = EINVAL;
7306		return;
7307	}
7308#endif
7309	addr_touse = sa;
7310#ifdef INET6
7311	if (sa->sa_family == AF_INET6) {
7312		struct sockaddr_in6 *sin6;
7313#ifdef HAVE_SA_LEN
7314		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7315			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7316			*error = EINVAL;
7317			return;
7318		}
7319#endif
7320		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7321			/* can only bind v6 on PF_INET6 sockets */
7322			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7323			*error = EINVAL;
7324			return;
7325		}
7326		sin6 = (struct sockaddr_in6 *)addr_touse;
7327		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7328			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7329			    SCTP_IPV6_V6ONLY(inp)) {
7330				/* can't bind a v4-mapped address on a v6-only PF_INET6 socket */
7331				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7332				*error = EINVAL;
7333				return;
7334			}
7335			in6_sin6_2_sin(&sin, sin6);
7336			addr_touse = (struct sockaddr *)&sin;
7337		}
7338	}
7339#endif
7340#ifdef INET
7341	if (sa->sa_family == AF_INET) {
7342#ifdef HAVE_SA_LEN
7343		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7344			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7345			*error = EINVAL;
7346			return;
7347		}
7348#endif
7349		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7350		    SCTP_IPV6_V6ONLY(inp)) {
7351			/* can't bind a v4 address on a v6-only PF_INET6 socket */
7352			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7353			*error = EINVAL;
7354			return;
7355		}
7356	}
7357#endif
7358	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
7359#if !(defined(__Panda__) || defined(__Windows__))
7360		if (p == NULL) {
7361			/* Can't get proc for Net/Open BSD */
7362			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7363			*error = EINVAL;
7364			return;
7365		}
7366#endif
7367		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
7368		return;
7369	}
7370	/*
7371	 * No locks required here since bind and mgmt_ep_sa
7372	 * do their own locking. If we ever address the FIX
7373	 * below, we may need to lock in that case.
7374	 */
7375	if (assoc_id == 0) {
7376		/* add the address */
7377		struct sctp_inpcb *lep;
7378		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
7379
7380		/* validate the incoming port */
7381		if ((lsin->sin_port != 0) &&
7382		    (lsin->sin_port != inp->sctp_lport)) {
7383			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7384			*error = EINVAL;
7385			return;
7386		} else {
7387			/* user specified 0 port, set it to existing port */
7388			lsin->sin_port = inp->sctp_lport;
7389		}
7390
7391		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
7392		if (lep != NULL) {
7393			/*
7394			 * We must decrement the refcount
7395			 * since we have the ep already and
7396			 * are binding. No remove going on
7397			 * here.
7398			 */
7399			SCTP_INP_DECR_REF(lep);
7400		}
7401		if (lep == inp) {
7402			/* already bound to it, OK */
7403			return;
7404		} else if (lep == NULL) {
7405			((struct sockaddr_in *)addr_touse)->sin_port = 0;
7406			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7407						      SCTP_ADD_IP_ADDRESS,
7408						      vrf_id, NULL);
7409		} else {
7410			*error = EADDRINUSE;
7411		}
7412		if (*error)
7413			return;
7414	} else {
7415		/*
7416		 * FIX: decide whether we allow assoc based
7417		 * bindx
7418		 */
7419	}
7420}
7421
7422/*
7423 * sctp_bindx(DELETE) for one address.
7424 * assumes all arguments are valid/checked by caller.
7425 */
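/*
 * Mirror of the ADD case above: after the same family/length/scope checks the
 * address is removed via sctp_addr_mgmt_ep_sa(SCTP_DEL_IP_ADDRESS). As with
 * ADD, association-specific binding is not implemented.
 */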
7426void
7427sctp_bindx_delete_address(struct sctp_inpcb *inp,
7428			  struct sockaddr *sa, sctp_assoc_t assoc_id,
7429			  uint32_t vrf_id, int *error)
7430{
7431	struct sockaddr *addr_touse;
7432#ifdef INET6
7433	struct sockaddr_in sin;
7434#endif
7435#ifdef SCTP_MVRF
7436	int i, fnd = 0;
7437#endif
7438
7439	/* see if we're bound all already! */
7440	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7441		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7442		*error = EINVAL;
7443		return;
7444	}
7445#ifdef SCTP_MVRF
7446	/* Is the VRF one we have? */
7447	for (i = 0; i < inp->num_vrfs; i++) {
7448		if (vrf_id == inp->m_vrf_ids[i]) {
7449			fnd = 1;
7450			break;
7451		}
7452	}
7453	if (!fnd) {
7454		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7455		*error = EINVAL;
7456		return;
7457	}
7458#endif
7459	addr_touse = sa;
7460#ifdef INET6
7461	if (sa->sa_family == AF_INET6) {
7462		struct sockaddr_in6 *sin6;
7463#ifdef HAVE_SA_LEN
7464		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7465			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7466			*error = EINVAL;
7467			return;
7468		}
7469#endif
7470		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7471			/* can only bind v6 on PF_INET6 sockets */
7472			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7473			*error = EINVAL;
7474			return;
7475		}
7476		sin6 = (struct sockaddr_in6 *)addr_touse;
7477		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7478			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7479			    SCTP_IPV6_V6ONLY(inp)) {
7480				/* can't bind a v4-mapped address on a v6-only PF_INET6 socket */
7481				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7482				*error = EINVAL;
7483				return;
7484			}
7485			in6_sin6_2_sin(&sin, sin6);
7486			addr_touse = (struct sockaddr *)&sin;
7487		}
7488	}
7489#endif
7490#ifdef INET
7491	if (sa->sa_family == AF_INET) {
7492#ifdef HAVE_SA_LEN
7493		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7494			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7495			*error = EINVAL;
7496			return;
7497		}
7498#endif
7499		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7500		    SCTP_IPV6_V6ONLY(inp)) {
7501			/* can't bind a v4 address on a v6-only PF_INET6 socket */
7502			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7503			*error = EINVAL;
7504			return;
7505		}
7506	}
7507#endif
7508	/*
7509	 * No lock required; mgmt_ep_sa does its own locking.
7510	 * If the FIX below is ever addressed, we may need to
7511	 * lock before calling association-level binding.
7512	 */
7513	if (assoc_id == 0) {
7514		/* delete the address */
7515		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7516					      SCTP_DEL_IP_ADDRESS,
7517					      vrf_id, NULL);
7518	} else {
7519		/*
7520		 * FIX: decide whether we allow assoc based
7521		 * bindx
7522		 */
7523	}
7524}
7525
7526/*
7527 * returns the valid local address count for an assoc, taking into account
7528 * all scoping rules
7529 */
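/*
 * Two cases: a bound-all endpoint walks every ifn/ifa on the association's
 * VRF and applies the scope flags captured in stcb->asoc.scope, while a
 * subset-bound endpoint simply counts its own address list, skipping
 * restricted addresses.
 */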
7530int
7531sctp_local_addr_count(struct sctp_tcb *stcb)
7532{
7533	int loopback_scope;
7534#if defined(INET)
7535	int ipv4_local_scope, ipv4_addr_legal;
7536#endif
7537#if defined(INET6)
7538	int local_scope, site_scope, ipv6_addr_legal;
7539#endif
7540#if defined(__Userspace__)
7541	int conn_addr_legal;
7542#endif
7543	struct sctp_vrf *vrf;
7544	struct sctp_ifn *sctp_ifn;
7545	struct sctp_ifa *sctp_ifa;
7546	int count = 0;
7547
7548	/* Turn on all the appropriate scopes */
7549	loopback_scope = stcb->asoc.scope.loopback_scope;
7550#if defined(INET)
7551	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7552	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7553#endif
7554#if defined(INET6)
7555	local_scope = stcb->asoc.scope.local_scope;
7556	site_scope = stcb->asoc.scope.site_scope;
7557	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7558#endif
7559#if defined(__Userspace__)
7560	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
7561#endif
7562	SCTP_IPI_ADDR_RLOCK();
7563	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7564	if (vrf == NULL) {
7565		/* no vrf, no addresses */
7566		SCTP_IPI_ADDR_RUNLOCK();
7567		return (0);
7568	}
7569
7570	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7571		/*
7572		 * bound all case: go through all ifns on the vrf
7573		 */
7574		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7575			if ((loopback_scope == 0) &&
7576			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7577				continue;
7578			}
7579			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7580				if (sctp_is_addr_restricted(stcb, sctp_ifa))
7581					continue;
7582				switch (sctp_ifa->address.sa.sa_family) {
7583#ifdef INET
7584				case AF_INET:
7585					if (ipv4_addr_legal) {
7586						struct sockaddr_in *sin;
7587
7588						sin = &sctp_ifa->address.sin;
7589						if (sin->sin_addr.s_addr == 0) {
7590							/* skip unspecified addrs */
7591							continue;
7592						}
7593#if defined(__FreeBSD__)
7594						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7595						                     &sin->sin_addr) != 0) {
7596							continue;
7597						}
7598#endif
7599						if ((ipv4_local_scope == 0) &&
7600						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7601							continue;
7602						}
7603						/* count this one */
7604						count++;
7605					} else {
7606						continue;
7607					}
7608					break;
7609#endif
7610#ifdef INET6
7611				case AF_INET6:
7612					if (ipv6_addr_legal) {
7613						struct sockaddr_in6 *sin6;
7614
7615#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
7616						struct sockaddr_in6 lsa6;
7617#endif
7618						sin6 = &sctp_ifa->address.sin6;
7619						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7620							continue;
7621						}
7622#if defined(__FreeBSD__)
7623						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7624						                     &sin6->sin6_addr) != 0) {
7625							continue;
7626						}
7627#endif
7628						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7629							if (local_scope == 0)
7630								continue;
7631#if defined(SCTP_EMBEDDED_V6_SCOPE)
7632							if (sin6->sin6_scope_id == 0) {
7633#ifdef SCTP_KAME
7634								if (sa6_recoverscope(sin6) != 0)
7635									/* bad link-local address */
7640									continue;
7641#else
7642								lsa6 = *sin6;
7643								if (in6_recoverscope(&lsa6,
7644								                     &lsa6.sin6_addr,
7645								                     NULL))
7646									/* bad link-local address */
7651									continue;
7652								sin6 = &lsa6;
7653#endif /* SCTP_KAME */
7654							}
7655#endif /* SCTP_EMBEDDED_V6_SCOPE */
7656						}
7657						if ((site_scope == 0) &&
7658						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7659							continue;
7660						}
7661						/* count this one */
7662						count++;
7663					}
7664					break;
7665#endif
7666#if defined(__Userspace__)
7667				case AF_CONN:
7668					if (conn_addr_legal) {
7669						count++;
7670					}
7671					break;
7672#endif
7673				default:
7674					/* TSNH */
7675					break;
7676				}
7677			}
7678		}
7679	} else {
7680		/*
7681		 * subset bound case
7682		 */
7683		struct sctp_laddr *laddr;
7684		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7685			     sctp_nxt_addr) {
7686			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7687				continue;
7688			}
7689			/* count this one */
7690			count++;
7691		}
7692	}
7693	SCTP_IPI_ADDR_RUNLOCK();
7694	return (count);
7695}
7696
7697#if defined(SCTP_LOCAL_TRACE_BUF)
7698
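/*
 * Append one entry to the circular trace buffer. The slot is claimed
 * lock-free with atomic_cmpset_int(); when the index wraps it restarts at 1
 * and the wrapping write lands in slot 0.
 */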
7699void
7700sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7701{
7702	uint32_t saveindex, newindex;
7703
7704#if defined(__Windows__)
7705	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
7706		return;
7707	}
7708	do {
7709		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
7710		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7711			newindex = 1;
7712		} else {
7713			newindex = saveindex + 1;
7714		}
7715	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
7716	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7717		saveindex = 0;
7718	}
7719	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7720	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
7721	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
7722	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
7723	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
7724	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
7725	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
7726	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
7727#else
7728	do {
7729		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7730		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7731			newindex = 1;
7732		} else {
7733			newindex = saveindex + 1;
7734		}
7735	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7736	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7737		saveindex = 0;
7738	}
7739	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7740	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7741	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7742	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7743	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7744	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7745	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7746	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7747#endif
7748}
7749
7750#endif
7751#if defined(__FreeBSD__)
7752#if __FreeBSD_version >= 800044
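/*
 * Callback registered via udp_set_kernel_tunneling(): given a UDP datagram
 * carrying an encapsulated SCTP packet, remember the UDP source port, strip
 * the UDP header, fix up the IP/IPv6 payload length and hand the packet to
 * the normal SCTP input path.
 */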
7753static void
7754sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
7755{
7756	struct ip *iph;
7757#ifdef INET6
7758	struct ip6_hdr *ip6;
7759#endif
7760	struct mbuf *sp, *last;
7761	struct udphdr *uhdr;
7762	uint16_t port;
7763
7764	if ((m->m_flags & M_PKTHDR) == 0) {
7765		/* Can't handle one that is not a pkt hdr */
7766		goto out;
7767	}
7768	/* Pull the src port */
7769	iph = mtod(m, struct ip *);
7770	uhdr = (struct udphdr *)((caddr_t)iph + off);
7771	port = uhdr->uh_sport;
7772	/*
7773	 * Split out the mbuf chain. Leave the IP header
7774	 * in m; place the rest in sp.
7775	 */
7776	sp = m_split(m, off, M_NOWAIT);
7777	if (sp == NULL) {
7778		/* Gak, drop packet, we can't do a split */
7779		goto out;
7780	}
7781	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7782		/* Gak, packet can't have an SCTP header in it - too small */
7783		m_freem(sp);
7784		goto out;
7785	}
7786	/* Now pull up the UDP header and SCTP header together */
7787	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7788	if (sp == NULL) {
7789		/* Gak pullup failed */
7790		goto out;
7791	}
7792	/* Trim out the UDP header */
7793	m_adj(sp, sizeof(struct udphdr));
7794
7795	/* Now reconstruct the mbuf chain */
7796	for (last = m; last->m_next; last = last->m_next);
7797	last->m_next = sp;
7798	m->m_pkthdr.len += sp->m_pkthdr.len;
7799	iph = mtod(m, struct ip *);
7800	switch (iph->ip_v) {
7801#ifdef INET
7802	case IPVERSION:
7803#if __FreeBSD_version >= 1000000
7804		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7805#else
7806		iph->ip_len -= sizeof(struct udphdr);
7807#endif
7808		sctp_input_with_port(m, off, port);
7809		break;
7810#endif
7811#ifdef INET6
7812	case IPV6_VERSION >> 4:
7813		ip6 = mtod(m, struct ip6_hdr *);
7814		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7815		sctp6_input_with_port(&m, &off, port);
7816		break;
7817#endif
7818	default:
7819		goto out;
7820		break;
7821	}
7822	return;
7823 out:
7824	m_freem(m);
7825}
7826#endif
7827
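/*
 * Close the IPv4/IPv6 UDP tunneling sockets, if they are open.
 */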
7828void
7829sctp_over_udp_stop(void)
7830{
7831	/*
7832	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
7833	 */
7834#ifdef INET
7835	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7836		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7837		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7838	}
7839#endif
7840#ifdef INET6
7841	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7842		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7843		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7844	}
7845#endif
7846}
7847
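/*
 * Create the UDP tunneling sockets, register sctp_recv_udp_tunneled_packet()
 * as the kernel tunneling callback and bind them to the port configured via
 * the sctp_udp_tunneling_port sysctl (presumably toggled through the handler
 * in sctp_sysctl.c); any failure tears everything down via
 * sctp_over_udp_stop().
 */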
7848int
7849sctp_over_udp_start(void)
7850{
7851#if __FreeBSD_version >= 800044
7852	uint16_t port;
7853	int ret;
7854#ifdef INET
7855	struct sockaddr_in sin;
7856#endif
7857#ifdef INET6
7858	struct sockaddr_in6 sin6;
7859#endif
7860	/*
7861	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
7862	 */
7863	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7864	if (ntohs(port) == 0) {
7865		/* Must have a port set */
7866		return (EINVAL);
7867	}
7868#ifdef INET
7869	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7870		/* Already running -- must stop first */
7871		return (EALREADY);
7872	}
7873#endif
7874#ifdef INET6
7875	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7876		/* Already running -- must stop first */
7877		return (EALREADY);
7878	}
7879#endif
7880#ifdef INET
7881	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7882	                    SOCK_DGRAM, IPPROTO_UDP,
7883	                    curthread->td_ucred, curthread))) {
7884		sctp_over_udp_stop();
7885		return (ret);
7886	}
7887	/* Call the special UDP hook. */
7888	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7889	                                    sctp_recv_udp_tunneled_packet))) {
7890		sctp_over_udp_stop();
7891		return (ret);
7892	}
7893	/* Ok, we have a socket, bind it to the port. */
7894	memset(&sin, 0, sizeof(struct sockaddr_in));
7895	sin.sin_len = sizeof(struct sockaddr_in);
7896	sin.sin_family = AF_INET;
7897	sin.sin_port = htons(port);
7898	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7899	                  (struct sockaddr *)&sin, curthread))) {
7900		sctp_over_udp_stop();
7901		return (ret);
7902	}
7903#endif
7904#ifdef INET6
7905	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7906	                    SOCK_DGRAM, IPPROTO_UDP,
7907	                    curthread->td_ucred, curthread))) {
7908		sctp_over_udp_stop();
7909		return (ret);
7910	}
7911	/* Call the special UDP hook. */
7912	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7913	                                    sctp_recv_udp_tunneled_packet))) {
7914		sctp_over_udp_stop();
7915		return (ret);
7916	}
7917	/* Ok, we have a socket, bind it to the port. */
7918	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7919	sin6.sin6_len = sizeof(struct sockaddr_in6);
7920	sin6.sin6_family = AF_INET6;
7921	sin6.sin6_port = htons(port);
7922	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7923	                  (struct sockaddr *)&sin6, curthread))) {
7924		sctp_over_udp_stop();
7925		return (ret);
7926	}
7927#endif
7928	return (0);
7929#else
7930	return (ENOTSUP);
7931#endif
7932}
7933#endif
7934