author    Tim-Philipp Müller <tim@centricular.com>    2020-07-24 19:23:19 +0100
committer Tim-Philipp Müller <tim@centricular.com>    2020-08-14 01:32:45 +0100
commit    f4538e24b6ef9185cb8233d16ac536950b36e10a (patch)
tree      aae8f59d793aebe87904324d4fd8e2ab467a975e /ext/sctp
parent    fe3a0c2c9088cab2d695a565e561c79f9d4df8cf (diff)
download  gstreamer-plugins-bad-f4538e24b6ef9185cb8233d16ac536950b36e10a.tar.gz
sctp: import internal copy of usrsctp library
There are problems with global shared state and no API stability
guarantees, and we can't rely on distros shipping the fixes we need.
Both Firefox and Chrome bundle their own copies too.

Imported from https://github.com/sctplab/usrsctp,
commit 547d3b46c64876c0336b9eef297fda58dbe1adaf
Date: Thu Jul 23 21:49:32 2020 +0200

Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/issues/870

Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1465>
Diffstat (limited to 'ext/sctp')
-rw-r--r--  ext/sctp/usrsctp/.gitignore | 84
-rw-r--r--  ext/sctp/usrsctp/LICENSE.md | 27
-rw-r--r--  ext/sctp/usrsctp/meson.build | 220
-rw-r--r--  ext/sctp/usrsctp/meson_options.txt | 10
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/Makefile.am | 81
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/meson.build | 20
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/meson.build | 20
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp.h | 672
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.c | 3524
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.h | 96
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.c | 2314
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.h | 216
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.c | 996
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.h | 73
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.c | 249
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.h | 119
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_cc_functions.c | 2508
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_constants.h | 1078
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.c | 831
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.h | 56
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_header.h | 611
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.c | 5840
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.h | 124
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.c | 6426
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.h | 66
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_lock_userspace.h | 245
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_os.h | 92
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_os_userspace.h | 1140
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.c | 14993
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.h | 248
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.c | 8127
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.h | 879
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.c | 303
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.h | 70
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_process_lock.h | 677
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.c | 329
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.h | 90
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_ss_functions.c | 1130
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_structs.h | 1296
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.c | 1625
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.h | 632
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.c | 1633
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.h | 104
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_uio.h | 1361
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_userspace.c | 406
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_usrreq.c | 9071
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctp_var.h | 448
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctputil.c | 8695
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet/sctputil.h | 422
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet6/meson.build | 1
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_usrreq.c | 1809
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_var.h | 81
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_atomic.h | 315
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_environment.c | 132
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_environment.h | 118
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_inpcb.h | 373
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_ip6_var.h | 126
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_ip_icmp.h | 225
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_malloc.h | 203
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_mbuf.c | 1566
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_mbuf.h | 413
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_queue.h | 639
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_recv_thread.c | 1521
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_recv_thread.h | 34
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_route.h | 130
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_socket.c | 3590
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_socketvar.h | 527
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/user_uma.h | 96
-rw-r--r--  ext/sctp/usrsctp/usrsctplib/usrsctp.h | 1321
69 files changed, 93497 insertions, 0 deletions
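
Editor's note: the commit message's core rationale is that usrsctp keeps process-global state. As a reading aid, a minimal sketch of that global lifecycle, assuming the usrsctp_init()/usrsctp_finish() entry points declared in the imported usrsctp.h (the port number and error handling are illustrative only):

#include <unistd.h>
#include <usrsctp.h>

int main(void)
{
	/* One global stack per process; a second copy of the library in the
	 * same process would race on this state -- the reason for bundling. */
	usrsctp_init(9899, NULL, NULL);	/* 9899 = SCTP-over-UDP port, illustrative */

	/* ... usrsctp_socket(), data transfer, usrsctp_close() ... */

	/* usrsctp_finish() keeps returning non-zero while associations are
	 * still shutting down, so poll until the global state is torn down. */
	while (usrsctp_finish() != 0) {
		sleep(1);
	}
	return (0);
}
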
diff --git a/ext/sctp/usrsctp/.gitignore b/ext/sctp/usrsctp/.gitignore
new file mode 100644
index 000000000..9e2d1a2a3
--- /dev/null
+++ b/ext/sctp/usrsctp/.gitignore
@@ -0,0 +1,84 @@
+# Object files
+*.o
+*.ko
+*.obj
+*.elf
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Libraries
+*.lib
+*.a
+*.la
+*.lo
+
+# Shared objects (inc. Windows DLLs)
+*.dll
+*.so
+*.so.*
+*.dylib
+
+# Executables
+*.exe
+*.out
+*.app
+*.i*86
+*.x86_64
+*.hex
+
+# Debug files
+*.dSYM
+
+# Automake and make related stuff
+Makefile
+Makefile.in
+.deps
+.libs
+/autom4te.cache
+/aclocal.m4
+/compile
+/configure
+/depcomp
+/install-sh
+/missing
+/stamp-h1
+/config.guess
+/config.log
+/config.status
+/config.sub
+/libtool
+/ltmain.sh
+/m4
+/usrsctp.pc
+.dirstamp
+
+# binaries
+programs/chargen_server_upcall
+programs/client
+programs/client_upcall
+programs/daytime_server
+programs/daytime_server_upcall
+programs/discard_server
+programs/discard_server_upcall
+programs/echo_server
+programs/echo_server_upcall
+programs/ekr_client
+programs/ekr_loop
+programs/ekr_loop_upcall
+programs/ekr_peer
+programs/ekr_server
+programs/http_client
+programs/http_client_upcall
+programs/rtcweb
+programs/st_client
+programs/tsctp
+programs/test_libmgmt
+programs/test_timer
+
+# callgrind
+callgrind.out*
+
+# cmake build
+/build
diff --git a/ext/sctp/usrsctp/LICENSE.md b/ext/sctp/usrsctp/LICENSE.md
new file mode 100644
index 000000000..a2d1f989c
--- /dev/null
+++ b/ext/sctp/usrsctp/LICENSE.md
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Randall Stewart and Michael Tuexen
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of usrsctp nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ext/sctp/usrsctp/meson.build b/ext/sctp/usrsctp/meson.build
new file mode 100644
index 000000000..1d216e192
--- /dev/null
+++ b/ext/sctp/usrsctp/meson.build
@@ -0,0 +1,220 @@
+# Project definition
+project('usrsctplib', 'c',
+ version: '1.0.0',
+ default_options: ['c_std=c99'],
+ meson_version: '>=0.49.0')
+
+# Set compiler warning flags
+compiler = meson.get_compiler('c')
+if compiler.get_argument_syntax() == 'msvc'
+ compiler_args = compiler.get_supported_arguments([
+ '/wd4100', # 'identifier' : unreferenced formal parameter
+ '/wd4127', # conditional expression is constant
+ '/wd4200', # nonstandard extension used : zero-sized array in struct/union
+ '/wd4214', # bit field types other than int
+ '/wd4706', # assignment within conditional expression
+ '/wd4245', # 'conversion' : conversion from 'type1' to 'type2', signed/unsigned mismatch
+ '/wd4389', # 'operator' : signed/unsigned mismatch
+ '/wd4702', # unreachable code
+ '/wd4701', # Potentially uninitialized local variable 'name' used
+ '/wd4244', # 'conversion' conversion from 'type1' to 'type2', possible loss of data
+ ])
+else
+ compiler_args = compiler.get_supported_arguments([
+ '-pedantic',
+ '-Wall',
+ '-Wextra',
+ '-Wfloat-equal',
+ '-Wshadow',
+ '-Wpointer-arith',
+ '-Winit-self',
+ '-Wno-unused-function',
+ '-Wno-unused-parameter',
+ '-Wno-unreachable-code',
+ '-Wstrict-prototypes',
+ ])
+endif
+add_project_arguments(compiler_args, language: 'c')
+
+# Configuration
+compile_args = []
+
+# Dependency: Threads
+thread_dep = dependency('threads', required: true)
+
+# Dependencies list
+dependencies = [
+ thread_dep,
+]
+
+# Global settings
+add_project_arguments([
+ '-D__Userspace__',
+ '-DSCTP_SIMPLE_ALLOCATOR',
+ '-DSCTP_PROCESS_LEVEL_LOCKS',
+], language: 'c')
+
+# OS-specific settings
+system = host_machine.system()
+if system in ['linux', 'android']
+ add_project_arguments([
+ '-D_GNU_SOURCE',
+ ], language: 'c')
+elif system == 'freebsd'
+ add_project_arguments(compiler.get_supported_arguments([
+ '-Wno-address-of-packed-member',
+ ]), language: 'c')
+elif system in ['darwin', 'ios']
+ add_project_arguments([
+ '-D__APPLE_USE_RFC_2292',
+ ] + compiler.get_supported_arguments([
+ '-Wno-address-of-packed-member',
+ '-Wno-deprecated-declarations',
+ ]), language: 'c')
+elif system == 'windows'
+ dependencies += compiler.find_library('ws2_32', required: true)
+ dependencies += compiler.find_library('iphlpapi', required: true)
+ if compiler.get_id() == 'gcc'
+ add_project_arguments(compiler.get_supported_arguments([
+ '-Wno-format',
+ '-D_WIN32_WINNT=0x601', # Enables inet_ntop and friends
+ ]), language: 'c')
+ endif
+else
+ error('Unknown system: @0@'.format(system))
+endif
+
+# Feature: sys/queue
+if compiler.has_header('sys/queue.h')
+ add_project_arguments('-DHAVE_SYS_QUEUE_H', language: 'c')
+endif
+
+# Feature: sys/socket, linux/ifaddr, linux/rtnetlink
+if compiler.has_header('sys/socket.h')
+ if compiler.has_header('linux/if_addr.h')
+ add_project_arguments('-DHAVE_LINUX_IF_ADDR_H', language: 'c')
+ endif
+
+ if compiler.has_header('linux/rtnetlink.h')
+ add_project_arguments('-DHAVE_LINUX_RTNETLINK_H', language: 'c')
+ endif
+endif
+
+# Feature: ICMP
+have_sys_types = compiler.has_header('sys/types.h')
+have_netinet_in = compiler.has_header('netinet/in.h')
+have_netinet_ip = compiler.has_header('netinet/ip.h')
+have_netinet_ip_icmp = compiler.has_header('netinet/ip_icmp.h')
+if have_sys_types and have_netinet_in and have_netinet_ip and have_netinet_ip_icmp
+ add_project_arguments('-DHAVE_NETINET_IP_ICMP_H', language: 'c')
+endif
+
+# Feature: stdatomic
+if compiler.has_header('stdatomic.h')
+ add_project_arguments('-DHAVE_STDATOMIC_H', language: 'c')
+endif
+
+# Feature: sockaddr.sa_len
+prefix = '''
+#include <sys/types.h>
+#include <sys/socket.h>
+'''
+have_sa_len = compiler.has_member('struct sockaddr', 'sa_len', prefix: prefix)
+if have_sa_len
+ add_project_arguments('-DHAVE_SA_LEN', language: 'c')
+endif
+
+# Feature: sockaddr_in.sin_len / sockaddr_in6.sin6_len / sockaddr_conn.sconn_len
+prefix = '''
+#include <sys/types.h>
+#include <netinet/in.h>
+'''
+have_sin_len = compiler.has_member('struct sockaddr_in', 'sin_len', prefix: prefix)
+if have_sin_len
+ add_project_arguments('-DHAVE_SIN_LEN', language: 'c')
+endif
+have_sin6_len = compiler.has_member('struct sockaddr_in6', 'sin6_len', prefix: prefix)
+if have_sin6_len
+ add_project_arguments('-DHAVE_SIN6_LEN', language: 'c')
+endif
+have_sconn_len = compiler.has_member('struct sockaddr_conn', 'sconn_len', prefix: '#include "usrsctp.h"', include_directories: include_directories('usrsctplib'))
+if have_sconn_len
+ add_project_arguments('-DHAVE_SCONN_LEN', language: 'c')
+endif
+
+# Options
+if get_option('sctp_invariants')
+ add_project_arguments('-DINVARIANTS', language: 'c')
+endif
+if get_option('sctp_debug')
+ add_project_arguments('-DSCTP_DEBUG', language: 'c')
+ compile_args += '-DSCTP_DEBUG'
+endif
+if get_option('sctp_inet')
+ add_project_arguments('-DINET', language: 'c')
+endif
+if get_option('sctp_inet6')
+ add_project_arguments('-DINET6', language: 'c')
+endif
+
+# Library
+subdir('usrsctplib')
+
+# Build library
+if compiler.get_id() == 'msvc' and get_option('default_library') == 'shared'
+ # Needed by usrsctp_def
+ find_program('dumpbin')
+
+ usrsctp_static = static_library('usrsctp-static', sources,
+ dependencies: dependencies,
+ include_directories: include_dirs)
+
+ usrsctp_def = custom_target('usrsctp.def',
+ command: [find_program('gen-def.py'), '@INPUT@'],
+ input: usrsctp_static,
+ output: 'usrsctp.def',
+ capture: true)
+
+ usrsctp = shared_library('usrsctp',
+ link_whole: usrsctp_static,
+ dependencies: dependencies,
+ vs_module_defs: usrsctp_def,
+ install: true,
+ version: meson.project_version())
+else
+ usrsctp = library('usrsctp', sources,
+ dependencies: dependencies,
+ include_directories: include_dirs,
+ install: true,
+ version: meson.project_version(),
+ c_args: '-U__APPLE__')
+endif
+
+# Declare dependency
+usrsctp_dep = declare_dependency(
+ compile_args: compile_args,
+ include_directories: include_dirs,
+ link_with: usrsctp)
+
+# Generate pkg-config file
+pkg = import('pkgconfig')
+pkg.generate(usrsctp,
+ name: 'usrsctp',
+ description: 'A portable SCTP userland stack',
+ url: 'https://github.com/sctplab/usrsctp',
+ extra_cflags: compile_args)
+
+# Programs (optional)
+if get_option('sctp_build_programs')
+ subdir('programs')
+
+ # Build executables
+ foreach name, sources : programs
+ executable(
+ name,
+ programs_helper_sources + sources,
+ dependencies: dependencies,
+ link_with: usrsctp,
+ include_directories: include_dirs)
+ endforeach
+endif
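
Editor's note: the has_member() probes above exist because BSD-derived systems carry a length byte in their sockaddr structures that Linux lacks; the resulting HAVE_SA_LEN/HAVE_SIN_LEN/HAVE_SIN6_LEN defines guard every address initialization in the imported sources. A minimal sketch of the consuming pattern (init_addr is a hypothetical helper, using the same guard as the library code):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static void
init_addr(struct sockaddr_in *sin, uint16_t port)
{
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	/* only BSD-derived platforms have (and expect) this field */
	sin->sin_len = sizeof(struct sockaddr_in);
#endif
	sin->sin_port = htons(port);
	sin->sin_addr.s_addr = htonl(INADDR_ANY);
}
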
diff --git a/ext/sctp/usrsctp/meson_options.txt b/ext/sctp/usrsctp/meson_options.txt
new file mode 100644
index 000000000..56d950612
--- /dev/null
+++ b/ext/sctp/usrsctp/meson_options.txt
@@ -0,0 +1,10 @@
+option('sctp_invariants', type: 'boolean', value: false,
+ description: 'Add runtime checks')
+option('sctp_debug', type: 'boolean', value: false,
+ description: 'Provide debug information')
+option('sctp_inet', type: 'boolean', value: true,
+ description: 'Support IPv4')
+option('sctp_inet6', type: 'boolean', value: true,
+ description: 'Support IPv6')
+option('sctp_build_programs', type: 'boolean', value: true,
+ description: 'Build example programs')
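
Editor's note: these are ordinary Meson options, so when building this copy standalone they are toggled at configure time with Meson's standard -D syntax, e.g. meson setup builddir -Dsctp_debug=true -Dsctp_build_programs=false (build directory name illustrative).
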
diff --git a/ext/sctp/usrsctp/usrsctplib/Makefile.am b/ext/sctp/usrsctp/usrsctplib/Makefile.am
new file mode 100644
index 000000000..85240b7ce
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/Makefile.am
@@ -0,0 +1,81 @@
+#
+# Copyright (C) 2011-2012 Michael Tuexen
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the project nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+AUTOMAKE_OPTIONS = subdir-objects
+EXTRA_DIST = Makefile.nmake
+lib_LTLIBRARIES = libusrsctp.la
+libusrsctp_la_SOURCES = user_atomic.h \
+ user_environment.c user_environment.h \
+ user_inpcb.h \
+ user_ip_icmp.h \
+ user_ip6_var.h \
+ user_malloc.h \
+ user_mbuf.c \
+ user_mbuf.h \
+ user_queue.h \
+ user_recv_thread.c user_recv_thread.h \
+ user_route.h \
+ user_socket.c \
+ user_socketvar.h \
+ user_uma.h \
+ netinet/sctp.h \
+ netinet/sctp_asconf.c netinet/sctp_asconf.h \
+ netinet/sctp_auth.c netinet/sctp_auth.h \
+ netinet/sctp_bsd_addr.c netinet/sctp_bsd_addr.h \
+ netinet/sctp_callout.c netinet/sctp_callout.h \
+ netinet/sctp_cc_functions.c \
+ netinet/sctp_constants.h \
+ netinet/sctp_crc32.c netinet/sctp_crc32.h \
+ netinet/sctp_header.h \
+ netinet/sctp_indata.c netinet/sctp_indata.h \
+ netinet/sctp_input.c netinet/sctp_input.h \
+ netinet/sctp_lock_userspace.h \
+ netinet/sctp_os.h \
+ netinet/sctp_os_userspace.h \
+ netinet/sctp_output.c netinet/sctp_output.h \
+ netinet/sctp_pcb.c netinet/sctp_pcb.h \
+ netinet/sctp_peeloff.c netinet/sctp_peeloff.h \
+ netinet/sctp_process_lock.h \
+ netinet/sctp_sha1.c netinet/sctp_sha1.h \
+ netinet/sctp_ss_functions.c \
+ netinet/sctp_structs.h \
+ netinet/sctp_sysctl.c netinet/sctp_sysctl.h \
+ netinet/sctp_userspace.c \
+ netinet/sctp_timer.c netinet/sctp_timer.h \
+ netinet/sctp_uio.h \
+ netinet/sctp_usrreq.c \
+ netinet/sctp_var.h \
+ netinet/sctputil.c netinet/sctputil.h \
+ netinet6/sctp6_var.h \
+ netinet6/sctp6_usrreq.c
+libusrsctp_la_CFLAGS = $(LIBCFLAGS)
+libusrsctp_la_LDFLAGS = -version-info 1:0:0
+include_HEADERS = usrsctp.h
+
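Editor's note on the -version-info 1:0:0 flag above: this is libtool's current:revision:age triple, not a literal file version. On typical ELF platforms libtool derives the shared object version as (current-age).(age).(revision), so 1:0:0 yields libusrsctp.so.1.0.0.
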
diff --git a/ext/sctp/usrsctp/usrsctplib/meson.build b/ext/sctp/usrsctp/usrsctplib/meson.build
new file mode 100644
index 000000000..c1a6656f1
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/meson.build
@@ -0,0 +1,20 @@
+# Includes
+include_dirs = include_directories(
+ '.',
+ 'netinet',
+ 'netinet6',
+)
+
+# Sources
+sources = files([
+ 'user_environment.c',
+ 'user_mbuf.c',
+ 'user_recv_thread.c',
+ 'user_socket.c',
+])
+
+subdir('netinet')
+subdir('netinet6')
+
+# Install usrsctp.h
+install_headers('usrsctp.h')
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/meson.build b/ext/sctp/usrsctp/usrsctplib/netinet/meson.build
new file mode 100644
index 000000000..4a86615bc
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/meson.build
@@ -0,0 +1,20 @@
+sources += files([
+ 'sctp_asconf.c',
+ 'sctp_auth.c',
+ 'sctp_bsd_addr.c',
+ 'sctp_callout.c',
+ 'sctp_cc_functions.c',
+ 'sctp_crc32.c',
+ 'sctp_indata.c',
+ 'sctp_input.c',
+ 'sctp_output.c',
+ 'sctp_pcb.c',
+ 'sctp_peeloff.c',
+ 'sctp_sha1.c',
+ 'sctp_ss_functions.c',
+ 'sctp_sysctl.c',
+ 'sctp_timer.c',
+ 'sctp_userspace.c',
+ 'sctp_usrreq.c',
+ 'sctputil.c',
+])
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp.h
new file mode 100644
index 000000000..8d8bf31fe
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp.h
@@ -0,0 +1,672 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 356357 2020-01-04 20:33:12Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_H_
+#define _NETINET_SCTP_H_
+
+#if defined(__APPLE__) || defined(__linux__)
+#include <stdint.h>
+#endif
+
+#include <sys/types.h>
+
+
+#if !defined(_WIN32)
+#define SCTP_PACKED __attribute__((packed))
+#else
+#pragma pack (push, 1)
+#define SCTP_PACKED
+#endif
+
+/*
+ * SCTP protocol - RFC4960.
+ */
+struct sctphdr {
+ uint16_t src_port; /* source port */
+ uint16_t dest_port; /* destination port */
+ uint32_t v_tag; /* verification tag of packet */
+ uint32_t checksum; /* CRC32C checksum */
+ /* chunks follow... */
+} SCTP_PACKED;
+
+/*
+ * SCTP Chunks
+ */
+struct sctp_chunkhdr {
+ uint8_t chunk_type; /* chunk type */
+ uint8_t chunk_flags; /* chunk flags */
+ uint16_t chunk_length; /* chunk length */
+ /* optional params follow */
+} SCTP_PACKED;
+
+/*
+ * SCTP chunk parameters
+ */
+struct sctp_paramhdr {
+ uint16_t param_type; /* parameter type */
+ uint16_t param_length; /* parameter length */
+} SCTP_PACKED;
+
+/*
+ * user socket options: socket API defined
+ */
+/*
+ * read-write options
+ */
+#define SCTP_RTOINFO 0x00000001
+#define SCTP_ASSOCINFO 0x00000002
+#define SCTP_INITMSG 0x00000003
+#define SCTP_NODELAY 0x00000004
+#define SCTP_AUTOCLOSE 0x00000005
+#define SCTP_SET_PEER_PRIMARY_ADDR 0x00000006
+#define SCTP_PRIMARY_ADDR 0x00000007
+#define SCTP_ADAPTATION_LAYER 0x00000008
+/* same as above */
+#define SCTP_ADAPTION_LAYER 0x00000008
+#define SCTP_DISABLE_FRAGMENTS 0x00000009
+#define SCTP_PEER_ADDR_PARAMS 0x0000000a
+#define SCTP_DEFAULT_SEND_PARAM 0x0000000b
+/* ancillary data/notification interest options */
+#define SCTP_EVENTS 0x0000000c /* deprecated */
+/* Without this applied we will give V4 and V6 addresses on a V6 socket */
+#define SCTP_I_WANT_MAPPED_V4_ADDR 0x0000000d
+#define SCTP_MAXSEG 0x0000000e
+#define SCTP_DELAYED_SACK 0x0000000f
+#define SCTP_FRAGMENT_INTERLEAVE 0x00000010
+#define SCTP_PARTIAL_DELIVERY_POINT 0x00000011
+/* authentication support */
+#define SCTP_AUTH_CHUNK 0x00000012
+#define SCTP_AUTH_KEY 0x00000013
+#define SCTP_HMAC_IDENT 0x00000014
+#define SCTP_AUTH_ACTIVE_KEY 0x00000015
+#define SCTP_AUTH_DELETE_KEY 0x00000016
+#define SCTP_USE_EXT_RCVINFO 0x00000017
+#define SCTP_AUTO_ASCONF 0x00000018 /* rw */
+#define SCTP_MAXBURST 0x00000019 /* rw */
+#define SCTP_MAX_BURST 0x00000019 /* rw */
+/* assoc level context */
+#define SCTP_CONTEXT 0x0000001a /* rw */
+/* explicit EOR signalling */
+#define SCTP_EXPLICIT_EOR 0x0000001b
+#define SCTP_REUSE_PORT 0x0000001c /* rw */
+#define SCTP_AUTH_DEACTIVATE_KEY 0x0000001d
+#define SCTP_EVENT 0x0000001e
+#define SCTP_RECVRCVINFO 0x0000001f
+#define SCTP_RECVNXTINFO 0x00000020
+#define SCTP_DEFAULT_SNDINFO 0x00000021
+#define SCTP_DEFAULT_PRINFO 0x00000022
+#define SCTP_PEER_ADDR_THLDS 0x00000023
+#define SCTP_REMOTE_UDP_ENCAPS_PORT 0x00000024
+#define SCTP_ECN_SUPPORTED 0x00000025
+#define SCTP_PR_SUPPORTED 0x00000026
+#define SCTP_AUTH_SUPPORTED 0x00000027
+#define SCTP_ASCONF_SUPPORTED 0x00000028
+#define SCTP_RECONFIG_SUPPORTED 0x00000029
+#define SCTP_NRSACK_SUPPORTED 0x00000030
+#define SCTP_PKTDROP_SUPPORTED 0x00000031
+#define SCTP_MAX_CWND 0x00000032
+
+/*
+ * read-only options
+ */
+#define SCTP_STATUS 0x00000100
+#define SCTP_GET_PEER_ADDR_INFO 0x00000101
+/* authentication support */
+#define SCTP_PEER_AUTH_CHUNKS 0x00000102
+#define SCTP_LOCAL_AUTH_CHUNKS 0x00000103
+#define SCTP_GET_ASSOC_NUMBER 0x00000104 /* ro */
+#define SCTP_GET_ASSOC_ID_LIST 0x00000105 /* ro */
+#define SCTP_TIMEOUTS 0x00000106
+#define SCTP_PR_STREAM_STATUS 0x00000107
+#define SCTP_PR_ASSOC_STATUS 0x00000108
+
+/*
+ * user socket options: BSD implementation specific
+ */
+/*
+ * Blocking I/O is enabled on any TCP-type socket by default. For the UDP
+ * model, if this is turned on, then the socket send buffer is shared
+ * amongst all associations. The default for the UDP model is that SS_NBIO
+ * is set, which means all associations have a separate send limit BUT they
+ * will NOT ever BLOCK; instead you will get EAGAIN back if you try to send
+ * too much. If you want the blocking semantics you set this option, at the
+ * cost of sharing one socket send buffer size amongst all associations.
+ * Peeled-off sockets turn this option off and block. But since both TCP and
+ * peeled-off sockets have only one assoc per socket this is fine. It
+ * probably does NOT make sense to set SS_NBIO on a TCP model OR a
+ * peeled-off UDP model, but we do allow you to do so. You just use the
+ * normal syscall to toggle SS_NBIO the way you want.
+ *
+ * Blocking I/O is controlled by the SS_NBIO flag on the socket state so_state
+ * field.
+ */
+
+#define SCTP_ENABLE_STREAM_RESET 0x00000900 /* struct sctp_assoc_value */
+#define SCTP_RESET_STREAMS 0x00000901 /* struct sctp_reset_streams */
+#define SCTP_RESET_ASSOC 0x00000902 /* sctp_assoc_t */
+#define SCTP_ADD_STREAMS 0x00000903 /* struct sctp_add_streams */
+
+/* For enable stream reset */
+#define SCTP_ENABLE_RESET_STREAM_REQ 0x00000001
+#define SCTP_ENABLE_RESET_ASSOC_REQ 0x00000002
+#define SCTP_ENABLE_CHANGE_ASSOC_REQ 0x00000004
+#define SCTP_ENABLE_VALUE_MASK 0x00000007
+/* For reset streams */
+#define SCTP_STREAM_RESET_INCOMING 0x00000001
+#define SCTP_STREAM_RESET_OUTGOING 0x00000002
+
+
+/* here on down are more implementation specific */
+#define SCTP_SET_DEBUG_LEVEL 0x00001005
+#define SCTP_CLR_STAT_LOG 0x00001007
+/* CMT ON/OFF socket option */
+#define SCTP_CMT_ON_OFF 0x00001200
+#define SCTP_CMT_USE_DAC 0x00001201
+/* JRS - Pluggable Congestion Control Socket option */
+#define SCTP_PLUGGABLE_CC 0x00001202
+/* RS - Pluggable Stream Scheduling Socket option */
+#define SCTP_PLUGGABLE_SS 0x00001203
+#define SCTP_SS_VALUE 0x00001204
+#define SCTP_CC_OPTION 0x00001205 /* Options for CC modules */
+/* For I-DATA */
+#define SCTP_INTERLEAVING_SUPPORTED 0x00001206
+
+/* read only */
+#define SCTP_GET_SNDBUF_USE 0x00001101
+#define SCTP_GET_STAT_LOG 0x00001103
+#define SCTP_PCB_STATUS 0x00001104
+#define SCTP_GET_NONCE_VALUES 0x00001105
+
+
+/* Special hook for dynamically setting the primary for all assocs;
+ * this is a write-only option that requires root privilege.
+ */
+#define SCTP_SET_DYNAMIC_PRIMARY 0x00002001
+
+/* VRF (virtual router feature) and multi-VRF support
+ * options. VRF's provide splits within a router
+ * that give the views of multiple routers. A
+ * standard host, without VRF support, is just
+ * a single VRF. If VRF's are supported then
+ * the transport must be VRF aware. This means
+ * that every socket call coming in must be directed
+ * within the endpoint to one of the VRF's it belongs
+ * to. The endpoint, before binding, may select
+ * the "default" VRF it is in by using a set socket
+ * option with SCTP_VRF_ID. This will also
+ * get propagated to the default VRF. Once the
+ * endpoint binds an address then it CANNOT add
+ * additional VRF's to become a Multi-VRF endpoint.
+ *
+ * Before BINDING additional VRF's can be added with
+ * the SCTP_ADD_VRF_ID call or deleted with
+ * SCTP_DEL_VRF_ID.
+ *
+ * Associations are ALWAYS contained inside a single
+ * VRF. They cannot reside in two (or more) VRF's. Incoming
+ * packets, assuming the router is VRF aware, can always
+ * tell us what VRF they arrived on. A host not supporting
+ * any VRF's will find that the packets always arrived on the
+ * single VRF that the host has.
+ *
+ */
+
+#define SCTP_VRF_ID 0x00003001
+#define SCTP_ADD_VRF_ID 0x00003002
+#define SCTP_GET_VRF_IDS 0x00003003
+#define SCTP_GET_ASOC_VRF 0x00003004
+#define SCTP_DEL_VRF_ID 0x00003005
+
+/*
+ * If you enable packet logging you can get
+ * a poor man's ethereal output in binary
+ * form. Note this is a compile option to
+ * the kernel, SCTP_PACKET_LOGGING, and
+ * without it in your kernel you
+ * will get a EOPNOTSUPP
+ */
+#define SCTP_GET_PACKET_LOG 0x00004001
+
+/*
+ * hidden implementation specific options these are NOT user visible (should
+ * move out of sctp.h)
+ */
+/* sctp_bindx() flags as hidden socket options */
+#define SCTP_BINDX_ADD_ADDR 0x00008001
+#define SCTP_BINDX_REM_ADDR 0x00008002
+/* Hidden socket option that gets the addresses */
+#define SCTP_GET_PEER_ADDRESSES 0x00008003
+#define SCTP_GET_LOCAL_ADDRESSES 0x00008004
+/* return the total count in bytes needed to hold all local addresses bound */
+#define SCTP_GET_LOCAL_ADDR_SIZE 0x00008005
+/* Return the total count in bytes needed to hold the remote address */
+#define SCTP_GET_REMOTE_ADDR_SIZE 0x00008006
+/* hidden option for connectx */
+#define SCTP_CONNECT_X 0x00008007
+/* hidden option for connectx_delayed, part of sendx */
+#define SCTP_CONNECT_X_DELAYED 0x00008008
+#define SCTP_CONNECT_X_COMPLETE 0x00008009
+/* hidden socket option based sctp_peeloff */
+#define SCTP_PEELOFF 0x0000800a
+/* the real worker for sctp_getaddrlen() */
+#define SCTP_GET_ADDR_LEN 0x0000800b
+#if defined(__APPLE__) && !defined(__Userspace__)
+/* temporary workaround for Apple listen() issue, no args used */
+#define SCTP_LISTEN_FIX 0x0000800c
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+/* workaround for Cygwin on Windows: returns the SOCKET handle */
+#define SCTP_GET_HANDLE 0x0000800d
+#endif
+/* Debug things that need to be purged */
+#define SCTP_SET_INITIAL_DBG_SEQ 0x00009f00
+
+/* JRS - Supported congestion control modules for pluggable
+ * congestion control
+ */
+/* Standard TCP Congestion Control */
+#define SCTP_CC_RFC2581 0x00000000
+/* High Speed TCP Congestion Control (Floyd) */
+#define SCTP_CC_HSTCP 0x00000001
+/* HTCP Congestion Control */
+#define SCTP_CC_HTCP 0x00000002
+/* RTCC Congestion Control - RFC2581 plus */
+#define SCTP_CC_RTCC 0x00000003
+
+#define SCTP_CC_OPT_RTCC_SETMODE 0x00002000
+#define SCTP_CC_OPT_USE_DCCC_ECN 0x00002001
+#define SCTP_CC_OPT_STEADY_STEP 0x00002002
+
+#define SCTP_CMT_OFF 0
+#define SCTP_CMT_BASE 1
+#define SCTP_CMT_RPV1 2
+#define SCTP_CMT_RPV2 3
+#define SCTP_CMT_MPTCP 4
+#define SCTP_CMT_MAX SCTP_CMT_MPTCP
+
+/* RS - Supported stream scheduling modules for pluggable
+ * stream scheduling
+ */
+/* Default simple round-robin */
+#define SCTP_SS_DEFAULT 0x00000000
+/* Real round-robin */
+#define SCTP_SS_ROUND_ROBIN 0x00000001
+/* Real round-robin per packet */
+#define SCTP_SS_ROUND_ROBIN_PACKET 0x00000002
+/* Priority */
+#define SCTP_SS_PRIORITY 0x00000003
+/* Fair Bandwidth */
+#define SCTP_SS_FAIR_BANDWITH 0x00000004
+/* First-come, first-serve */
+#define SCTP_SS_FIRST_COME 0x00000005
+
+
+/* fragment interleave constants:
+ * the setting must be one of these or
+ * EINVAL is returned.
+ */
+#define SCTP_FRAG_LEVEL_0 0x00000000
+#define SCTP_FRAG_LEVEL_1 0x00000001
+#define SCTP_FRAG_LEVEL_2 0x00000002
+
+/*
+ * user state values
+ */
+#define SCTP_CLOSED 0x0000
+#define SCTP_BOUND 0x1000
+#define SCTP_LISTEN 0x2000
+#define SCTP_COOKIE_WAIT 0x0002
+#define SCTP_COOKIE_ECHOED 0x0004
+#define SCTP_ESTABLISHED 0x0008
+#define SCTP_SHUTDOWN_SENT 0x0010
+#define SCTP_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_SHUTDOWN_PENDING 0x0080
+
+/*
+ * SCTP operational error codes (user visible)
+ */
+#define SCTP_CAUSE_NO_ERROR 0x0000
+#define SCTP_CAUSE_INVALID_STREAM 0x0001
+#define SCTP_CAUSE_MISSING_PARAM 0x0002
+#define SCTP_CAUSE_STALE_COOKIE 0x0003
+#define SCTP_CAUSE_OUT_OF_RESC 0x0004
+#define SCTP_CAUSE_UNRESOLVABLE_ADDR 0x0005
+#define SCTP_CAUSE_UNRECOG_CHUNK 0x0006
+#define SCTP_CAUSE_INVALID_PARAM 0x0007
+#define SCTP_CAUSE_UNRECOG_PARAM 0x0008
+#define SCTP_CAUSE_NO_USER_DATA 0x0009
+#define SCTP_CAUSE_COOKIE_IN_SHUTDOWN 0x000a
+#define SCTP_CAUSE_RESTART_W_NEWADDR 0x000b
+#define SCTP_CAUSE_USER_INITIATED_ABT 0x000c
+#define SCTP_CAUSE_PROTOCOL_VIOLATION 0x000d
+
+/* Error causes from RFC5061 */
+#define SCTP_CAUSE_DELETING_LAST_ADDR 0x00a0
+#define SCTP_CAUSE_RESOURCE_SHORTAGE 0x00a1
+#define SCTP_CAUSE_DELETING_SRC_ADDR 0x00a2
+#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0x00a3
+#define SCTP_CAUSE_REQUEST_REFUSED 0x00a4
+
+/* Error causes from nat-draft */
+#define SCTP_CAUSE_NAT_COLLIDING_STATE 0x00b0
+#define SCTP_CAUSE_NAT_MISSING_STATE 0x00b1
+
+/* Error causes from RFC4895 */
+#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
+
+/*
+ * error cause parameters (user visible)
+ */
+struct sctp_gen_error_cause {
+ uint16_t code;
+ uint16_t length;
+ uint8_t info[];
+} SCTP_PACKED;
+
+struct sctp_error_cause {
+ uint16_t code;
+ uint16_t length;
+ /* optional cause-specific info may follow */
+} SCTP_PACKED;
+
+struct sctp_error_invalid_stream {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_INVALID_STREAM */
+ uint16_t stream_id; /* stream id of the DATA in error */
+ uint16_t reserved;
+} SCTP_PACKED;
+
+struct sctp_error_missing_param {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_MISSING_PARAM */
+ uint32_t num_missing_params; /* number of missing parameters */
+ uint16_t type[];
+} SCTP_PACKED;
+
+struct sctp_error_stale_cookie {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_STALE_COOKIE */
+ uint32_t stale_time; /* time in usec of staleness */
+} SCTP_PACKED;
+
+struct sctp_error_out_of_resource {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_OUT_OF_RESOURCES */
+} SCTP_PACKED;
+
+struct sctp_error_unresolv_addr {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_UNRESOLVABLE_ADDR */
+} SCTP_PACKED;
+
+struct sctp_error_unrecognized_chunk {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_UNRECOG_CHUNK */
+ struct sctp_chunkhdr ch;/* header from chunk in error */
+} SCTP_PACKED;
+
+struct sctp_error_no_user_data {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_NO_USER_DATA */
+ uint32_t tsn; /* TSN of the empty data chunk */
+} SCTP_PACKED;
+
+struct sctp_error_auth_invalid_hmac {
+ struct sctp_error_cause cause; /* code=SCTP_CAUSE_UNSUPPORTED_HMACID */
+ uint16_t hmac_id;
+} SCTP_PACKED;
+
+/*
+ * Main SCTP chunk types. We place these here so natd and f/w's in
+ * userland can find them.
+ */
+/************0x00 series ***********/
+#define SCTP_DATA 0x00
+#define SCTP_INITIATION 0x01
+#define SCTP_INITIATION_ACK 0x02
+#define SCTP_SELECTIVE_ACK 0x03
+#define SCTP_HEARTBEAT_REQUEST 0x04
+#define SCTP_HEARTBEAT_ACK 0x05
+#define SCTP_ABORT_ASSOCIATION 0x06
+#define SCTP_SHUTDOWN 0x07
+#define SCTP_SHUTDOWN_ACK 0x08
+#define SCTP_OPERATION_ERROR 0x09
+#define SCTP_COOKIE_ECHO 0x0a
+#define SCTP_COOKIE_ACK 0x0b
+#define SCTP_ECN_ECHO 0x0c
+#define SCTP_ECN_CWR 0x0d
+#define SCTP_SHUTDOWN_COMPLETE 0x0e
+/* RFC4895 */
+#define SCTP_AUTHENTICATION 0x0f
+/* EY nr_sack chunk id*/
+#define SCTP_NR_SELECTIVE_ACK 0x10
+/************0x40 series ***********/
+#define SCTP_IDATA 0x40
+/************0x80 series ***********/
+/* RFC5061 */
+#define SCTP_ASCONF_ACK 0x80
+/* draft-ietf-stewart-pktdrpsctp */
+#define SCTP_PACKET_DROPPED 0x81
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STREAM_RESET 0x82
+
+/* RFC4820 */
+#define SCTP_PAD_CHUNK 0x84
+/************0xc0 series ***********/
+/* RFC3758 */
+#define SCTP_FORWARD_CUM_TSN 0xc0
+/* RFC5061 */
+#define SCTP_ASCONF 0xc1
+#define SCTP_IFORWARD_CUM_TSN 0xc2
+
+/* ABORT and SHUTDOWN COMPLETE FLAG */
+#define SCTP_HAD_NO_TCB 0x01
+
+/* Packet dropped flags */
+#define SCTP_FROM_MIDDLE_BOX SCTP_HAD_NO_TCB
+#define SCTP_BADCRC 0x02
+#define SCTP_PACKET_TRUNCATED 0x04
+
+/* Flag for ECN -CWR */
+#define SCTP_CWR_REDUCE_OVERRIDE 0x01
+#define SCTP_CWR_IN_SAME_WINDOW 0x02
+
+#define SCTP_SAT_NETWORK_MIN 400 /* min ms for RTT to set satellite
+ * time */
+#define SCTP_SAT_NETWORK_BURST_INCR 2 /* how many times to multiply maxburst
+ * in sat */
+
+/* Data Chunk Specific Flags */
+#define SCTP_DATA_FRAG_MASK 0x03
+#define SCTP_DATA_MIDDLE_FRAG 0x00
+#define SCTP_DATA_LAST_FRAG 0x01
+#define SCTP_DATA_FIRST_FRAG 0x02
+#define SCTP_DATA_NOT_FRAG 0x03
+#define SCTP_DATA_UNORDERED 0x04
+#define SCTP_DATA_SACK_IMMEDIATELY 0x08
+/* ECN Nonce: SACK Chunk Specific Flags */
+#define SCTP_SACK_NONCE_SUM 0x01
+
+/* CMT DAC algorithm SACK flag */
+#define SCTP_SACK_CMT_DAC 0x80
+
+/*
+ * PCB flags (in sctp_flags bitmask).
+ * Note the features and flags are meant
+ * for use by netstat.
+ */
+#define SCTP_PCB_FLAGS_UDPTYPE 0x00000001
+#define SCTP_PCB_FLAGS_TCPTYPE 0x00000002
+#define SCTP_PCB_FLAGS_BOUNDALL 0x00000004
+#define SCTP_PCB_FLAGS_ACCEPTING 0x00000008
+#define SCTP_PCB_FLAGS_UNBOUND 0x00000010
+#define SCTP_PCB_FLAGS_SND_ITERATOR_UP 0x00000020
+#define SCTP_PCB_FLAGS_CLOSE_IP 0x00040000
+#define SCTP_PCB_FLAGS_WAS_CONNECTED 0x00080000
+#define SCTP_PCB_FLAGS_WAS_ABORTED 0x00100000
+/* TCP model support */
+
+#define SCTP_PCB_FLAGS_CONNECTED 0x00200000
+#define SCTP_PCB_FLAGS_IN_TCPPOOL 0x00400000
+#define SCTP_PCB_FLAGS_DONT_WAKE 0x00800000
+#define SCTP_PCB_FLAGS_WAKEOUTPUT 0x01000000
+#define SCTP_PCB_FLAGS_WAKEINPUT 0x02000000
+#define SCTP_PCB_FLAGS_BOUND_V6 0x04000000
+#define SCTP_PCB_FLAGS_BLOCKING_IO 0x08000000
+#define SCTP_PCB_FLAGS_SOCKET_GONE 0x10000000
+#define SCTP_PCB_FLAGS_SOCKET_ALLGONE 0x20000000
+#define SCTP_PCB_FLAGS_SOCKET_CANT_READ 0x40000000
+#if defined(__Userspace__)
+#define SCTP_PCB_FLAGS_BOUND_CONN 0x80000000
+
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS (SCTP_PCB_FLAGS_BOUNDALL|\
+ SCTP_PCB_FLAGS_WAKEINPUT|\
+ SCTP_PCB_FLAGS_BOUND_V6|\
+ SCTP_PCB_FLAGS_BOUND_CONN)
+#else
+
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS (SCTP_PCB_FLAGS_BOUNDALL|\
+ SCTP_PCB_FLAGS_WAKEINPUT|\
+ SCTP_PCB_FLAGS_BOUND_V6)
+#endif
+
+/*
+ * PCB Features (in sctp_features bitmask)
+ */
+#define SCTP_PCB_FLAGS_DO_NOT_PMTUD 0x0000000000000001
+#define SCTP_PCB_FLAGS_EXT_RCVINFO 0x0000000000000002 /* deprecated */
+#define SCTP_PCB_FLAGS_DONOT_HEARTBEAT 0x0000000000000004
+#define SCTP_PCB_FLAGS_FRAG_INTERLEAVE 0x0000000000000008
+#define SCTP_PCB_FLAGS_INTERLEAVE_STRMS 0x0000000000000010
+#define SCTP_PCB_FLAGS_DO_ASCONF 0x0000000000000020
+#define SCTP_PCB_FLAGS_AUTO_ASCONF 0x0000000000000040
+/* socket options */
+#define SCTP_PCB_FLAGS_NODELAY 0x0000000000000100
+#define SCTP_PCB_FLAGS_AUTOCLOSE 0x0000000000000200
+#define SCTP_PCB_FLAGS_RECVDATAIOEVNT 0x0000000000000400 /* deprecated */
+#define SCTP_PCB_FLAGS_RECVASSOCEVNT 0x0000000000000800
+#define SCTP_PCB_FLAGS_RECVPADDREVNT 0x0000000000001000
+#define SCTP_PCB_FLAGS_RECVPEERERR 0x0000000000002000
+#define SCTP_PCB_FLAGS_RECVSENDFAILEVNT 0x0000000000004000 /* deprecated */
+#define SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT 0x0000000000008000
+#define SCTP_PCB_FLAGS_ADAPTATIONEVNT 0x0000000000010000
+#define SCTP_PCB_FLAGS_PDAPIEVNT 0x0000000000020000
+#define SCTP_PCB_FLAGS_AUTHEVNT 0x0000000000040000
+#define SCTP_PCB_FLAGS_STREAM_RESETEVNT 0x0000000000080000
+#define SCTP_PCB_FLAGS_NO_FRAGMENT 0x0000000000100000
+#define SCTP_PCB_FLAGS_EXPLICIT_EOR 0x0000000000400000
+#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 0x0000000000800000
+#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS 0x0000000001000000
+#define SCTP_PCB_FLAGS_PORTREUSE 0x0000000002000000
+#define SCTP_PCB_FLAGS_DRYEVNT 0x0000000004000000
+#define SCTP_PCB_FLAGS_RECVRCVINFO 0x0000000008000000
+#define SCTP_PCB_FLAGS_RECVNXTINFO 0x0000000010000000
+#define SCTP_PCB_FLAGS_ASSOC_RESETEVNT 0x0000000020000000
+#define SCTP_PCB_FLAGS_STREAM_CHANGEEVNT 0x0000000040000000
+#define SCTP_PCB_FLAGS_RECVNSENDFAILEVNT 0x0000000080000000
+
+/*-
+ * mobility_features parameters (by micchie). Note
+ * these features are applied against the
+ * sctp_mobility_features flags, not the sctp_features
+ * flags.
+ */
+#define SCTP_MOBILITY_BASE 0x00000001
+#define SCTP_MOBILITY_FASTHANDOFF 0x00000002
+#define SCTP_MOBILITY_PRIM_DELETED 0x00000004
+
+
+/* Smallest PMTU allowed when disabling PMTU discovery */
+#define SCTP_SMALLEST_PMTU 512
+/* Largest PMTU allowed when disabling PMTU discovery */
+#define SCTP_LARGEST_PMTU 65536
+
+#if defined(_WIN32)
+#pragma pack(pop)
+#endif
+#undef SCTP_PACKED
+
+#include <netinet/sctp_uio.h>
+
+/* This dictates the size of the packet
+ * collection buffer. This only applies
+ * if SCTP_PACKET_LOGGING is enabled in
+ * your config.
+ */
+#define SCTP_PACKET_LOG_SIZE 65536
+
+/* Maximum delays and such a user can set for options that
+ * take ms.
+ */
+#define SCTP_MAX_SACK_DELAY 500 /* per RFC4960 */
+#define SCTP_MAX_HB_INTERVAL 14400000 /* 4 hours in ms */
+#define SCTP_MAX_COOKIE_LIFE 3600000 /* 1 hour in ms */
+
+
+/* Types of logging/KTR tracing that can be enabled via the
+ * sysctl net.inet.sctp.sctp_logging. You must also enable
+ * SUBSYS tracing.
+ * Note that you must have the SCTP option in the kernel
+ * to enable these as well.
+ */
+#define SCTP_BLK_LOGGING_ENABLE 0x00000001
+#define SCTP_CWND_MONITOR_ENABLE 0x00000002
+#define SCTP_CWND_LOGGING_ENABLE 0x00000004
+#define SCTP_FLIGHT_LOGGING_ENABLE 0x00000020
+#define SCTP_FR_LOGGING_ENABLE 0x00000040
+#define SCTP_LOCK_LOGGING_ENABLE 0x00000080
+#define SCTP_MAP_LOGGING_ENABLE 0x00000100
+#define SCTP_MBCNT_LOGGING_ENABLE 0x00000200
+#define SCTP_MBUF_LOGGING_ENABLE 0x00000400
+#define SCTP_NAGLE_LOGGING_ENABLE 0x00000800
+#define SCTP_RECV_RWND_LOGGING_ENABLE 0x00001000
+#define SCTP_RTTVAR_LOGGING_ENABLE 0x00002000
+#define SCTP_SACK_LOGGING_ENABLE 0x00004000
+#define SCTP_SACK_RWND_LOGGING_ENABLE 0x00008000
+#define SCTP_SB_LOGGING_ENABLE 0x00010000
+#define SCTP_STR_LOGGING_ENABLE 0x00020000
+#define SCTP_WAKE_LOGGING_ENABLE 0x00040000
+#define SCTP_LOG_MAXBURST_ENABLE 0x00080000
+#define SCTP_LOG_RWND_ENABLE 0x00100000
+#define SCTP_LOG_SACK_ARRIVALS_ENABLE 0x00200000
+#define SCTP_LTRACE_CHUNK_ENABLE 0x00400000
+#define SCTP_LTRACE_ERROR_ENABLE 0x00800000
+#define SCTP_LAST_PACKET_TRACING 0x01000000
+#define SCTP_THRESHOLD_LOGGING 0x02000000
+#define SCTP_LOG_AT_SEND_2_SCTP 0x04000000
+#define SCTP_LOG_AT_SEND_2_OUTQ 0x08000000
+#define SCTP_LOG_TRY_ADVANCE 0x10000000
+
+#endif /* !_NETINET_SCTP_H_ */
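
Editor's note: the packed wire-format structs at the top of this header (struct sctphdr, struct sctp_chunkhdr) are enough to walk the chunks of a received packet. Per RFC 4960, chunk_length excludes padding but chunks start on 4-byte boundaries. A minimal sketch under those assumptions, including the header shown above (CRC32c validation and unaligned-access handling are deliberately out of scope):

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/sctp.h>

static void
walk_chunks(const uint8_t *pkt, size_t len)
{
	size_t off = sizeof(struct sctphdr);	/* common header comes first */

	while (off + sizeof(struct sctp_chunkhdr) <= len) {
		const struct sctp_chunkhdr *ch =
		    (const struct sctp_chunkhdr *)(pkt + off);
		uint16_t clen = ntohs(ch->chunk_length);

		if (clen < sizeof(struct sctp_chunkhdr) || off + clen > len) {
			break;	/* malformed chunk, stop */
		}
		/* ch->chunk_type is one of SCTP_DATA, SCTP_INITIATION, ... */
		off += ((size_t)clen + 3) & ~(size_t)3;	/* pad to 4 bytes */
	}
}
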
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.c
new file mode 100644
index 000000000..81e2acfd5
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.c
@@ -0,0 +1,3524 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 363194 2020-07-14 20:32:50Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_timer.h>
+
+/*
+ * debug flags:
+ * SCTP_DEBUG_ASCONF1: protocol info, general info and errors
+ * SCTP_DEBUG_ASCONF2: detailed info
+ */
+
+/*
+ * RFC 5061
+ *
+ * An ASCONF parameter queue exists per asoc which holds the pending address
+ * operations. Lists are updated upon receipt of ASCONF-ACK.
+ *
+ * A restricted_addrs list exists per assoc to hold local addresses that are
+ * not (yet) usable by the assoc as a source address. These addresses are
+ * either pending an ASCONF operation (and exist on the ASCONF parameter
+ * queue), or they are permanently restricted (the peer has returned an
+ * ERROR indication to an ASCONF(ADD), or the peer does not support ASCONF).
+ *
+ * Deleted addresses are always immediately removed from the lists as they will
+ * (shortly) no longer exist in the kernel. We send ASCONFs as a courtesy,
+ * only if allowed.
+ */
+
+/*
+ * ASCONF parameter processing.
+ * response_required: set if a reply is required (eg. SUCCESS_REPORT).
+ * returns a mbuf to an "error" response parameter or NULL/"success" if ok.
+ * FIX: allocating this many mbufs on the fly is pretty inefficient...
+ */
+static struct mbuf *
+sctp_asconf_success_response(uint32_t id)
+{
+ struct mbuf *m_reply = NULL;
+ struct sctp_asconf_paramhdr *aph;
+
+ m_reply = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_paramhdr),
+ 0, M_NOWAIT, 1, MT_DATA);
+ if (m_reply == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_success_response: couldn't get mbuf!\n");
+ return (NULL);
+ }
+ aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+ aph->correlation_id = id;
+ aph->ph.param_type = htons(SCTP_SUCCESS_REPORT);
+ aph->ph.param_length = sizeof(struct sctp_asconf_paramhdr);
+ SCTP_BUF_LEN(m_reply) = aph->ph.param_length;
+ aph->ph.param_length = htons(aph->ph.param_length);
+
+ return (m_reply);
+}
+
+static struct mbuf *
+sctp_asconf_error_response(uint32_t id, uint16_t cause, uint8_t *error_tlv,
+ uint16_t tlv_length)
+{
+ struct mbuf *m_reply = NULL;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_error_cause *error;
+ uint32_t buf_len;
+ uint16_t i, param_length, cause_length, padding_length;
+ uint8_t *tlv;
+
+ if (error_tlv == NULL) {
+ tlv_length = 0;
+ }
+ cause_length = sizeof(struct sctp_error_cause) + tlv_length;
+ param_length = sizeof(struct sctp_asconf_paramhdr) + cause_length;
+ padding_length = tlv_length % 4;
+ if (padding_length != 0) {
+ padding_length = 4 - padding_length;
+ }
+ buf_len = param_length + padding_length;
+ if (buf_len > MLEN) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_error_response: tlv_length (%xh) too big\n",
+ tlv_length);
+ return (NULL);
+ }
+ m_reply = sctp_get_mbuf_for_msg(buf_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_reply == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_error_response: couldn't get mbuf!\n");
+ return (NULL);
+ }
+ aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+ aph->ph.param_type = htons(SCTP_ERROR_CAUSE_IND);
+ aph->ph.param_length = htons(param_length);
+ aph->correlation_id = id;
+ error = (struct sctp_error_cause *)(aph + 1);
+ error->code = htons(cause);
+ error->length = htons(cause_length);
+ if (error_tlv != NULL) {
+ tlv = (uint8_t *) (error + 1);
+ memcpy(tlv, error_tlv, tlv_length);
+ for (i = 0; i < padding_length; i++) {
+ tlv[tlv_length + i] = 0;
+ }
+ }
+ SCTP_BUF_LEN(m_reply) = buf_len;
+ return (m_reply);
+}
+
+static struct mbuf *
+sctp_process_asconf_add_ip(struct sockaddr *src, struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int send_hb, int response_required)
+{
+ struct sctp_nets *net;
+ struct mbuf *m_reply = NULL;
+ union sctp_sockstore store;
+ struct sctp_paramhdr *ph;
+ uint16_t param_type, aparam_length;
+#if defined(INET) || defined(INET6)
+ uint16_t param_length;
+#endif
+ struct sockaddr *sa;
+ int zero_address = 0;
+ int bad_address = 0;
+#ifdef INET
+ struct sockaddr_in *sin;
+ struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+#endif
+
+ aparam_length = ntohs(aph->ph.param_length);
+ if (aparam_length < sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_paramhdr)) {
+ return (NULL);
+ }
+ ph = (struct sctp_paramhdr *)(aph + 1);
+ param_type = ntohs(ph->param_type);
+#if defined(INET) || defined(INET6)
+ param_length = ntohs(ph->param_length);
+ if (param_length + sizeof(struct sctp_asconf_paramhdr) != aparam_length) {
+ return (NULL);
+ }
+#endif
+ sa = &store.sa;
+ switch (param_type) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return (NULL);
+ }
+ v4addr = (struct sctp_ipv4addr_param *)ph;
+ sin = &store.sin;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+ sin->sin_port = stcb->rport;
+ sin->sin_addr.s_addr = v4addr->addr;
+ if ((sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+ bad_address = 1;
+ }
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return (NULL);
+ }
+ v6addr = (struct sctp_ipv6addr_param *)ph;
+ sin6 = &store.sin6;
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ sin6->sin6_port = stcb->rport;
+ memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+ bad_address = 1;
+ }
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+#endif
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
+ aparam_length);
+ return (m_reply);
+ } /* end switch */
+
+ /* if 0.0.0.0/::0, add the source address instead */
+ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+ sa = src;
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_add_ip: using source addr ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+ }
+ net = NULL;
+ /* add the address */
+ if (bad_address) {
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
+ aparam_length);
+ } else if (sctp_add_remote_addr(stcb, sa, &net, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE,
+ SCTP_ADDR_DYNAMIC_ADDED) != 0) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_add_ip: error adding address\n");
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *) aph,
+ aparam_length);
+ } else {
+ if (response_required) {
+ m_reply =
+ sctp_asconf_success_response(aph->correlation_id);
+ }
+ if (net != NULL) {
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, net);
+ if (send_hb) {
+ sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ }
+ return (m_reply);
+}
+
+static int
+sctp_asconf_del_remote_addrs_except(struct sctp_tcb *stcb, struct sockaddr *src)
+{
+ struct sctp_nets *src_net, *net, *nnet;
+
+ /* make sure the source address exists as a destination net */
+ src_net = sctp_findnet(stcb, src);
+ if (src_net == NULL) {
+ /* not found */
+ return (-1);
+ }
+
+ /* delete all destination addresses except the source */
+ TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) {
+ if (net != src_net) {
+ /* delete this address */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_del_remote_addrs_except: deleting ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1,
+ (struct sockaddr *)&net->ro._l_addr);
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0,
+ (struct sockaddr *)&net->ro._l_addr, SCTP_SO_NOT_LOCKED);
+ sctp_remove_net(stcb, net);
+ }
+ }
+ return (0);
+}
+
+static struct mbuf *
+sctp_process_asconf_delete_ip(struct sockaddr *src,
+ struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ union sctp_sockstore store;
+ struct sctp_paramhdr *ph;
+ uint16_t param_type, aparam_length;
+#if defined(INET) || defined(INET6)
+ uint16_t param_length;
+#endif
+ struct sockaddr *sa;
+ int zero_address = 0;
+ int result;
+#ifdef INET
+ struct sockaddr_in *sin;
+ struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+#endif
+
+ aparam_length = ntohs(aph->ph.param_length);
+ if (aparam_length < sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_paramhdr)) {
+ return (NULL);
+ }
+ ph = (struct sctp_paramhdr *)(aph + 1);
+ param_type = ntohs(ph->param_type);
+#if defined(INET) || defined(INET6)
+ param_length = ntohs(ph->param_length);
+ if (param_length + sizeof(struct sctp_asconf_paramhdr) != aparam_length) {
+ return (NULL);
+ }
+#endif
+ sa = &store.sa;
+ switch (param_type) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return (NULL);
+ }
+ v4addr = (struct sctp_ipv4addr_param *)ph;
+ sin = &store.sin;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+ sin->sin_port = stcb->rport;
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_delete_ip: deleting ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return (NULL);
+ }
+ v6addr = (struct sctp_ipv6addr_param *)ph;
+ sin6 = &store.sin6;
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ sin6->sin6_port = stcb->rport;
+ memcpy(&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_delete_ip: deleting ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+#endif
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return (m_reply);
+ }
+
+ /* make sure the source address is not being deleted */
+ if (sctp_cmpaddr(sa, src)) {
+ /* trying to delete the source address! */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete source addr\n");
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_DELETING_SRC_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return (m_reply);
+ }
+
+ /* if deleting 0.0.0.0/::0, delete all addresses except src addr */
+ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+ result = sctp_asconf_del_remote_addrs_except(stcb, src);
+
+ if (result) {
+ /* src address did not exist? */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: src addr does not exist?\n");
+ /* what error to reply with?? */
+ m_reply =
+ sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_REQUEST_REFUSED, (uint8_t *) aph,
+ aparam_length);
+ } else if (response_required) {
+ m_reply =
+ sctp_asconf_success_response(aph->correlation_id);
+ }
+ return (m_reply);
+ }
+
+ /* delete the address */
+ result = sctp_del_remote_addr(stcb, sa);
+ /*
+	 * note: if result == -2, the address doesn't exist in the asoc, but
+	 * since it's being deleted anyway, we just ack the delete -- though
+	 * this probably means something has already gone awry
+ */
+ if (result == -1) {
+ /* only one address in the asoc */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete last IP addr!\n");
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_DELETING_LAST_ADDR, (uint8_t *) aph,
+ aparam_length);
+ } else {
+ if (response_required) {
+ m_reply = sctp_asconf_success_response(aph->correlation_id);
+ }
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+ }
+ return (m_reply);
+}
+
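+/*
+ * process an incoming ASCONF SET PRIMARY ADDRESS parameter.
+ * returns an mbuf holding the success/error response parameter to append
+ * to the ASCONF-ACK, or NULL if no response is needed or the parameter
+ * is malformed.
+ */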
+static struct mbuf *
+sctp_process_asconf_set_primary(struct sockaddr *src,
+ struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ union sctp_sockstore store;
+ struct sctp_paramhdr *ph;
+ uint16_t param_type, aparam_length;
+#if defined(INET) || defined(INET6)
+ uint16_t param_length;
+#endif
+ struct sockaddr *sa;
+ int zero_address = 0;
+#ifdef INET
+ struct sockaddr_in *sin;
+ struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+#endif
+
+ aparam_length = ntohs(aph->ph.param_length);
+ if (aparam_length < sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_paramhdr)) {
+ return (NULL);
+ }
+ ph = (struct sctp_paramhdr *)(aph + 1);
+ param_type = ntohs(ph->param_type);
+#if defined(INET) || defined(INET6)
+ param_length = ntohs(ph->param_length);
+ if (param_length + sizeof(struct sctp_asconf_paramhdr) != aparam_length) {
+ return (NULL);
+ }
+#endif
+ sa = &store.sa;
+ switch (param_type) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return (NULL);
+ }
+ v4addr = (struct sctp_ipv4addr_param *)ph;
+ sin = &store.sin;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return (NULL);
+ }
+ v6addr = (struct sctp_ipv6addr_param *)ph;
+ sin6 = &store.sin6;
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+#endif
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return (m_reply);
+ }
+
+ /* if 0.0.0.0/::0, use the source address instead */
+ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+ sa = src;
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: using source addr ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+ }
+ /* set the primary address */
+ if (sctp_set_primary_addr(stcb, sa, NULL) == 0) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: primary address set\n");
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_SET_PRIMARY, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+ if ((stcb->asoc.primary_destination->dest_state & SCTP_ADDR_REACHABLE) &&
+ (!(stcb->asoc.primary_destination->dest_state & SCTP_ADDR_PF)) &&
+ (stcb->asoc.alternate)) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ if (response_required) {
+ m_reply = sctp_asconf_success_response(aph->correlation_id);
+ }
+		/* Mobility adaptation.
+		   Ideally, on reception of a SET PRIMARY paired with a DELETE
+		   IP ADDRESS for the previous primary destination,
+		   unacknowledged DATA is retransmitted immediately to the new
+		   primary destination for seamless handover.
+		   If the destination is UNCONFIRMED and marked REQ_PRIM, the
+		   retransmission occurs upon reception of the HEARTBEAT-ACK.
+		   (See sctp_handle_heartbeat_ack in sctp_input.c.)
+		   Also, once the primary destination changes, it is better
+		   that all subsequent new DATA, as well as the already queued
+		   DATA, is transmitted to the new primary destination.
+		   (by micchie)
+		 */
+ if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_PRIM_DELETED) &&
+ (stcb->asoc.primary_destination->dest_state &
+ SCTP_ADDR_UNCONFIRMED) == 0) {
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
+ stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_1);
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_assoc_immediate_retrans(stcb,
+ stcb->asoc.primary_destination);
+ }
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE)) {
+ sctp_move_chunks_from_net(stcb,
+ stcb->asoc.deleted_primary);
+ }
+ sctp_delete_prim_timer(stcb->sctp_ep, stcb);
+ }
+ } else {
+ /* couldn't set the requested primary address! */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: set primary failed!\n");
+ /* must have been an invalid address, so report */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ }
+
+ return (m_reply);
+}
+
+/*
+ * handles an ASCONF chunk.
+ * if all parameters are processed ok, send a plain (empty) ASCONF-ACK
+ */
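+/*
+ * the chunk processed here is laid out roughly as follows (a sketch of
+ * what the offset arithmetic below walks through):
+ *
+ *	[chunk header | serial number]	struct sctp_asconf_chunk
+ *	[address parameter]		the lookup address, skipped below
+ *	[asconf paramhdr | addr TLV]...	one per requested operation
+ *
+ * each asconf parameter carries a correlation id that is echoed back in
+ * the matching ASCONF-ACK response parameter.
+ */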
+void
+sctp_handle_asconf(struct mbuf *m, unsigned int offset,
+ struct sockaddr *src,
+ struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb,
+ int first)
+{
+ struct sctp_association *asoc;
+ uint32_t serial_num;
+ struct mbuf *n, *m_ack, *m_result, *m_tail;
+ struct sctp_asconf_ack_chunk *ack_cp;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_ipv6addr_param *p_addr;
+ unsigned int asconf_limit, cnt;
+ int error = 0; /* did an error occur? */
+
+ /* asconf param buffer */
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_asconf_ack *ack, *ack_next;
+
+ /* verify minimum length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: chunk too small = %xh\n",
+ ntohs(cp->ch.chunk_length));
+ return;
+ }
+ asoc = &stcb->asoc;
+ serial_num = ntohl(cp->serial_number);
+
+ if (SCTP_TSN_GE(asoc->asconf_seq_in, serial_num)) {
+ /* got a duplicate ASCONF */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: got duplicate serial number = %xh\n",
+ serial_num);
+ return;
+ } else if (serial_num != (asoc->asconf_seq_in + 1)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: incorrect serial number = %xh (expected next = %xh)\n",
+ serial_num, asoc->asconf_seq_in + 1);
+ return;
+ }
+
+ /* it's the expected "next" sequence number, so process it */
+ asoc->asconf_seq_in = serial_num; /* update sequence */
+ /* get length of all the param's in the ASCONF */
+ asconf_limit = offset + ntohs(cp->ch.chunk_length);
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: asconf_limit=%u, sequence=%xh\n",
+ asconf_limit, serial_num);
+
+ if (first) {
+ /* delete old cache */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: Now processing first ASCONF. Try to delete old cache\n");
+
+ TAILQ_FOREACH_SAFE(ack, &asoc->asconf_ack_sent, next, ack_next) {
+ if (ack->serial_number == serial_num)
+ break;
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: delete old(%u) < first(%u)\n",
+ ack->serial_number, serial_num);
+ TAILQ_REMOVE(&asoc->asconf_ack_sent, ack, next);
+ if (ack->data != NULL) {
+ sctp_m_freem(ack->data);
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), ack);
+ }
+ }
+
+ m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 0,
+ M_NOWAIT, 1, MT_DATA);
+ if (m_ack == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: couldn't get mbuf!\n");
+ return;
+ }
+ m_tail = m_ack; /* current reply chain's tail */
+
+ /* fill in ASCONF-ACK header */
+ ack_cp = mtod(m_ack, struct sctp_asconf_ack_chunk *);
+ ack_cp->ch.chunk_type = SCTP_ASCONF_ACK;
+ ack_cp->ch.chunk_flags = 0;
+ ack_cp->serial_number = htonl(serial_num);
+	/* set initial lengths (e.g. just an ASCONF-ACK); kept in host order, htons() applied at the end! */
+ SCTP_BUF_LEN(m_ack) = sizeof(struct sctp_asconf_ack_chunk);
+ ack_cp->ch.chunk_length = sizeof(struct sctp_asconf_ack_chunk);
+
+ /* skip the lookup address parameter */
+ offset += sizeof(struct sctp_asconf_chunk);
+ p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&aparam_buf);
+ if (p_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: couldn't get lookup addr!\n");
+ /* respond with a missing/invalid mandatory parameter error */
+ sctp_m_freem(m_ack);
+ return;
+ }
+ /* skip lookup addr */
+ offset += SCTP_SIZE32(ntohs(p_addr->ph.param_length));
+ /* get pointer to first asconf param in ASCONF */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *)&aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Empty ASCONF received?\n");
+ goto send_reply;
+ }
+ /* process through all parameters */
+ cnt = 0;
+ while (aph != NULL) {
+ unsigned int param_length, param_type;
+
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (offset + param_length > asconf_limit) {
+ /* parameter goes beyond end of chunk! */
+ sctp_m_freem(m_ack);
+ return;
+ }
+ m_result = NULL;
+
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) larger than buffer size!\n", param_length);
+ sctp_m_freem(m_ack);
+ return;
+ }
+ if (param_length <= sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) too short\n", param_length);
+ sctp_m_freem(m_ack);
+ return;
+ }
+ /* get the entire parameter */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get entire param\n");
+ sctp_m_freem(m_ack);
+ return;
+ }
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ m_result = sctp_process_asconf_add_ip(src, aph, stcb,
+ (cnt < SCTP_BASE_SYSCTL(sctp_hb_maxburst)), error);
+ cnt++;
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ m_result = sctp_process_asconf_delete_ip(src, aph, stcb,
+ error);
+ break;
+ case SCTP_ERROR_CAUSE_IND:
+ /* not valid in an ASCONF chunk */
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ m_result = sctp_process_asconf_set_primary(src, aph,
+ stcb, error);
+ break;
+ case SCTP_NAT_VTAGS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n");
+ break;
+ case SCTP_SUCCESS_REPORT:
+ /* not valid in an ASCONF chunk */
+ break;
+ case SCTP_ULP_ADAPTATION:
+ /* FIX */
+ break;
+ default:
+ if ((param_type & 0x8000) == 0) {
+ /* Been told to STOP at this param */
+ asconf_limit = offset;
+ /*
+ * FIX FIX - We need to call
+ * sctp_arethere_unrecognized_parameters()
+				 * to get an operr and send it for any
+				 * params with the 0x4000 bit set, OR do it
+ * here ourselves... note we still must STOP
+ * if the 0x8000 bit is clear.
+ */
+ }
+ /* unknown/invalid param type */
+ break;
+ } /* switch */
+
+ /* add any (error) result to the reply mbuf chain */
+ if (m_result != NULL) {
+ SCTP_BUF_NEXT(m_tail) = m_result;
+ m_tail = m_result;
+ ack_cp->ch.chunk_length += SCTP_BUF_LEN(m_result);
+ /* set flag to force success reports */
+ error = 1;
+ }
+ offset += SCTP_SIZE32(param_length);
+ /* update remaining ASCONF message length to process */
+ if (offset >= asconf_limit) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ /* get pointer to next asconf param */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_asconf_paramhdr),
+ (uint8_t *)&aparam_buf);
+ if (aph == NULL) {
+ /* can't get an asconf paramhdr */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: can't get asconf param hdr!\n");
+ /* FIX ME - add error here... */
+ }
+ }
+
+ send_reply:
+ ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
+ /* save the ASCONF-ACK reply */
+ ack = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asconf_ack),
+ struct sctp_asconf_ack);
+ if (ack == NULL) {
+ sctp_m_freem(m_ack);
+ return;
+ }
+ ack->serial_number = serial_num;
+ ack->last_sent_to = NULL;
+ ack->data = m_ack;
+ ack->len = 0;
+ for (n = m_ack; n != NULL; n = SCTP_BUF_NEXT(n)) {
+ ack->len += SCTP_BUF_LEN(n);
+ }
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_ack_sent, ack, next);
+
+ /* see if last_control_chunk_from is set properly (use IP src addr) */
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ /*
+ * this could happen if the source address was just newly
+ * added
+ */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: looking up net for IP source address\n");
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Looking for IP source: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+ /* look up the from address */
+ stcb->asoc.last_control_chunk_from = sctp_findnet(stcb, src);
+#ifdef SCTP_DEBUG
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: IP source address not found?!\n");
+ }
+#endif
+ }
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa)
+{
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* XXX scopeid */
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+ if ((aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) &&
+ (memcmp(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)) {
+ return (1);
+ }
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if ((aa->ap.addrp.ph.param_type == SCTP_IPV4_ADDRESS) &&
+ (memcmp(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr)) == 0)) {
+ return (1);
+ }
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+ return (0);
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_addr_match(struct sctp_paramhdr *ph, struct sockaddr *sa)
+{
+#if defined(INET) || defined(INET6)
+ uint16_t param_type, param_length;
+
+ param_type = ntohs(ph->param_type);
+ param_length = ntohs(ph->param_length);
+#endif
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* XXX scopeid */
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+ struct sctp_ipv6addr_param *v6addr;
+
+ v6addr = (struct sctp_ipv6addr_param *)ph;
+ if ((param_type == SCTP_IPV6_ADDRESS) &&
+ (param_length == sizeof(struct sctp_ipv6addr_param)) &&
+ (memcmp(&v6addr->addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)) {
+ return (1);
+ }
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+ struct sctp_ipv4addr_param *v4addr;
+
+ v4addr = (struct sctp_ipv4addr_param *)ph;
+ if ((param_type == SCTP_IPV4_ADDRESS) &&
+ (param_length == sizeof(struct sctp_ipv4addr_param)) &&
+ (memcmp(&v4addr->addr, &sin->sin_addr,
+ sizeof(struct in_addr)) == 0)) {
+ return (1);
+ }
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+ return (0);
+}
+
+/*
+ * Cleanup for non-responded/OP ERR'd ASCONF
+ */
+void
+sctp_asconf_cleanup(struct sctp_tcb *stcb)
+{
+ /*
+ * clear out any existing asconfs going out
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2);
+ stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out;
+ /* remove the old ASCONF on our outbound queue */
+ sctp_toss_old_asconf(stcb);
+}
+
+/*
+ * cleanup any cached source addresses that may be topologically
+ * incorrect after a new address has been added to this interface.
+ */
+static void
+sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn)
+{
+ struct sctp_nets *net;
+
+ /*
+ * Ideally, we want to only clear cached routes and source addresses
+ * that are topologically incorrect. But since there is no easy way
+ * to know whether the newly added address on the ifn would cause a
+ * routing change (i.e. a new egress interface would be chosen)
+ * without doing a new routing lookup and source address selection,
+ * we will (for now) just flush any cached route using a different
+ * ifn (and cached source addrs) and let output re-choose them during
+ * the next send on that net.
+ */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /*
+ * clear any cached route (and cached source address) if the
+ * route's interface is NOT the same as the address change.
+ * If it's the same interface, just clear the cached source
+ * address.
+ */
+ if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro) &&
+ ((ifn == NULL) ||
+ (SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index))) {
+ /* clear any cached route */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(&net->ro);
+#else
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+#endif
+ }
+ /* clear any cached source address */
+ if (net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ }
+}
+
+
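+/*
+ * immediately retransmit unacknowledged DATA that was outstanding on the
+ * deleted primary destination, as part of the mobility handling above;
+ * skipped while the new primary (dstnet) is still UNCONFIRMED.
+ */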
+void
+sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet)
+{
+ int error;
+
+ if (dstnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ return;
+ }
+ if (stcb->asoc.deleted_primary == NULL) {
+ return;
+ }
+
+ if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "assoc_immediate_retrans: Deleted primary is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Current Primary is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.primary_destination->ro._l_addr.sa);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb,
+ stcb->asoc.deleted_primary,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3);
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ error = sctp_t3rxt_timer(stcb->sctp_ep, stcb,
+ stcb->asoc.deleted_primary);
+ if (error) {
+ SCTP_INP_DECR_REF(stcb->sctp_ep);
+ return;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, stcb->sctp_ep, stcb, stcb->asoc.deleted_primary);
+#endif
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ if ((stcb->asoc.num_send_timers_up == 0) &&
+ (stcb->asoc.sent_queue_cnt > 0)) {
+ struct sctp_tmit_chunk *chk;
+
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (chk != NULL) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+ }
+ }
+ }
+ return;
+}
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *, struct sctp_ifa *, uint16_t);
+
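+/*
+ * immediately mark all DATA outstanding to 'net' for retransmission and
+ * reset that path's congestion state (fast-handoff helper).
+ */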
+void
+sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_tmit_chunk *chk;
+
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "net_immediate_retrans: RTO is %d\n", net->RTO);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_4);
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+ net->error_count = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->whoTo == net) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ net->marked_retrans++;
+ stcb->asoc.marked_retrans++;
+ }
+ }
+ }
+ if (net->marked_retrans) {
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ }
+}
+
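+/*
+ * react to a newly confirmed local address (newifa): flush cached routes
+ * and source addresses so they get re-chosen, retransmit outstanding DATA
+ * immediately when fast-handoff is enabled, and, if the primary
+ * destination's path is affected, queue a SET PRIMARY for the new address.
+ */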
+static void
+sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
+{
+ struct sctp_nets *net;
+ int addrnum, changed;
+
+	/* If the number of valid local addresses is 1, the valid address
+	   is probably the newly added address.
+	   If several valid addresses exist in this association, the source
+	   address may not have changed.  Additionally, they can be
+	   configured on the same interface as "alias" addresses.
+	   (by micchie)
+	 */
+ addrnum = sctp_local_addr_count(stcb);
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "p_check_react(): %d local addresses\n",
+ addrnum);
+ if (addrnum == 1) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* clear any cached route and source address */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(&net->ro);
+#else
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+#endif
+ if (net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ /* Retransmit unacknowledged DATA chunks immediately */
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_net_immediate_retrans(stcb, net);
+ }
+			/* also, a SET PRIMARY may already have been sent */
+ }
+ return;
+ }
+
+	/* Multiple local addresses exist in the association. */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* clear any cached route and source address */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(&net->ro);
+#else
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+#endif
+ if (net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+		/* Check whether the nexthop corresponds to the new address.
+		   If the new address corresponds to the current nexthop,
+		   the path will be changed.
+		   If the new address does NOT correspond to the current
+		   nexthop, the path will not be changed.
+		 */
+ SCTP_RTALLOC((sctp_route_t *)&net->ro,
+ stcb->sctp_ep->def_vrf_id,
+ stcb->sctp_ep->fibnum);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net->ro.ro_nh == NULL)
+#else
+ if (net->ro.ro_rt == NULL)
+#endif
+ continue;
+
+ changed = 0;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *)&net->ro)) {
+ changed = 1;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (sctp_v6src_match_nexthop(
+ &newifa->address.sin6, (sctp_route_t *)&net->ro)) {
+ changed = 1;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+		/* if the newly added address does not affect the routing
+		   to this destination, skip it.
+		 */
+ if (changed == 0)
+ continue;
+ /* Retransmit unacknowledged DATA chunks immediately */
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_net_immediate_retrans(stcb, net);
+ }
+ /* Send SET PRIMARY for this new address */
+ if (net == stcb->asoc.primary_destination) {
+ (void)sctp_asconf_queue_mgmt(stcb, newifa,
+ SCTP_SET_PRIM_ADDR);
+ }
+ }
+}
+#endif
+
+/*
+ * process an ADD/DELETE IP ack from peer.
+ * addr: corresponding sctp_ifa to the address being added/deleted.
+ * type: SCTP_ADD_IP_ADDRESS or SCTP_DEL_IP_ADDRESS.
+ * flag: 1=success, 0=failure.
+ */
+static void
+sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr, uint32_t flag)
+{
+ /*
+ * do the necessary asoc list work- if we get a failure indication,
+ * leave the address on the assoc's restricted list. If we get a
+ * success indication, remove the address from the restricted list.
+ */
+ /*
+ * Note: this will only occur for ADD_IP_ADDRESS, since
+ * DEL_IP_ADDRESS is never actually added to the list...
+ */
+ if (flag) {
+ /* success case, so remove from the restricted list */
+ sctp_del_local_addr_restricted(stcb, addr);
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_path_check_and_react(stcb, addr);
+ return;
+ }
+#endif
+ /* clear any cached/topologically incorrect source addresses */
+ sctp_asconf_nets_cleanup(stcb, addr->ifn_p);
+ }
+ /* else, leave it on the list */
+}
+
+/*
+ * add an asconf add/delete/set primary IP address parameter to the queue.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if queued, -1 if not queued/removed.
+ * NOTE: if adding, but a delete for the same address is already scheduled
+ * (and not yet sent out), simply remove it from queue. Same for deleting
+ * an address already scheduled for add. If a duplicate operation is found,
+ * ignore the new one.
+ */
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+ uint16_t type)
+{
+ struct sctp_asconf_addr *aa, *aa_next;
+
+ /* make sure the request isn't already in the queue */
+ TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+ /* address match? */
+ if (sctp_asconf_addr_match(aa, &ifa->address.sa) == 0)
+ continue;
+		/* is the request already in the queue, but not yet sent?
+		 * requests that were already sent are passed over, to handle
+		 * the following case:
+		 *  1. an ADD arrives and is sent
+		 *  2. a DEL arrives; we can't remove the ADD request already sent
+		 *  3. another ADD arrives
+		 */
+ if (aa->ap.aph.ph.param_type == type && aa->sent == 0) {
+ return (-1);
+ }
+ /* is the negative request already in queue, and not sent */
+ if ((aa->sent == 0) && (type == SCTP_ADD_IP_ADDRESS) &&
+ (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS)) {
+ /* add requested, delete already queued */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* remove the ifa from the restricted list */
+ sctp_del_local_addr_restricted(stcb, ifa);
+ /* free the asconf param */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: add removes queued entry\n");
+ return (-1);
+ }
+ if ((aa->sent == 0) && (type == SCTP_DEL_IP_ADDRESS) &&
+ (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS)) {
+ /* delete requested, add already queued */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* remove the aa->ifa from the restricted list */
+ sctp_del_local_addr_restricted(stcb, aa->ifa);
+ /* free the asconf param */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: delete removes queued entry\n");
+ return (-1);
+ }
+ } /* for each aa */
+
+ /* adding new request to the queue */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n");
+ return (-1);
+ }
+ aa->special_del = 0;
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ap.aph.ph.param_type = type;
+ aa->ifa = ifa;
+ atomic_add_int(&ifa->refcount, 1);
+ /* correlation_id filled in during send routine later... */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = &ifa->address.sin6;
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+ sizeof(struct sctp_ipv6addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = &ifa->address.sin;
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+ sizeof(struct sctp_ipv4addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr));
+ break;
+ }
+#endif
+ default:
+ /* invalid family! */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ sctp_free_ifa(ifa);
+ return (-1);
+ }
+ aa->sent = 0; /* clear sent flag */
+
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+#ifdef SCTP_DEBUG
+ if (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_ASCONF2) {
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa);
+ } else {
+ SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa);
+ }
+ }
+#endif
+
+ return (0);
+}
+
+
+/*
+ * add an asconf operation for the given ifa and type.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if completed, -1 if not completed, 1 if immediate send is
+ * advisable.
+ */
+static int
+sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+ uint16_t type)
+{
+ uint32_t status;
+ int pending_delete_queued = 0;
+ int last;
+
+ /* see if peer supports ASCONF */
+ if (stcb->asoc.asconf_supported == 0) {
+ return (-1);
+ }
+
+ /*
+ * if this is deleting the last address from the assoc, mark it as
+ * pending.
+ */
+ if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending) {
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ last = (sctp_local_addr_count(stcb) == 0);
+ } else {
+ last = (sctp_local_addr_count(stcb) == 1);
+ }
+ if (last) {
+ /* set the pending delete info only */
+ stcb->asoc.asconf_del_pending = 1;
+ stcb->asoc.asconf_addr_del_pending = ifa;
+ atomic_add_int(&ifa->refcount, 1);
+ SCTPDBG(SCTP_DEBUG_ASCONF2,
+ "asconf_queue_add: mark delete last address pending\n");
+ return (-1);
+ }
+ }
+
+ /* queue an asconf parameter */
+ status = sctp_asconf_queue_mgmt(stcb, ifa, type);
+
+ /*
+ * if this is an add, and there is a delete also pending (i.e. the
+ * last local address is being changed), queue the pending delete too.
+ */
+ if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending && (status == 0)) {
+ /* queue in the pending delete */
+ if (sctp_asconf_queue_mgmt(stcb,
+ stcb->asoc.asconf_addr_del_pending,
+ SCTP_DEL_IP_ADDRESS) == 0) {
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_add: queuing pending delete\n");
+ pending_delete_queued = 1;
+ /* clear out the pending delete info */
+ stcb->asoc.asconf_del_pending = 0;
+ sctp_free_ifa(stcb->asoc.asconf_addr_del_pending);
+ stcb->asoc.asconf_addr_del_pending = NULL;
+ }
+ }
+
+ if (pending_delete_queued) {
+ struct sctp_nets *net;
+ /*
+ * since we know that the only/last address is now being
+ * changed in this case, reset the cwnd/rto on all nets to
+ * start as a new address and path. Also clear the error
+ * counts to give the assoc the best chance to complete the
+ * address change.
+ */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb,
+ net);
+ net->RTO = 0;
+ net->error_count = 0;
+ }
+ stcb->asoc.overall_error_count = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_ASCONF,
+ __LINE__);
+ }
+
+ /* queue in an advisory set primary too */
+ (void)sctp_asconf_queue_mgmt(stcb, ifa, SCTP_SET_PRIM_ADDR);
+ /* let caller know we should send this out immediately */
+ status = 1;
+ }
+ return (status);
+}
+
+/*-
+ * add an asconf delete IP address parameter to the queue by sockaddr and
+ * possibly with no sctp_ifa available. This is only called by the routine
+ * that checks the addresses in an INIT-ACK against the current address list.
+ * returns 0 if completed, non-zero if not completed.
+ * NOTE: if an add is already scheduled (and not yet sent out), simply
+ * remove it from queue. If a duplicate operation is found, ignore the
+ * new one.
+ */
+static int
+sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ struct sctp_ifa *ifa;
+ struct sctp_asconf_addr *aa, *aa_next;
+
+ if (stcb == NULL) {
+ return (-1);
+ }
+ /* see if peer supports ASCONF */
+ if (stcb->asoc.asconf_supported == 0) {
+ return (-1);
+ }
+ /* make sure the request isn't already in the queue */
+ TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+ /* address match? */
+ if (sctp_asconf_addr_match(aa, sa) == 0)
+ continue;
+ /* is the request already in queue (sent or not) */
+ if (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+ return (-1);
+ }
+		/* skip requests that have already been sent */
+		if (aa->sent == 1)
+			continue;
+		/* is the negative request (an add) already queued, not sent? */
+ if (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS) {
+ /* add already queued, so remove existing entry */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ sctp_del_local_addr_restricted(stcb, aa->ifa);
+ /* free the entry */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ return (-1);
+ }
+ } /* for each aa */
+
+ /* find any existing ifa-- NOTE ifa CAN be allowed to be NULL */
+ ifa = sctp_find_ifa_by_addr(sa, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
+
+ /* adding new request to the queue */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_queue_sa_delete: failed to get memory!\n");
+ return (-1);
+ }
+ aa->special_del = 0;
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa->ifa = ifa;
+ if (ifa)
+ atomic_add_int(&ifa->refcount, 1);
+ /* correlation_id filled in during send routine later... */
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ /* IPv4 address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr));
+ break;
+ }
+#endif
+ default:
+ /* invalid family! */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ if (ifa)
+ sctp_free_ifa(ifa);
+ return (-1);
+ }
+ aa->sent = 0; /* clear sent flag */
+
+ /* delete goes to the back of the queue */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+ /* sa_ignore MEMLEAK {memory is put on the tailq} */
+ return (0);
+}
+
+/*
+ * find a specific asconf param on our "sent" queue
+ */
+static struct sctp_asconf_addr *
+sctp_asconf_find_param(struct sctp_tcb *stcb, uint32_t correlation_id)
+{
+ struct sctp_asconf_addr *aa;
+
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->ap.aph.correlation_id == correlation_id &&
+ aa->sent == 1) {
+ /* found it */
+ return (aa);
+ }
+ }
+ /* didn't find it */
+ return (NULL);
+}
+
+/*
+ * process an SCTP_ERROR_CAUSE_IND for a ASCONF-ACK parameter and do
+ * notifications based on the error response
+ */
+static void
+sctp_asconf_process_error(struct sctp_tcb *stcb SCTP_UNUSED,
+ struct sctp_asconf_paramhdr *aph)
+{
+ struct sctp_error_cause *eh;
+ struct sctp_paramhdr *ph;
+ uint16_t param_type;
+ uint16_t error_code;
+
+ eh = (struct sctp_error_cause *)(aph + 1);
+ ph = (struct sctp_paramhdr *)(eh + 1);
+ /* validate lengths */
+	if (ntohs(eh->length) + sizeof(struct sctp_error_cause) >
+	    ntohs(aph->ph.param_length)) {
+ /* invalid error cause length */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_process_error: cause element too long\n");
+ return;
+ }
+	if (ntohs(ph->param_length) + sizeof(struct sctp_paramhdr) >
+	    ntohs(eh->length)) {
+ /* invalid included TLV length */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_process_error: included TLV too long\n");
+ return;
+ }
+ /* which error code ? */
+ error_code = ntohs(eh->code);
+ param_type = ntohs(aph->ph.param_type);
+ /* FIX: this should go back up the REMOTE_ERROR ULP notify */
+ switch (error_code) {
+ case SCTP_CAUSE_RESOURCE_SHORTAGE:
+ /* we allow ourselves to "try again" for this error */
+ break;
+ default:
+ /* peer can't handle it... */
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ case SCTP_DEL_IP_ADDRESS:
+ case SCTP_SET_PRIM_ADDR:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * process an asconf queue param.
+ * aparam: parameter to process, will be removed from the queue.
+ * flag: 1=success case, 0=failure case
+ */
+static void
+sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
+ struct sctp_asconf_addr *aparam, uint32_t flag)
+{
+ uint16_t param_type;
+
+ /* process this param */
+ param_type = aparam->ap.aph.ph.param_type;
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: added IP address\n");
+ sctp_asconf_addr_mgmt_ack(stcb, aparam->ifa, flag);
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: deleted IP address\n");
+ /* nothing really to do... lists already updated */
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: set primary IP address\n");
+ /* nothing to do... peer may start using this addr */
+ break;
+ default:
+ /* should NEVER happen */
+ break;
+ }
+
+ /* remove the param and free it */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aparam, next);
+ if (aparam->ifa)
+ sctp_free_ifa(aparam->ifa);
+ SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
+}
+
+/*
+ * cleanup from a bad asconf ack parameter
+ */
+static void
+sctp_asconf_ack_clear(struct sctp_tcb *stcb SCTP_UNUSED)
+{
+ /* assume peer doesn't really know how to do asconfs */
+ /* XXX we could free the pending queue here */
+}
+
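+/*
+ * handles an ASCONF-ACK chunk: match each response parameter to our
+ * queued requests by correlation id, treat remaining sent requests as
+ * implicitly acked, and send or re-arm the next ASCONF if more params
+ * are queued.
+ */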
+void
+sctp_handle_asconf_ack(struct mbuf *m, int offset,
+ struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock)
+{
+ struct sctp_association *asoc;
+ uint32_t serial_num;
+ uint16_t ack_length;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_asconf_addr *aa, *aa_next;
+ uint32_t last_error_id = 0; /* last error correlation id */
+ uint32_t id;
+ struct sctp_asconf_addr *ap;
+
+ /* asconf param buffer */
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+
+ /* verify minimum length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_ack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf_ack: chunk too small = %xh\n",
+ ntohs(cp->ch.chunk_length));
+ return;
+ }
+ asoc = &stcb->asoc;
+ serial_num = ntohl(cp->serial_number);
+
+ /*
+ * NOTE: we may want to handle this differently- currently, we will
+ * abort when we get an ack for the expected serial number + 1 (eg.
+ * we didn't send it), process an ack normally if it is the expected
+ * serial number, and re-send the previous ack for *ALL* other
+ * serial numbers
+ */
+
+ /*
+ * if the serial number is the next expected, but I didn't send it,
+ * abort the asoc, since someone probably just hijacked us...
+ */
+ if (serial_num == (asoc->asconf_seq_out + 1)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n");
+ SCTP_SNPRINTF(msg, sizeof(msg), "Never sent serial number %8.8x", serial_num);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_no_unlock = 1;
+ return;
+ }
+ if (serial_num != asoc->asconf_seq_out_acked + 1) {
+ /* got a duplicate/unexpected ASCONF-ACK */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n",
+ serial_num, asoc->asconf_seq_out_acked + 1);
+ return;
+ }
+
+ if (serial_num == asoc->asconf_seq_out - 1) {
+ /* stop our timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_5);
+ }
+
+ /* process the ASCONF-ACK contents */
+ ack_length = ntohs(cp->ch.chunk_length) -
+ sizeof(struct sctp_asconf_ack_chunk);
+ offset += sizeof(struct sctp_asconf_ack_chunk);
+ /* process through all parameters */
+ while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) {
+ unsigned int param_length, param_type;
+
+ /* get pointer to next asconf parameter */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ if (aph == NULL) {
+ /* can't get an asconf paramhdr */
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (param_length > ack_length) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ if (param_length < sizeof(struct sctp_paramhdr)) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ /* get the complete parameter... */
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "param length (%u) larger than buffer size!\n", param_length);
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ /* correlation_id is transparent to peer, no ntohl needed */
+ id = aph->correlation_id;
+
+ switch (param_type) {
+ case SCTP_ERROR_CAUSE_IND:
+ last_error_id = id;
+ /* find the corresponding asconf param in our queue */
+ ap = sctp_asconf_find_param(stcb, id);
+ if (ap == NULL) {
+ /* hmm... can't find this in our queue! */
+ break;
+ }
+ /* process the parameter, failed flag */
+ sctp_asconf_process_param_ack(stcb, ap, 0);
+ /* process the error response */
+ sctp_asconf_process_error(stcb, aph);
+ break;
+ case SCTP_SUCCESS_REPORT:
+ /* find the corresponding asconf param in our queue */
+ ap = sctp_asconf_find_param(stcb, id);
+ if (ap == NULL) {
+ /* hmm... can't find this in our queue! */
+ break;
+ }
+ /* process the parameter, success flag */
+ sctp_asconf_process_param_ack(stcb, ap, 1);
+ break;
+ default:
+ break;
+ } /* switch */
+
+ /* update remaining ASCONF-ACK message length to process */
+ if (ack_length > SCTP_SIZE32(param_length)) {
+ ack_length -= SCTP_SIZE32(param_length);
+ } else {
+ break;
+ }
+ offset += SCTP_SIZE32(param_length);
+ } /* while */
+
+ /*
+ * if there are any "sent" params still on the queue, these are
+ * implicitly "success", or "failed" (if we got an error back) ...
+ * so process these appropriately
+ *
+ * we assume that the correlation_id's are monotonically increasing
+ * beginning from 1 and that we don't have *that* many outstanding
+ * at any given time
+ */
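+	/*
+	 * e.g. (a sketch): if params with correlation ids 1..4 were sent
+	 * and the last error reported was for id 3, any entries still
+	 * marked "sent" with ids 1 or 2 are treated as implicit successes,
+	 * while ids 3 and 4 are treated as failures
+	 */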
+ if (last_error_id == 0)
+ last_error_id--; /* set to "max" value */
+ TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+ if (aa->sent == 1) {
+ /*
+ * implicitly successful or failed if correlation_id
+ * < last_error_id, then success else, failure
+ */
+ if (aa->ap.aph.correlation_id < last_error_id)
+ sctp_asconf_process_param_ack(stcb, aa, 1);
+ else
+ sctp_asconf_process_param_ack(stcb, aa, 0);
+ } else {
+ /*
+ * since we always process in order (FIFO queue) if
+ * we reach one that hasn't been sent, the rest
+ * should not have been sent either. so, we're
+ * done...
+ */
+ break;
+ }
+ }
+
+ /* update the next sequence number to use */
+ asoc->asconf_seq_out_acked++;
+ /* remove the old ASCONF on our outbound queue */
+ sctp_toss_old_asconf(stcb);
+ if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ /* we have more params, so restart our timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep,
+ stcb, net);
+#else
+ /* we have more params, so send out more */
+ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+}
+
+#ifdef INET6
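+/*
+ * returns 1 if sa is a link-local IPv6 address whose scope matches one of
+ * the association's destination nets, 0 otherwise.
+ */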
+static uint32_t
+sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ struct sockaddr_in6 *sin6, *net6;
+ struct sctp_nets *net;
+
+ if (sa->sa_family != AF_INET6) {
+ /* wrong family */
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) == 0) {
+ /* not link local address */
+ return (0);
+ }
+ /* hunt through our destination nets list for this scope_id */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (((struct sockaddr *)(&net->ro._l_addr))->sa_family !=
+ AF_INET6)
+ continue;
+ net6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (IN6_IS_ADDR_LINKLOCAL(&net6->sin6_addr) == 0)
+ continue;
+ if (sctp_is_same_scope(sin6, net6)) {
+ /* found one */
+ return (1);
+ }
+ }
+ /* didn't find one */
+ return (0);
+}
+#endif
+
+/*
+ * address management functions
+ */
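+/*
+ * queue an ASCONF add/delete for the given ifa on this assoc, subject to
+ * binding, scope, and jail checks; sends the ASCONF immediately when the
+ * assoc is open (unless timer-based ASCONF is compiled in).
+ */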
+static void
+sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_ifa *ifa, uint16_t type, int addr_locked)
+{
+ int status;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0 ||
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* subset bound, no ASCONF allowed case, so ignore */
+ return;
+ }
+ /*
+ * note: we know this is not the subset bound, no ASCONF case eg.
+ * this is boundall or subset bound w/ASCONF allowed
+ */
+
+ /* first, make sure that the address is IPv4 or IPv6 and not jailed */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &ifa->address.sin6.sin6_addr) != 0) {
+ return;
+ }
+#endif
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &ifa->address.sin.sin_addr) != 0) {
+ return;
+ }
+#endif
+ break;
+#endif
+ default:
+ return;
+ }
+#ifdef INET6
+ /* make sure we're "allowed" to add this type of addr */
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ /* invalid if we're not a v6 endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)
+ return;
+ /* is the v6 addr really valid ? */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ return;
+ }
+ }
+#endif
+ /* put this address on the "pending/do not use yet" list */
+ sctp_add_local_addr_restricted(stcb, ifa);
+ /*
+ * check address scope if address is out of scope, don't queue
+ * anything... note: this would leave the address on both inp and
+ * asoc lists
+ */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = &ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* we skip unspecified addresses */
+ return;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (stcb->asoc.scope.local_scope == 0) {
+ return;
+ }
+ /* is it the right link local scope? */
+ if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+ return;
+ }
+ }
+ if (stcb->asoc.scope.site_scope == 0 &&
+ IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ return;
+ }
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ /* invalid if we are a v6 only endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp))
+ return;
+
+ sin = &ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+			/* we skip unspecified addresses */
+ return;
+ }
+ if (stcb->asoc.scope.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ return;
+ }
+ break;
+ }
+#endif
+ default:
+ /* else, not AF_INET or AF_INET6, so skip */
+ return;
+ }
+
+ /* queue an asconf for this address add/delete */
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* does the peer do asconf? */
+ if (stcb->asoc.asconf_supported) {
+ /* queue an asconf for this addr */
+ status = sctp_asconf_queue_add(stcb, ifa, type);
+
+ /*
+ * if queued ok, and in the open state, send out the
+ * ASCONF. If in the non-open state, these will be
+ * sent when the state goes open.
+ */
+ if (status == 0 &&
+ ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED))) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+ stcb, stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, NULL, addr_locked);
+#endif
+ }
+ }
+ }
+}
+
+
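+/*
+ * iterator callback run once per endpoint: returns 1 (skip this endpoint)
+ * if every address on the work list is invalid for this endpoint's
+ * address families, 0 otherwise (asc->cnt is taken to be the number of
+ * entries on the work list).
+ */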
+int
+sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED)
+{
+ struct sctp_asconf_iterator *asc;
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *l;
+ int cnt_invalid = 0;
+
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+ ifa = l->ifa;
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ /* invalid if we're not a v6 endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return (1);
+ }
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ /* invalid if we are a v6 only endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return (1);
+ }
+ break;
+ }
+#endif
+ default:
+ /* invalid address family */
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return (1);
+ }
+ }
+ return (0);
+}
+
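+/*
+ * endpoint-level cleanup run after the iterator: clears the pending
+ * action on added addresses and removes deleted addresses from the
+ * endpoint's address list (only used in the not-bound-all case, per the
+ * note below).
+ */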
+static int
+sctp_asconf_iterator_ep_end(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED)
+{
+ struct sctp_ifa *ifa;
+ struct sctp_asconf_iterator *asc;
+ struct sctp_laddr *laddr, *nladdr, *l;
+
+ /* Only for specific case not bound all */
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+ ifa = l->ifa;
+ if (l->action == SCTP_ADD_IP_ADDRESS) {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ laddr->action = 0;
+ break;
+ }
+			}
+ } else if (l->action == SCTP_DEL_IP_ADDRESS) {
+ LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+ /* remove only after all guys are done */
+ if (laddr->ifa == ifa) {
+ sctp_del_local_addr_ep(inp, ifa);
+ }
+ }
+ }
+ }
+ return (0);
+}
+
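+/*
+ * iterator callback run once per association: validates each address on
+ * the work list against the assoc's address families and scopes, updates
+ * the restricted list and any cached routes/source addresses, queues the
+ * corresponding ASCONF params, and sends an ASCONF if anything was queued
+ * while the assoc is open.
+ */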
+void
+sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ void *ptr, uint32_t val SCTP_UNUSED)
+{
+ struct sctp_asconf_iterator *asc;
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *l;
+ int cnt_invalid = 0;
+ int type, status;
+ int num_queued = 0;
+
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+ ifa = l->ifa;
+ type = l->action;
+
+ /* address's vrf_id must be the vrf_id of the assoc */
+ if (ifa->vrf_id != stcb->asoc.vrf_id) {
+ continue;
+ }
+
+ /* Same checks again for assoc */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* invalid if we're not a v6 endpoint */
+ struct sockaddr_in6 *sin6;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return;
+ else
+ continue;
+ }
+ sin6 = &ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* we skip unspecified addresses */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (stcb->asoc.scope.local_scope == 0) {
+ continue;
+ }
+ /* is it the right link local scope? */
+ if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+ continue;
+ }
+ }
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+			struct sockaddr_in *sin;
+
+			/* invalid if we are a v6 only endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp))
+ continue;
+
+ sin = &ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+				/* we skip unspecified addresses */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if (stcb->asoc.scope.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ continue;
+ }
+ break;
+ }
+#endif
+ default:
+ /* invalid address family */
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return;
+ else
+ continue;
+ }
+
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ /* prevent this address from being used as a source */
+ sctp_add_local_addr_restricted(stcb, ifa);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ struct sctp_nets *net;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* delete this address if cached */
+ if (net->ro._s_addr == ifa) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(&net->ro);
+#else
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+#endif
+ /*
+ * Now we deleted our src address,
+ * should we not also now reset the
+ * cwnd/rto to start as if its a new
+ * address?
+ */
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+ net->RTO = 0;
+
+ }
+ }
+ } else if (type == SCTP_SET_PRIM_ADDR) {
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* must validate the ifa is in the ep */
+ if (sctp_is_addr_in_ep(stcb->sctp_ep, ifa) == 0) {
+ continue;
+ }
+ } else {
+ /* Need to check scopes for this guy */
+ if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
+ continue;
+ }
+ }
+ }
+ /* queue an asconf for this address add/delete */
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF) &&
+ stcb->asoc.asconf_supported == 1) {
+ /* queue an asconf for this addr */
+ status = sctp_asconf_queue_add(stcb, ifa, type);
+ /*
+ * if queued ok, and in the open state, update the
+ * count of queued params. If in the non-open state,
+ * these get sent when the assoc goes open.
+ */
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ if (status >= 0) {
+ num_queued++;
+ }
+ }
+ }
+ }
+ /*
+ * If we have queued params in the open state, send out an ASCONF.
+ */
+ if (num_queued > 0) {
+ sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+ }
+}
+
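+/*
+ * final iterator cleanup: clear the defer-use flag on added addresses and
+ * release the work list entries and the iterator structure itself.
+ */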
+void
+sctp_asconf_iterator_end(void *ptr, uint32_t val SCTP_UNUSED)
+{
+ struct sctp_asconf_iterator *asc;
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *l, *nl;
+
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH_SAFE(l, &asc->list_of_work, sctp_nxt_addr, nl) {
+ ifa = l->ifa;
+ if (l->action == SCTP_ADD_IP_ADDRESS) {
+ /* Clear the defer use flag */
+ ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ sctp_free_ifa(ifa);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), l);
+ SCTP_DECR_LADDR_COUNT();
+ }
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+}
+
+/*
+ * sa is the sockaddr to ask the peer to set primary to.
+ * returns: 0 = completed, -1 = error
+ */
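+/*
+ * illustrative use (a sketch, not a caller in this file): asking the peer
+ * to start sending to our local address 10.0.0.2 would look roughly like:
+ *
+ *	struct sockaddr_in sin;
+ *
+ *	memset(&sin, 0, sizeof(sin));
+ *	sin.sin_family = AF_INET;
+ *	sin.sin_addr.s_addr = htonl(0x0a000002);	(10.0.0.2)
+ *	(void)sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sin);
+ *
+ * the address must be a known local ifa, or -1 is returned.
+ */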
+int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ uint32_t vrf_id;
+ struct sctp_ifa *ifa;
+
+ /* find the ifa for the desired set primary */
+ vrf_id = stcb->asoc.vrf_id;
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (ifa == NULL) {
+ /* Invalid address */
+ return (-1);
+ }
+
+ /* queue an ASCONF:SET_PRIM_ADDR to be sent */
+ if (!sctp_asconf_queue_add(stcb, ifa, SCTP_SET_PRIM_ADDR)) {
+ /* set primary queuing succeeded */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "set_primary_ip_address_sa: queued on tcb=%p, ",
+ (void *)stcb);
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ } else {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: failed to add to queue on tcb=%p, ",
+ (void *)stcb);
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ return (-1);
+ }
+ return (0);
+}
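+
+/*
+ * Usage sketch (illustrative only; names per the usrsctp public API): this
+ * function is typically reached via the SCTP_SET_PEER_PRIMARY_ADDR socket
+ * option, e.g. from an application:
+ *
+ *   struct sctp_setpeerprim spp;
+ *
+ *   memset(&spp, 0, sizeof(spp));
+ *   memcpy(&spp.sspp_addr, &addr, sizeof(struct sockaddr_in));
+ *   usrsctp_setsockopt(so, IPPROTO_SCTP, SCTP_SET_PEER_PRIMARY_ADDR,
+ *                      &spp, (socklen_t)sizeof(spp));
+ */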
+
+int
+sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+ unsigned int offset, asconf_limit;
+ struct sctp_asconf_chunk *acp;
+ struct sctp_asconf_paramhdr *aph;
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_paramhdr *ph;
+ int add_cnt, del_cnt;
+ uint16_t last_param_type;
+
+ add_cnt = del_cnt = 0;
+ last_param_type = 0;
+ TAILQ_FOREACH_SAFE(chk, &stcb->asoc.asconf_send_queue, sctp_next, nchk) {
+ if (chk->data == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: No mbuf data?\n");
+ continue;
+ }
+ offset = 0;
+ acp = mtod(chk->data, struct sctp_asconf_chunk *);
+ offset += sizeof(struct sctp_asconf_chunk);
+ asconf_limit = ntohs(acp->ch.chunk_length);
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_paramhdr), aparam_buf);
+ if (ph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get lookup addr!\n");
+ continue;
+ }
+ offset += ntohs(ph->param_length);
+
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: Empty ASCONF will be sent?\n");
+ continue;
+ }
+ while (aph != NULL) {
+ unsigned int param_length, param_type;
+
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (offset + param_length > asconf_limit) {
+ /* parameter goes beyond end of chunk! */
+ break;
+ }
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length (%u) larger than buffer size!\n", param_length);
+ break;
+ }
+ if (param_length <= sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length(%u) too short\n", param_length);
+ break;
+ }
+
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get entire param\n");
+ break;
+ }
+
+ ph = (struct sctp_paramhdr *)(aph + 1);
+ if (sctp_addr_match(ph, &sctp_ifa->address.sa) != 0) {
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ add_cnt++;
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ del_cnt++;
+ break;
+ default:
+ break;
+ }
+ last_param_type = param_type;
+ }
+
+ offset += SCTP_SIZE32(param_length);
+ if (offset >= asconf_limit) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ /* get pointer to next asconf param */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ }
+ }
+
+ /* we want to find sequences that leave an ADD pending: ADD -> DEL -> ADD, or DEL -> ADD */
+ if (add_cnt > del_cnt ||
+ (add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) {
+ return (1);
+ }
+ return (0);
+}
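+
+/*
+ * Worked example for the check above (illustrative): if the un-acked
+ * ASCONF queue holds ADD(A), DEL(A), ADD(A) for an address A, the scan
+ * yields add_cnt = 2, del_cnt = 1, so the address counts as pending (1).
+ * A lone DEL(A) yields add_cnt = 0, del_cnt = 1, and the result is 0.
+ */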
+
+static struct sockaddr *
+sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
+{
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+ if (vrf == NULL) {
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+ }
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (stcb->asoc.scope.loopback_scope == 0 &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* Skip if loopback_scope not set */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = &sctp_ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+ /* skip unspecified addresses */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if (stcb->asoc.scope.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))
+ continue;
+
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa)))
+ continue;
+ /* found a valid local v4 address to use */
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (&sctp_ifa->address.sa);
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ if (sctp_ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ continue;
+ }
+
+ sin6 = &sctp_ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /* we skip unspecified addresses */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (stcb->asoc.scope.local_scope == 0 &&
+ IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
+ continue;
+ if (stcb->asoc.scope.site_scope == 0 &&
+ IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))
+ continue;
+
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa)))
+ continue;
+ /* found a valid local v6 address to use */
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (&sctp_ifa->address.sa);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ /* no valid addresses found */
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
+{
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ /* is the address restricted ? */
+ if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+ (!sctp_is_addr_pending(stcb, laddr->ifa)))
+ continue;
+
+ /* found a valid local address to use */
+ return (&laddr->ifa->address.sa);
+ }
+ /* no valid addresses found */
+ return (NULL);
+}
+
+/*
+ * builds an ASCONF chunk from queued ASCONF params.
+ * returns NULL on error (no mbuf, no ASCONF params queued, etc).
+ */
+struct mbuf *
+sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
+{
+ struct mbuf *m_asconf, *m_asconf_chk;
+ struct sctp_asconf_addr *aa;
+ struct sctp_asconf_chunk *acp;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_asconf_addr_param *aap;
+ uint32_t p_length;
+ uint32_t correlation_id = 1; /* 0 is reserved... */
+ caddr_t ptr, lookup_ptr;
+ uint8_t lookup_used = 0;
+
+ /* are there any asconf params to send? */
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->sent == 0)
+ break;
+ }
+ if (aa == NULL)
+ return (NULL);
+
+ /*
+ * get a chunk header mbuf and a cluster for the asconf params since
+ * it's simpler to fill in the asconf chunk header lookup address on
+ * the fly
+ */
+ m_asconf_chk = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_chunk), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_asconf_chk == NULL) {
+ /* no mbuf's */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_compose_asconf: couldn't get chunk mbuf!\n");
+ return (NULL);
+ }
+ m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_asconf == NULL) {
+ /* no mbuf's */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_compose_asconf: couldn't get mbuf!\n");
+ sctp_m_freem(m_asconf_chk);
+ return (NULL);
+ }
+ SCTP_BUF_LEN(m_asconf_chk) = sizeof(struct sctp_asconf_chunk);
+ SCTP_BUF_LEN(m_asconf) = 0;
+ acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *);
+ memset(acp, 0, sizeof(struct sctp_asconf_chunk));
+ /* save pointers to lookup address and asconf params */
+ lookup_ptr = (caddr_t)(acp + 1); /* after the header */
+ ptr = mtod(m_asconf, caddr_t); /* beginning of cluster */
+
+ /* fill in chunk header info */
+ acp->ch.chunk_type = SCTP_ASCONF;
+ acp->ch.chunk_flags = 0;
+ acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
+ stcb->asoc.asconf_seq_out++;
+
+ /* add parameters... up to smallest MTU allowed */
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->sent)
+ continue;
+ /* get the parameter length */
+ p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length);
+ /* will it fit in current chunk? */
+ if ((SCTP_BUF_LEN(m_asconf) + p_length > stcb->asoc.smallest_mtu) ||
+ (SCTP_BUF_LEN(m_asconf) + p_length > MCLBYTES)) {
+ /* won't fit, so we're done with this chunk */
+ break;
+ }
+ /* assign (and store) a correlation id */
+ aa->ap.aph.correlation_id = correlation_id++;
+
+ /*
+ * fill in the address if we're doing a delete. This is a
+ * simple way for us to fill in the correlation address,
+ * which should only be used by the peer if we're deleting
+ * our source address and adding a new address (e.g. the
+ * renumbering case)
+ */
+ if (lookup_used == 0 &&
+ (aa->special_del == 0) &&
+ aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+ struct sctp_ipv6addr_param *lookup;
+ uint16_t p_size, addr_size;
+
+ lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+ lookup->ph.param_type =
+ htons(aa->ap.addrp.ph.param_type);
+ if (aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) {
+ /* copy IPv6 address */
+ p_size = sizeof(struct sctp_ipv6addr_param);
+ addr_size = sizeof(struct in6_addr);
+ } else {
+ /* copy IPv4 address */
+ p_size = sizeof(struct sctp_ipv4addr_param);
+ addr_size = sizeof(struct in_addr);
+ }
+ lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+ memcpy(lookup->addr, &aa->ap.addrp.addr, addr_size);
+ SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+ lookup_used = 1;
+ }
+ /* copy into current space */
+ memcpy(ptr, &aa->ap, p_length);
+
+ /* network elements and update lengths */
+ aph = (struct sctp_asconf_paramhdr *)ptr;
+ aap = (struct sctp_asconf_addr_param *)ptr;
+ /* correlation_id is transparent to peer, no htonl needed */
+ aph->ph.param_type = htons(aph->ph.param_type);
+ aph->ph.param_length = htons(aph->ph.param_length);
+ aap->addrp.ph.param_type = htons(aap->addrp.ph.param_type);
+ aap->addrp.ph.param_length = htons(aap->addrp.ph.param_length);
+
+ SCTP_BUF_LEN(m_asconf) += SCTP_SIZE32(p_length);
+ ptr += SCTP_SIZE32(p_length);
+
+ /*
+ * these params are removed off the pending list upon
+ * getting an ASCONF-ACK back from the peer, just set flag
+ */
+ aa->sent = 1;
+ }
+ /* check to see if the lookup addr has been populated yet */
+ if (lookup_used == 0) {
+ /* NOTE: if the address param is optional, can skip this... */
+ /* add any valid (existing) address... */
+ struct sctp_ipv6addr_param *lookup;
+ uint16_t p_size, addr_size;
+ struct sockaddr *found_addr;
+ caddr_t addr_ptr;
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)
+ found_addr = sctp_find_valid_localaddr(stcb,
+ addr_locked);
+ else
+ found_addr = sctp_find_valid_localaddr_ep(stcb);
+
+ lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+ if (found_addr != NULL) {
+ switch (found_addr->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ /* copy IPv6 address */
+ lookup->ph.param_type =
+ htons(SCTP_IPV6_ADDRESS);
+ p_size = sizeof(struct sctp_ipv6addr_param);
+ addr_size = sizeof(struct in6_addr);
+ addr_ptr = (caddr_t)&((struct sockaddr_in6 *)
+ found_addr)->sin6_addr;
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+ /* copy IPv4 address */
+ lookup->ph.param_type =
+ htons(SCTP_IPV4_ADDRESS);
+ p_size = sizeof(struct sctp_ipv4addr_param);
+ addr_size = sizeof(struct in_addr);
+ addr_ptr = (caddr_t)&((struct sockaddr_in *)
+ found_addr)->sin_addr;
+ break;
+#endif
+ default:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_compose_asconf: no usable lookup addr (family = %d)!\n",
+ found_addr->sa_family);
+ sctp_m_freem(m_asconf_chk);
+ sctp_m_freem(m_asconf);
+ return (NULL);
+ }
+ lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+ memcpy(lookup->addr, addr_ptr, addr_size);
+ SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+ } else {
+ /* uh oh... don't have any address?? */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_compose_asconf: no lookup addr!\n");
+ sctp_m_freem(m_asconf_chk);
+ sctp_m_freem(m_asconf);
+ return (NULL);
+ }
+ }
+ /* chain it all together */
+ SCTP_BUF_NEXT(m_asconf_chk) = m_asconf;
+ *retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf);
+ acp->ch.chunk_length = htons(*retlen);
+
+ return (m_asconf_chk);
+}
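+
+/*
+ * Resulting wire layout (sketch, per RFC 5061, section 4.1): the returned
+ * mbuf chain carries
+ *
+ *   ASCONF chunk header (type 0xC1) + serial number
+ *   address parameter (the "lookup" address filled in above)
+ *   one or more ASCONF parameters, each with a correlation id and a
+ *     wrapped address parameter (ADD/DEL/SET_PRIM_ADDR)
+ */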
+
+/*
+ * section to handle address changes before an association is up, e.g.
+ * changes during the INIT/INIT-ACK/COOKIE-ECHO handshake
+ */
+
+/*
+ * processes the (local) addresses in the INIT-ACK chunk
+ */
+static void
+sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m,
+ unsigned int offset, unsigned int length)
+{
+ struct sctp_paramhdr tmp_param, *ph;
+ uint16_t plen, ptype;
+ struct sctp_ifa *sctp_ifa;
+ union sctp_sockstore store;
+#ifdef INET6
+ struct sctp_ipv6addr_param addr6_store;
+#endif
+#ifdef INET
+ struct sctp_ipv4addr_param addr4_store;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "processing init-ack addresses\n");
+ if (stcb == NULL) /* Un-needed check for SA */
+ return;
+
+ /* convert to upper bound */
+ length += offset;
+
+ if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+ return;
+ }
+ /* go through the addresses in the init-ack */
+ ph = (struct sctp_paramhdr *)
+ sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *)&tmp_param);
+ while (ph != NULL) {
+ ptype = ntohs(ph->param_type);
+ plen = ntohs(ph->param_length);
+ switch (ptype) {
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ {
+ struct sctp_ipv6addr_param *a6p;
+
+ /* get the entire IPv6 address param */
+ a6p = (struct sctp_ipv6addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv6addr_param),
+ (uint8_t *)&addr6_store);
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ a6p == NULL) {
+ return;
+ }
+ memset(&store, 0, sizeof(union sctp_sockstore));
+ store.sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ store.sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ store.sin6.sin6_port = stcb->rport;
+ memcpy(&store.sin6.sin6_addr, a6p->addr, sizeof(struct in6_addr));
+ break;
+ }
+#endif
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ {
+ struct sctp_ipv4addr_param *a4p;
+
+ /* get the entire IPv4 address param */
+ a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv4addr_param),
+ (uint8_t *)&addr4_store);
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ a4p == NULL) {
+ return;
+ }
+ memset(&store, 0, sizeof(union sctp_sockstore));
+ store.sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ store.sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+ store.sin.sin_port = stcb->rport;
+ store.sin.sin_addr.s_addr = a4p->addr;
+ break;
+ }
+#endif
+ default:
+ goto next_addr;
+ }
+
+ /* see if this address really (still) exists */
+ sctp_ifa = sctp_find_ifa_by_addr(&store.sa, stcb->asoc.vrf_id,
+ SCTP_ADDR_NOT_LOCKED);
+ if (sctp_ifa == NULL) {
+ /* address doesn't exist anymore */
+ int status;
+
+ /* are ASCONFs allowed ? */
+ if ((sctp_is_feature_on(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF)) &&
+ stcb->asoc.asconf_supported) {
+ /* queue an ASCONF DEL_IP_ADDRESS */
+ status = sctp_asconf_queue_sa_delete(stcb, &store.sa);
+ /*
+ * if queued ok, and in correct state, send
+ * out the ASCONF.
+ */
+ if (status == 0 &&
+ SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ }
+ }
+
+next_addr:
+ /*
+ * Sanity check: Make sure the length isn't 0, otherwise
+ * we'll be stuck in this loop for a long time...
+ */
+ if (SCTP_SIZE32(plen) == 0) {
+ SCTP_PRINTF("process_initack_addrs: bad len (%d) type=%xh\n",
+ plen, ptype);
+ return;
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if ((offset + sizeof(struct sctp_paramhdr)) > length)
+ return;
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
+ } /* while */
+}
+
+/* FIX ME: need to verify return result for v6 address type if v6 disabled */
+/*
+ * checks to see if a specific address is in the initack address list;
+ * returns 1 if found, 0 if not
+ */
+static uint32_t
+sctp_addr_in_initack(struct mbuf *m, uint32_t offset, uint32_t length, struct sockaddr *sa)
+{
+ struct sctp_paramhdr tmp_param, *ph;
+ uint16_t plen, ptype;
+#ifdef INET
+ struct sockaddr_in *sin;
+ struct sctp_ipv4addr_param *a4p;
+ struct sctp_ipv6addr_param addr4_store;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *a6p;
+ struct sctp_ipv6addr_param addr6_store;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ struct sockaddr_in6 sin6_tmp;
+#endif
+#endif
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+ default:
+ return (0);
+ }
+
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "find_initack_addr: starting search for ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ /* convert to upper bound */
+ length += offset;
+
+ if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "find_initack_addr: invalid offset?\n");
+ return (0);
+ }
+ /* go through the addresses in the init-ack */
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
+ while (ph != NULL) {
+ ptype = ntohs(ph->param_type);
+ plen = ntohs(ph->param_length);
+ switch (ptype) {
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ if (sa->sa_family == AF_INET6) {
+ /* check that the param length is as expected */
+ if (plen != sizeof(struct sctp_ipv6addr_param)) {
+ break;
+ }
+ /* get the entire IPv6 address param */
+ a6p = (struct sctp_ipv6addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv6addr_param),
+ (uint8_t *)&addr6_store);
+ if (a6p == NULL) {
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+ /* create a copy and clear scope */
+ memcpy(&sin6_tmp, sin6,
+ sizeof(struct sockaddr_in6));
+ sin6 = &sin6_tmp;
+ in6_clearscope(&sin6->sin6_addr);
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ if (memcmp(&sin6->sin6_addr, a6p->addr,
+ sizeof(struct in6_addr)) == 0) {
+ /* found it */
+ return (1);
+ }
+ }
+ break;
+#endif /* INET6 */
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ if (sa->sa_family == AF_INET) {
+ if (plen != sizeof(struct sctp_ipv4addr_param)) {
+ break;
+ }
+ /* get the entire IPv4 address param */
+ a4p = (struct sctp_ipv4addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv4addr_param),
+ (uint8_t *)&addr4_store);
+ if (a4p == NULL) {
+ return (0);
+ }
+ sin = (struct sockaddr_in *)sa;
+ if (sin->sin_addr.s_addr == a4p->addr) {
+ /* found it */
+ return (1);
+ }
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if (offset + sizeof(struct sctp_paramhdr) > length) {
+ return (0);
+ }
+ ph = (struct sctp_paramhdr *)
+ sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *)&tmp_param);
+ } /* while */
+ /* not found! */
+ return (0);
+}
+
+/*
+ * makes sure that the current endpoint local addr list is consistent with
+ * the new association (e.g. subset bound, asconf allowed); adds addresses
+ * as necessary
+ */
+static void
+sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr)
+{
+ struct sctp_laddr *laddr;
+
+ /* go through the endpoint list */
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ /* be paranoid and validate the laddr */
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "check_addr_list_ep: laddr->ifa is NULL");
+ continue;
+ }
+ /* do I have it implicitly? */
+ if (sctp_cmpaddr(&laddr->ifa->address.sa, init_addr)) {
+ continue;
+ }
+ /* check to see if in the init-ack */
+ if (!sctp_addr_in_initack(m, offset, length, &laddr->ifa->address.sa)) {
+ /* try to add it */
+ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, laddr->ifa,
+ SCTP_ADD_IP_ADDRESS, SCTP_ADDR_NOT_LOCKED);
+ }
+ }
+}
+
+/*
+ * makes sure that the current kernel address list is consistent with the
+ * new association (with all addrs bound); adds addresses as necessary
+ */
+static void
+sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr,
+ uint16_t local_scope, uint16_t site_scope,
+ uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ uint32_t vrf_id;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+
+ if (stcb) {
+ vrf_id = stcb->asoc.vrf_id;
+ } else {
+ return;
+ }
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return;
+ }
+ /* go through all our known interfaces */
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (loopback_scope == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* skip loopback interface */
+ continue;
+ }
+ /* go through each interface address */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ /* do I have it implicitly? */
+ if (sctp_cmpaddr(&sctp_ifa->address.sa, init_addr)) {
+ continue;
+ }
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ sin = &sctp_ifa->address.sin;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if ((ipv4_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ /* private address not in scope */
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = &sctp_ifa->address.sin6;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if ((local_scope == 0) &&
+ (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ /* check to see if in the init-ack */
+ if (!sctp_addr_in_initack(m, offset, length, &sctp_ifa->address.sa)) {
+ /* try to add it */
+ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb,
+ sctp_ifa, SCTP_ADD_IP_ADDRESS,
+ SCTP_ADDR_LOCKED);
+ }
+ } /* end foreach ifa */
+ } /* end foreach ifn */
+ SCTP_IPI_ADDR_RUNLOCK();
+}
+
+/*
+ * validates an init-ack chunk (from a cookie-echo) against the current
+ * addresses: adds addresses from the init-ack into our local address list
+ * if needed, queues asconf adds/deletes as needed, and makes appropriate
+ * list changes for source address selection.
+ * m, offset: points to the start of the address list in an init-ack chunk
+ * length: total length of the address params only
+ * init_addr: address where my INIT-ACK was sent from
+ */
+void
+sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr,
+ uint16_t local_scope, uint16_t site_scope,
+ uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+ /* process the local addresses in the initack */
+ sctp_process_initack_addresses(stcb, m, offset, length);
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* bound all case */
+ sctp_check_address_list_all(stcb, m, offset, length, init_addr,
+ local_scope, site_scope, ipv4_scope, loopback_scope);
+ } else {
+ /* subset bound case */
+ if (sctp_is_feature_on(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* asconf's allowed */
+ sctp_check_address_list_ep(stcb, m, offset, length,
+ init_addr);
+ }
+ /* else, no asconfs allowed, so what we sent is what we get */
+ }
+}
+
+/*
+ * sctp_bindx() support
+ */
+uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
+ uint32_t type, uint32_t vrf_id)
+{
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *laddr, *nladdr;
+
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ /* For an add the address MUST be on the system */
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ /* For a delete we need to find it in the inp */
+ ifa = sctp_find_ifa_in_ep(inp, sa, SCTP_ADDR_NOT_LOCKED);
+ } else {
+ ifa = NULL;
+ }
+ if (ifa != NULL) {
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ sctp_add_local_addr_ep(inp, ifa, type);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ if (inp->laddr_count < 2) {
+ /* can't delete the last local address */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
+ return (EINVAL);
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (ifa == laddr->ifa) {
+ /* Mark in the delete */
+ laddr->action = type;
+ }
+ }
+ }
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+ /*
+ * There is no need to start the iterator if
+ * the inp has no associations.
+ */
+ if (type == SCTP_DEL_IP_ADDRESS) {
+ LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+ if (laddr->ifa == ifa) {
+ sctp_del_local_addr_ep(inp, ifa);
+ }
+ }
+ }
+ } else {
+ struct sctp_asconf_iterator *asc;
+ struct sctp_laddr *wi;
+ int ret;
+
+ SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
+ sizeof(struct sctp_asconf_iterator),
+ SCTP_M_ASC_IT);
+ if (asc == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
+ return (ENOMEM);
+ }
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
+ return (ENOMEM);
+ }
+ LIST_INIT(&asc->list_of_work);
+ asc->cnt = 1;
+ SCTP_INCR_LADDR_COUNT();
+ wi->ifa = ifa;
+ wi->action = type;
+ atomic_add_int(&ifa->refcount, 1);
+ LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
+ ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
+ sctp_asconf_iterator_stcb,
+ sctp_asconf_iterator_ep_end,
+ SCTP_PCB_ANY_FLAGS,
+ SCTP_PCB_ANY_FEATURES,
+ SCTP_ASOC_ANY_STATE,
+ (void *)asc, 0,
+ sctp_asconf_iterator_end, inp, 0);
+ if (ret) {
+ SCTP_PRINTF("Failed to initiate iterator for addr_mgmt_ep_sa\n");
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EFAULT);
+ sctp_asconf_iterator_end(asc, 0);
+ return (EFAULT);
+ }
+ }
+ return (0);
+ } else {
+ /* invalid address! */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EADDRNOTAVAIL);
+ return (EADDRNOTAVAIL);
+ }
+}
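+
+/*
+ * Usage sketch (illustrative only; names per the usrsctp public API):
+ * usrsctp_bindx() funnels into this function, e.g.
+ *
+ *   struct sockaddr_in sin;
+ *
+ *   memset(&sin, 0, sizeof(sin));
+ *   sin.sin_family = AF_INET;
+ *   sin.sin_addr.s_addr = inet_addr("192.0.2.1");
+ *   usrsctp_bindx(so, (struct sockaddr *)&sin, 1, SCTP_BINDX_ADD_ADDR);
+ *
+ * ends up here with type == SCTP_ADD_IP_ADDRESS; SCTP_BINDX_REM_ADDR maps
+ * to SCTP_DEL_IP_ADDRESS.
+ */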
+
+void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_asconf_addr *aa_vtag, *aa_add, *aa_del;
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_asconf_tag_param *vtag;
+#ifdef INET
+ struct sockaddr_in *to;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *to6;
+#endif
+
+ if (net == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
+ return;
+ }
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n");
+ return;
+ }
+ /* Need to have in the ASCONF:
+ * - VTAG(my_vtag/peer_vtag)
+ * - ADD(wildcard)
+ * - DEL(wildcard)
+ * - ADD(Any global addresses)
+ */
+ SCTP_MALLOC(aa_vtag, struct sctp_asconf_addr *, sizeof(struct sctp_asconf_addr), SCTP_M_ASC_ADDR);
+ SCTP_MALLOC(aa_add, struct sctp_asconf_addr *, sizeof(struct sctp_asconf_addr), SCTP_M_ASC_ADDR);
+ SCTP_MALLOC(aa_del, struct sctp_asconf_addr *, sizeof(struct sctp_asconf_addr), SCTP_M_ASC_ADDR);
+
+ if ((aa_vtag == NULL) || (aa_add == NULL) || (aa_del == NULL)) {
+ /* Didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+out:
+ if (aa_vtag != NULL) {
+ SCTP_FREE(aa_vtag, SCTP_M_ASC_ADDR);
+ }
+ if (aa_add != NULL) {
+ SCTP_FREE(aa_add, SCTP_M_ASC_ADDR);
+ }
+ if (aa_del != NULL) {
+ SCTP_FREE(aa_del, SCTP_M_ASC_ADDR);
+ }
+ return;
+ }
+ memset(aa_vtag, 0, sizeof(struct sctp_asconf_addr));
+ aa_vtag->special_del = 0;
+ /* Fill in ASCONF address parameter fields. */
+ /* Top level elements are "networked" during send. */
+ aa_vtag->ifa = NULL;
+ aa_vtag->sent = 0; /* clear sent flag */
+ vtag = (struct sctp_asconf_tag_param *)&aa_vtag->ap.aph;
+ vtag->aph.ph.param_type = SCTP_NAT_VTAGS;
+ vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param);
+ vtag->local_vtag = htonl(stcb->asoc.my_vtag);
+ vtag->remote_vtag = htonl(stcb->asoc.peer_vtag);
+
+ memset(aa_add, 0, sizeof(struct sctp_asconf_addr));
+ memset(aa_del, 0, sizeof(struct sctp_asconf_addr));
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ aa_add->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+ aa_add->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+ aa_add->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa_add->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
+ /* No need to fill the address, we are using 0.0.0.0 */
+ aa_del->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa_del->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+ aa_del->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa_del->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
+ /* No need to fill the address, we are using 0.0.0.0 */
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ aa_add->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+ aa_add->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+ aa_add->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa_add->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
+ /* No need to fill the address, we are using ::0 */
+ aa_del->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa_del->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+ aa_del->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa_del->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
+ /* No need to fill the address, we are using ::0 */
+ break;
+#endif
+ default:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: unknown address family %d\n",
+ net->ro._l_addr.sa.sa_family);
+ goto out;
+ }
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa_vtag, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa_add, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa_del, next);
+
+ /* Now we must hunt the addresses and add all global addresses */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifnp;
+ uint32_t vrf_id;
+
+ vrf_id = stcb->sctp_ep->def_vrf_id;
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ goto skip_rest;
+ }
+
+ SCTP_IPI_ADDR_RLOCK();
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+ switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ to = &sctp_ifap->address.sin;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &to->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ to6 = &sctp_ifap->address.sin6;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &to6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ continue;
+ }
+ break;
+#endif
+ default:
+ continue;
+ }
+ sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+ /* Address being deleted by the system, don't
+ * list.
+ */
+ continue;
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* Address being deleted on this ep,
+ * don't list.
+ */
+ continue;
+ }
+ sctp_ifap = laddr->ifa;
+ switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ to = &sctp_ifap->address.sin;
+ if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ to6 = &sctp_ifap->address.sin6;
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ continue;
+ }
+ break;
+#endif
+ default:
+ continue;
+ }
+ sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+ }
+ }
+ skip_rest:
+ /* Now we must send the asconf into the queue */
+ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+}
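+
+/*
+ * Parameter layout sketch (illustrative): the SCTP_NAT_VTAGS parameter
+ * queued above lets a NAT correlate both directions of the association:
+ *
+ *   struct sctp_asconf_tag_param {
+ *       struct sctp_asconf_paramhdr aph;   type = SCTP_NAT_VTAGS
+ *       uint32_t local_vtag;               htonl(my_vtag)
+ *       uint32_t remote_vtag;              htonl(peer_vtag)
+ *   };
+ */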
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.h
new file mode 100644
index 000000000..e171a0b56
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_asconf.h
@@ -0,0 +1,96 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 362377 2020-06-19 12:35:29Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_ASCONF_H_
+#define _NETINET_SCTP_ASCONF_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/*
+ * function prototypes
+ */
+extern void sctp_asconf_cleanup(struct sctp_tcb *);
+
+extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *, int *, int);
+
+extern void
+sctp_handle_asconf(struct mbuf *, unsigned int, struct sockaddr *,
+ struct sctp_asconf_chunk *, struct sctp_tcb *, int);
+
+extern void
+sctp_handle_asconf_ack(struct mbuf *, int, struct sctp_asconf_ack_chunk *,
+ struct sctp_tcb *, struct sctp_nets *, int *);
+
+extern uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *, uint32_t,
+ uint32_t);
+
+
+extern int sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr,
+ uint32_t val);
+extern void sctp_asconf_iterator_stcb(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ void *ptr, uint32_t type);
+extern void sctp_asconf_iterator_end(void *ptr, uint32_t val);
+
+
+extern int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *,
+ struct sockaddr *);
+
+extern void
+sctp_check_address_list(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sockaddr *, uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern void
+sctp_assoc_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+extern void
+sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+#endif
+
+extern void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+extern int
+sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
+#endif /* _KERNEL || __Userspace__ */
+
+#endif /* !_NETINET_SCTP_ASCONF_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.c
new file mode 100644
index 000000000..3a8fac073
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.c
@@ -0,0 +1,2314 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 362054 2020-06-11 13:34:09Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_auth.h>
+
+#ifdef SCTP_DEBUG
+#define SCTP_AUTH_DEBUG (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH1)
+#define SCTP_AUTH_DEBUG2 (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2)
+#endif /* SCTP_DEBUG */
+
+
+void
+sctp_clear_chunklist(sctp_auth_chklist_t *chklist)
+{
+ memset(chklist, 0, sizeof(*chklist));
+ /* chklist->num_chunks = 0; */
+}
+
+sctp_auth_chklist_t *
+sctp_alloc_chunklist(void)
+{
+ sctp_auth_chklist_t *chklist;
+
+ SCTP_MALLOC(chklist, sctp_auth_chklist_t *, sizeof(*chklist),
+ SCTP_M_AUTH_CL);
+ if (chklist == NULL) {
+ SCTPDBG(SCTP_DEBUG_AUTH1, "sctp_alloc_chunklist: failed to get memory!\n");
+ } else {
+ sctp_clear_chunklist(chklist);
+ }
+ return (chklist);
+}
+
+void
+sctp_free_chunklist(sctp_auth_chklist_t *list)
+{
+ if (list != NULL)
+ SCTP_FREE(list, SCTP_M_AUTH_CL);
+}
+
+sctp_auth_chklist_t *
+sctp_copy_chunklist(sctp_auth_chklist_t *list)
+{
+ sctp_auth_chklist_t *new_list;
+
+ if (list == NULL)
+ return (NULL);
+
+ /* get a new list */
+ new_list = sctp_alloc_chunklist();
+ if (new_list == NULL)
+ return (NULL);
+ /* copy it */
+ memcpy(new_list, list, sizeof(*new_list));
+
+ return (new_list);
+}
+
+
+/*
+ * add a chunk to the required chunks list
+ */
+int
+sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t *list)
+{
+ if (list == NULL)
+ return (-1);
+
+ /* is chunk restricted? */
+ if ((chunk == SCTP_INITIATION) ||
+ (chunk == SCTP_INITIATION_ACK) ||
+ (chunk == SCTP_SHUTDOWN_COMPLETE) ||
+ (chunk == SCTP_AUTHENTICATION)) {
+ return (-1);
+ }
+ if (list->chunks[chunk] == 0) {
+ list->chunks[chunk] = 1;
+ list->num_chunks++;
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: added chunk %u (0x%02x) to Auth list\n",
+ chunk, chunk);
+ }
+ return (0);
+}
+
+/*
+ * delete a chunk from the required chunks list
+ */
+int
+sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t *list)
+{
+ if (list == NULL)
+ return (-1);
+
+ if (list->chunks[chunk] == 1) {
+ list->chunks[chunk] = 0;
+ list->num_chunks--;
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: deleted chunk %u (0x%02x) from Auth list\n",
+ chunk, chunk);
+ }
+ return (0);
+}
+
+size_t
+sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list)
+{
+ if (list == NULL)
+ return (0);
+ else
+ return (list->num_chunks);
+}
+
+/*
+ * return the current number and list of required chunks; the caller must
+ * guarantee ptr has space for up to 256 bytes
+ */
+int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
+{
+ int i, count = 0;
+
+ if (list == NULL)
+ return (0);
+
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ *ptr++ = i;
+ count++;
+ }
+ }
+ return (count);
+}
+
+int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
+{
+ int i, size = 0;
+
+ if (list == NULL)
+ return (0);
+
+ if (list->num_chunks <= 32) {
+ /* just list them, one byte each */
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ *ptr++ = i;
+ size++;
+ }
+ }
+ } else {
+ int index, offset;
+
+ /* pack into a 32 byte bitfield */
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ index = i / 8;
+ offset = i % 8;
+ ptr[index] |= (1 << offset);
+ }
+ }
+ size = 32;
+ }
+ return (size);
+}
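+
+/*
+ * Packing example (illustrative): with more than 32 chunk types listed,
+ * chunk type 10 (0x0a) sets bit (10 % 8) = 2 of byte (10 / 8) = 1 in the
+ * 32-byte field; note the bits are only OR'd in, so the caller is expected
+ * to provide a zeroed buffer. With 32 or fewer types, each type is emitted
+ * as one byte instead.
+ */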
+
+int
+sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks,
+ sctp_auth_chklist_t *list)
+{
+ int i;
+ int size;
+
+ if (list == NULL)
+ return (0);
+
+ if (num_chunks <= 32) {
+ /* just pull them, one byte each */
+ for (i = 0; i < num_chunks; i++) {
+ (void)sctp_auth_add_chunk(*ptr++, list);
+ }
+ size = num_chunks;
+ } else {
+ int index, offset;
+
+ /* unpack from a 32 byte bitfield */
+ for (index = 0; index < 32; index++) {
+ for (offset = 0; offset < 8; offset++) {
+ if (ptr[index] & (1 << offset)) {
+ (void)sctp_auth_add_chunk((index * 8) + offset, list);
+ }
+ }
+ }
+ size = 32;
+ }
+ return (size);
+}
+
+
+/*
+ * allocate structure space for a key of length keylen
+ */
+sctp_key_t *
+sctp_alloc_key(uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ SCTP_MALLOC(new_key, sctp_key_t *, sizeof(*new_key) + keylen,
+ SCTP_M_AUTH_KY);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keylen = keylen;
+ return (new_key);
+}
+
+void
+sctp_free_key(sctp_key_t *key)
+{
+ if (key != NULL)
+ SCTP_FREE(key,SCTP_M_AUTH_KY);
+}
+
+void
+sctp_print_key(sctp_key_t *key, const char *str)
+{
+ uint32_t i;
+
+ if (key == NULL) {
+ SCTP_PRINTF("%s: [Null key]\n", str);
+ return;
+ }
+ SCTP_PRINTF("%s: len %u, ", str, key->keylen);
+ if (key->keylen) {
+ for (i = 0; i < key->keylen; i++)
+ SCTP_PRINTF("%02x", key->key[i]);
+ SCTP_PRINTF("\n");
+ } else {
+ SCTP_PRINTF("[Null key]\n");
+ }
+}
+
+void
+sctp_show_key(sctp_key_t *key, const char *str)
+{
+ uint32_t i;
+
+ if (key == NULL) {
+ SCTP_PRINTF("%s: [Null key]\n", str);
+ return;
+ }
+ SCTP_PRINTF("%s: len %u, ", str, key->keylen);
+ if (key->keylen) {
+ for (i = 0; i < key->keylen; i++)
+ SCTP_PRINTF("%02x", key->key[i]);
+ SCTP_PRINTF("\n");
+ } else {
+ SCTP_PRINTF("[Null key]\n");
+ }
+}
+
+static uint32_t
+sctp_get_keylen(sctp_key_t *key)
+{
+ if (key != NULL)
+ return (key->keylen);
+ else
+ return (0);
+}
+
+/*
+ * generate a new random key of length 'keylen'
+ */
+sctp_key_t *
+sctp_generate_random_key(uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ SCTP_READ_RANDOM(new_key->key, keylen);
+ new_key->keylen = keylen;
+ return (new_key);
+}
+
+sctp_key_t *
+sctp_set_key(uint8_t *key, uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ memcpy(new_key->key, key, keylen);
+ return (new_key);
+}
+
+/*-
+ * given two keys of variable size, compute which key is "larger/smaller"
+ * returns: 1 if key1 > key2
+ * -1 if key1 < key2
+ * 0 if key1 = key2
+ */
+static int
+sctp_compare_key(sctp_key_t *key1, sctp_key_t *key2)
+{
+ uint32_t maxlen;
+ uint32_t i;
+ uint32_t key1len, key2len;
+ uint8_t *key_1, *key_2;
+ uint8_t val1, val2;
+
+ /* sanity/length check */
+ key1len = sctp_get_keylen(key1);
+ key2len = sctp_get_keylen(key2);
+ if ((key1len == 0) && (key2len == 0))
+ return (0);
+ else if (key1len == 0)
+ return (-1);
+ else if (key2len == 0)
+ return (1);
+
+ if (key1len < key2len) {
+ maxlen = key2len;
+ } else {
+ maxlen = key1len;
+ }
+ key_1 = key1->key;
+ key_2 = key2->key;
+ /* check for numeric equality */
+ for (i = 0; i < maxlen; i++) {
+ /* left-pad with zeros */
+ val1 = (i < (maxlen - key1len)) ? 0 : *(key_1++);
+ val2 = (i < (maxlen - key2len)) ? 0 : *(key_2++);
+ if (val1 > val2) {
+ return (1);
+ } else if (val1 < val2) {
+ return (-1);
+ }
+ }
+ /* keys are equal value, so check lengths */
+ if (key1len == key2len)
+ return (0);
+ else if (key1len < key2len)
+ return (-1);
+ else
+ return (1);
+}
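+
+/*
+ * Comparison example (illustrative): for key1 = {0x01} and key2 =
+ * {0x00, 0x01}, key1 is left-padded to {0x00, 0x01}; the values are then
+ * numerically equal, so the shorter key1 is treated as smaller and -1 is
+ * returned.
+ */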
+
+/*
+ * generate the concatenated keying material based on the two keys and the
+ * shared key (if available). draft-ietf-tsvwg-auth specifies the exact
+ * order for concatenation
+ */
+sctp_key_t *
+sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2, sctp_key_t *shared)
+{
+ uint32_t keylen;
+ sctp_key_t *new_key;
+ uint8_t *key_ptr;
+
+ keylen = sctp_get_keylen(key1) + sctp_get_keylen(key2) +
+ sctp_get_keylen(shared);
+
+ if (keylen > 0) {
+ /* get space for the new key */
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keylen = keylen;
+ key_ptr = new_key->key;
+ } else {
+ /* all keys empty/null?! */
+ return (NULL);
+ }
+
+ /* concatenate the keys */
+ if (sctp_compare_key(key1, key2) <= 0) {
+ /* key is shared + key1 + key2 */
+ if (sctp_get_keylen(shared)) {
+ memcpy(key_ptr, shared->key, shared->keylen);
+ key_ptr += shared->keylen;
+ }
+ if (sctp_get_keylen(key1)) {
+ memcpy(key_ptr, key1->key, key1->keylen);
+ key_ptr += key1->keylen;
+ }
+ if (sctp_get_keylen(key2)) {
+ memcpy(key_ptr, key2->key, key2->keylen);
+ }
+ } else {
+ /* key is shared + key2 + key1 */
+ if (sctp_get_keylen(shared)) {
+ memcpy(key_ptr, shared->key, shared->keylen);
+ key_ptr += shared->keylen;
+ }
+ if (sctp_get_keylen(key2)) {
+ memcpy(key_ptr, key2->key, key2->keylen);
+ key_ptr += key2->keylen;
+ }
+ if (sctp_get_keylen(key1)) {
+ memcpy(key_ptr, key1->key, key1->keylen);
+ }
+ }
+ return (new_key);
+}
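+
+/*
+ * Ordering note (illustrative): the result is shared||key1||key2 when
+ * sctp_compare_key(key1, key2) <= 0 and shared||key2||key1 otherwise, so
+ * both endpoints derive identical keying material no matter which side
+ * supplied "key1".
+ */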
+
+
+sctp_sharedkey_t *
+sctp_alloc_sharedkey(void)
+{
+ sctp_sharedkey_t *new_key;
+
+ SCTP_MALLOC(new_key, sctp_sharedkey_t *, sizeof(*new_key),
+ SCTP_M_AUTH_KY);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keyid = 0;
+ new_key->key = NULL;
+ new_key->refcount = 1;
+ new_key->deactivated = 0;
+ return (new_key);
+}
+
+void
+sctp_free_sharedkey(sctp_sharedkey_t *skey)
+{
+ if (skey == NULL)
+ return;
+
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&skey->refcount)) {
+ if (skey->key != NULL)
+ sctp_free_key(skey->key);
+ SCTP_FREE(skey, SCTP_M_AUTH_KY);
+ }
+}
+
+sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ LIST_FOREACH(skey, shared_keys, next) {
+ if (skey->keyid == key_id)
+ return (skey);
+ }
+ return (NULL);
+}
+
+int
+sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+ sctp_sharedkey_t *new_skey)
+{
+ sctp_sharedkey_t *skey;
+
+ if ((shared_keys == NULL) || (new_skey == NULL))
+ return (EINVAL);
+
+ /* insert into an empty list? */
+ if (LIST_EMPTY(shared_keys)) {
+ LIST_INSERT_HEAD(shared_keys, new_skey, next);
+ return (0);
+ }
+ /* insert into the existing list, ordered by key id */
+ LIST_FOREACH(skey, shared_keys, next) {
+ if (new_skey->keyid < skey->keyid) {
+ /* insert it before here */
+ LIST_INSERT_BEFORE(skey, new_skey, next);
+ return (0);
+ } else if (new_skey->keyid == skey->keyid) {
+ /* replace the existing key */
+ /* verify this key *can* be replaced */
+ if ((skey->deactivated) || (skey->refcount > 1)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "can't replace shared key id %u\n",
+ new_skey->keyid);
+ return (EBUSY);
+ }
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "replacing shared key id %u\n",
+ new_skey->keyid);
+ LIST_INSERT_BEFORE(skey, new_skey, next);
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey);
+ return (0);
+ }
+ if (LIST_NEXT(skey, next) == NULL) {
+ /* belongs at the end of the list */
+ LIST_INSERT_AFTER(skey, new_skey, next);
+ return (0);
+ }
+ }
+ /* shouldn't reach here */
+ return (EINVAL);
+}
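+
+/*
+ * Insertion example (illustrative): inserting key id 2 into the ordered
+ * list {1, 3} places it between 1 and 3; re-inserting id 3 replaces the
+ * old entry only while that key is active and unreferenced (refcount 1),
+ * otherwise EBUSY is returned.
+ */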
+
+void
+sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the shared key */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+ /* bump the ref count */
+ if (skey) {
+ atomic_add_int(&skey->refcount, 1);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u refcount acquire to %d\n",
+ __func__, (void *)stcb, key_id, skey->refcount);
+ }
+}
+
+void
+sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id, int so_locked)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the shared key */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+ /* decrement the ref count */
+ if (skey) {
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u refcount release to %d\n",
+ __func__, (void *)stcb, key_id, skey->refcount);
+
+ /* see if a notification should be generated */
+ if ((skey->refcount <= 2) && (skey->deactivated)) {
+ /* notify ULP that key is no longer used */
+ sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
+ key_id, 0, so_locked);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u no longer used, %d\n",
+ __func__, (void *)stcb, key_id, skey->refcount);
+ }
+ sctp_free_sharedkey(skey);
+ }
+}
+
+static sctp_sharedkey_t *
+sctp_copy_sharedkey(const sctp_sharedkey_t *skey)
+{
+ sctp_sharedkey_t *new_skey;
+
+ if (skey == NULL)
+ return (NULL);
+ new_skey = sctp_alloc_sharedkey();
+ if (new_skey == NULL)
+ return (NULL);
+ if (skey->key != NULL)
+ new_skey->key = sctp_set_key(skey->key->key, skey->key->keylen);
+ else
+ new_skey->key = NULL;
+ new_skey->keyid = skey->keyid;
+ return (new_skey);
+}
+
+int
+sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
+{
+ sctp_sharedkey_t *skey, *new_skey;
+ int count = 0;
+
+ if ((src == NULL) || (dest == NULL))
+ return (0);
+ LIST_FOREACH(skey, src, next) {
+ new_skey = sctp_copy_sharedkey(skey);
+ if (new_skey != NULL) {
+ if (sctp_insert_sharedkey(dest, new_skey)) {
+ sctp_free_sharedkey(new_skey);
+ } else {
+ count++;
+ }
+ }
+ }
+ return (count);
+}
+
+
+sctp_hmaclist_t *
+sctp_alloc_hmaclist(uint16_t num_hmacs)
+{
+ sctp_hmaclist_t *new_list;
+ int alloc_size;
+
+ alloc_size = sizeof(*new_list) + num_hmacs * sizeof(new_list->hmac[0]);
+ SCTP_MALLOC(new_list, sctp_hmaclist_t *, alloc_size,
+ SCTP_M_AUTH_HL);
+ if (new_list == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_list->max_algo = num_hmacs;
+ new_list->num_algo = 0;
+ return (new_list);
+}
+
+void
+sctp_free_hmaclist(sctp_hmaclist_t *list)
+{
+ if (list != NULL) {
+ SCTP_FREE(list,SCTP_M_AUTH_HL);
+ }
+}
+
+int
+sctp_auth_add_hmacid(sctp_hmaclist_t *list, uint16_t hmac_id)
+{
+ int i;
+ if (list == NULL)
+ return (-1);
+ if (list->num_algo == list->max_algo) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: HMAC id list full, ignoring add %u\n", hmac_id);
+ return (-1);
+ }
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ if ((hmac_id != SCTP_AUTH_HMAC_ID_SHA1) &&
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA256)) {
+#else
+ if (hmac_id != SCTP_AUTH_HMAC_ID_SHA1) {
+#endif
+ return (-1);
+ }
+ /* now check whether it is already in the list */
+ for (i = 0; i < list->num_algo; i++) {
+ if (list->hmac[i] == hmac_id) {
+ /* already in list */
+ return (-1);
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: add HMAC id %u to list\n", hmac_id);
+ list->hmac[list->num_algo++] = hmac_id;
+ return (0);
+}
+
+sctp_hmaclist_t *
+sctp_copy_hmaclist(sctp_hmaclist_t *list)
+{
+ sctp_hmaclist_t *new_list;
+ int i;
+
+ if (list == NULL)
+ return (NULL);
+ /* get a new list */
+ new_list = sctp_alloc_hmaclist(list->max_algo);
+ if (new_list == NULL)
+ return (NULL);
+ /* copy it */
+ new_list->max_algo = list->max_algo;
+ new_list->num_algo = list->num_algo;
+ for (i = 0; i < list->num_algo; i++)
+ new_list->hmac[i] = list->hmac[i];
+ return (new_list);
+}
+
+sctp_hmaclist_t *
+sctp_default_supported_hmaclist(void)
+{
+ sctp_hmaclist_t *new_list;
+
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ new_list = sctp_alloc_hmaclist(2);
+#else
+ new_list = sctp_alloc_hmaclist(1);
+#endif
+ if (new_list == NULL)
+ return (NULL);
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ /* We prefer SHA256, so list it first */
+ (void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA256);
+#endif
+ (void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA1);
+ return (new_list);
+}
+
+/*-
+ * HMAC algos are listed in priority/preference order;
+ * find the best HMAC id to use for the peer based on local support
+ */
+uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t *peer, sctp_hmaclist_t *local)
+{
+ int i, j;
+
+ if ((local == NULL) || (peer == NULL))
+ return (SCTP_AUTH_HMAC_ID_RSVD);
+
+ for (i = 0; i < peer->num_algo; i++) {
+ for (j = 0; j < local->num_algo; j++) {
+ if (peer->hmac[i] == local->hmac[j]) {
+ /* found the "best" one */
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: negotiated peer HMAC id %u\n",
+ peer->hmac[i]);
+ return (peer->hmac[i]);
+ }
+ }
+ }
+ /* didn't find one! */
+ return (SCTP_AUTH_HMAC_ID_RSVD);
+}
+
+/*-
+ * serialize the HMAC algo list and return the space used;
+ * the caller must guarantee ptr has appropriate space
+ */
+int
+sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr)
+{
+ int i;
+ uint16_t hmac_id;
+
+ if (list == NULL)
+ return (0);
+
+ for (i = 0; i < list->num_algo; i++) {
+ hmac_id = htons(list->hmac[i]);
+ memcpy(ptr, &hmac_id, sizeof(hmac_id));
+ ptr += sizeof(hmac_id);
+ }
+ return (list->num_algo * sizeof(hmac_id));
+}
+
+int
+sctp_verify_hmac_param (struct sctp_auth_hmac_algo *hmacs, uint32_t num_hmacs)
+{
+ uint32_t i;
+
+ for (i = 0; i < num_hmacs; i++) {
+ if (ntohs(hmacs->hmac_ids[i]) == SCTP_AUTH_HMAC_ID_SHA1) {
+ return (0);
+ }
+ }
+ return (-1);
+}
+
+sctp_authinfo_t *
+sctp_alloc_authinfo(void)
+{
+ sctp_authinfo_t *new_authinfo;
+
+ SCTP_MALLOC(new_authinfo, sctp_authinfo_t *, sizeof(*new_authinfo),
+ SCTP_M_AUTH_IF);
+
+ if (new_authinfo == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ memset(new_authinfo, 0, sizeof(*new_authinfo));
+ return (new_authinfo);
+}
+
+void
+sctp_free_authinfo(sctp_authinfo_t *authinfo)
+{
+ if (authinfo == NULL)
+ return;
+
+ if (authinfo->random != NULL)
+ sctp_free_key(authinfo->random);
+ if (authinfo->peer_random != NULL)
+ sctp_free_key(authinfo->peer_random);
+ if (authinfo->assoc_key != NULL)
+ sctp_free_key(authinfo->assoc_key);
+ if (authinfo->recv_key != NULL)
+ sctp_free_key(authinfo->recv_key);
+
+ /* We are NOT dynamically allocating authinfo's right now... */
+ /* SCTP_FREE(authinfo, SCTP_M_AUTH_??); */
+}
+
+
+uint32_t
+sctp_get_auth_chunk_len(uint16_t hmac_algo)
+{
+ int size;
+
+ size = sizeof(struct sctp_auth_chunk) + sctp_get_hmac_digest_len(hmac_algo);
+ return (SCTP_SIZE32(size));
+}
+
+uint32_t
+sctp_get_hmac_digest_len(uint16_t hmac_algo)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ return (SCTP_AUTH_DIGEST_LEN_SHA1);
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ return (SCTP_AUTH_DIGEST_LEN_SHA256);
+#endif
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return (0);
+ } /* end switch */
+}
+
+static inline int
+sctp_get_hmac_block_len(uint16_t hmac_algo)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ return (64);
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ return (64);
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return (0);
+ } /* end switch */
+}
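+
+/*-
+ * Both supported algorithms use a 64-byte input block, which is why the
+ * 128-byte ipad/opad buffers in sctp_hmac() below are always large
+ * enough (presumably sized with headroom for wider-block hashes such as
+ * SHA-384/512, which are not wired up here).
+ */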
+
+#if defined(__Userspace__)
+/* __Userspace__ SHA1_Init is defined in libcrypto.a (libssl-dev on Ubuntu) */
+#endif
+static void
+sctp_hmac_init(uint16_t hmac_algo, sctp_hash_context_t *ctx)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SCTP_SHA1_INIT(&ctx->sha1);
+ break;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SCTP_SHA256_INIT(&ctx->sha256);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+static void
+sctp_hmac_update(uint16_t hmac_algo, sctp_hash_context_t *ctx,
+ uint8_t *text, uint32_t textlen)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SCTP_SHA1_UPDATE(&ctx->sha1, text, textlen);
+ break;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SCTP_SHA256_UPDATE(&ctx->sha256, text, textlen);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+static void
+sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t *ctx,
+ uint8_t *digest)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SCTP_SHA1_FINAL(digest, &ctx->sha1);
+ break;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SCTP_SHA256_FINAL(digest, &ctx->sha256);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+/*-
+ * Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
+ *
+ * Compute the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm. Resulting digest is placed in 'digest' and digest length
+ * is returned, if the HMAC was performed.
+ *
+ * WARNING: it is up to the caller to supply sufficient space to hold the
+ * resultant digest.
+ */
+uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+ uint8_t *text, uint32_t textlen, uint8_t *digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t ipad[128], opad[128]; /* keyed hash inner/outer pads */
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint32_t i;
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) || (text == NULL) ||
+ (textlen == 0) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* set the hashed key as the key */
+ keylen = digestlen;
+ key = temp;
+ }
+ /* initialize the inner/outer pads with the key and "append" zeroes */
+ memset(ipad, 0, blocklen);
+ memset(opad, 0, blocklen);
+ memcpy(ipad, key, keylen);
+ memcpy(opad, key, keylen);
+
+ /* XOR the key with ipad and opad values */
+ for (i = 0; i < blocklen; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* perform inner hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, text, textlen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+
+ /* perform outer hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+ sctp_hmac_final(hmac_algo, &ctx, digest);
+
+ return (digestlen);
+}
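+
+/*-
+ * Usage sketch (key/text buffers assumed to be set up by the caller);
+ * a return of 0 means an unknown algorithm or an empty key, text, or
+ * digest pointer:
+ *
+ *	uint8_t digest[SCTP_AUTH_DIGEST_LEN_MAX];
+ *	uint32_t dlen;
+ *
+ *	dlen = sctp_hmac(SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ *	                 text, textlen, digest);
+ */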
+
+/* mbuf version */
+uint32_t
+sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+ struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t ipad[128], opad[128]; /* keyed hash inner/outer pads */
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint32_t i;
+ struct mbuf *m_tmp;
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) || (m == NULL) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* set the hashed key as the key */
+ keylen = digestlen;
+ key = temp;
+ }
+ /* initialize the inner/outer pads with the key and "append" zeroes */
+ memset(ipad, 0, blocklen);
+ memset(opad, 0, blocklen);
+ memcpy(ipad, key, keylen);
+ memcpy(opad, key, keylen);
+
+ /* XOR the key with ipad and opad values */
+ for (i = 0; i < blocklen; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* perform inner hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+ /* find the correct starting mbuf and offset (get start of text) */
+ m_tmp = m;
+ while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+ m_offset -= SCTP_BUF_LEN(m_tmp);
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ /* now use the rest of the mbuf chain for the text */
+ while (m_tmp != NULL) {
+ if ((SCTP_BUF_NEXT(m_tmp) == NULL) && trailer) {
+ sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+ SCTP_BUF_LEN(m_tmp) - (trailer+m_offset));
+ } else {
+ sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+ SCTP_BUF_LEN(m_tmp) - m_offset);
+ }
+
+ /* clear the offset since it's only for the first mbuf */
+ m_offset = 0;
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+
+ /* perform outer hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+ sctp_hmac_final(hmac_algo, &ctx, digest);
+
+ return (digestlen);
+}
+
+/*
+ * computes the requested HMAC using a key struct (which may be modified if
+ * the keylen exceeds the HMAC block len).
+ */
+uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t *key, uint8_t *text,
+ uint32_t textlen, uint8_t *digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check */
+ if ((key == NULL) || (text == NULL) || (textlen == 0) ||
+ (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (key->keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* save the hashed key as the new key */
+ key->keylen = digestlen;
+ memcpy(key->key, temp, key->keylen);
+ }
+ return (sctp_hmac(hmac_algo, key->key, key->keylen, text, textlen,
+ digest));
+}
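+
+/*-
+ * Note for callers: when key->keylen exceeds the hash block length, the
+ * key struct really is rewritten in place (keylen shrinks to the digest
+ * length), so the original key material is not recoverable from it
+ * afterwards.
+ */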
+
+/* mbuf version */
+uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key, struct mbuf *m,
+ uint32_t m_offset, uint8_t *digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check */
+ if ((key == NULL) || (m == NULL) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (key->keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* save the hashed key as the new key */
+ key->keylen = digestlen;
+ memcpy(key->key, temp, key->keylen);
+ }
+ return (sctp_hmac_m(hmac_algo, key->key, key->keylen, m, m_offset, digest, 0));
+}
+
+int
+sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id)
+{
+ int i;
+
+ if ((list == NULL) || (id == SCTP_AUTH_HMAC_ID_RSVD))
+ return (0);
+
+ for (i = 0; i < list->num_algo; i++)
+ if (list->hmac[i] == id)
+ return (1);
+
+ /* not in the list */
+ return (0);
+}
+
+
+/*-
+ * clear any cached key(s) if they match the given key id on an association.
+ * the cached key(s) will be recomputed and re-cached at next use.
+ * ASSUMES TCB_LOCK is already held
+ */
+void
+sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ if (stcb == NULL)
+ return;
+
+ if (keyid == stcb->asoc.authinfo.assoc_keyid) {
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
+ stcb->asoc.authinfo.assoc_key = NULL;
+ }
+ if (keyid == stcb->asoc.authinfo.recv_keyid) {
+ sctp_free_key(stcb->asoc.authinfo.recv_key);
+ stcb->asoc.authinfo.recv_key = NULL;
+ }
+}
+
+/*-
+ * clear any cached key(s) if they match the given key id for all assocs on
+ * an endpoint.
+ * ASSUMES INP_WLOCK is already held
+ */
+void
+sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ struct sctp_tcb *stcb;
+
+ if (inp == NULL)
+ return;
+
+ /* clear the cached keys on all assocs on this instance */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_clear_cachedkeys(stcb, keyid);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+
+/*-
+ * delete a shared key from an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (stcb == NULL)
+ return (-1);
+
+ /* is the keyid the assoc active sending key */
+ if (keyid == stcb->asoc.authinfo.active_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* are there other refcount holders on the key? */
+ if (skey->refcount > 1)
+ return (-1);
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ /* clear any cached keys */
+ sctp_clear_cachedkeys(stcb, keyid);
+ return (0);
+}
+
+/*-
+ * deletes a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (inp == NULL)
+ return (-1);
+
+ /* is the keyid the active sending key on the endpoint */
+ if (keyid == inp->sctp_ep.default_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* endpoint keys are not refcounted */
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ /* clear any cached keys */
+ sctp_clear_cachedkeys_ep(inp, keyid);
+ return (0);
+}
+
+/*-
+ * set the active key on an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey = NULL;
+
+ /* find the key on the assoc */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL) {
+ /* that key doesn't exist */
+ return (-1);
+ }
+ if ((skey->deactivated) && (skey->refcount > 1)) {
+ /* can't reactivate a deactivated key with other refcounts */
+ return (-1);
+ }
+
+ /* set the (new) active key */
+ stcb->asoc.authinfo.active_keyid = keyid;
+ /* reset the deactivated flag */
+ skey->deactivated = 0;
+
+ return (0);
+}
+
+/*-
+ * set the active key on an endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the key */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL) {
+ /* that key doesn't exist */
+ return (-1);
+ }
+ inp->sctp_ep.default_keyid = keyid;
+ return (0);
+}
+
+/*-
+ * deactivates a shared key from the association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (stcb == NULL)
+ return (-1);
+
+ /* is the keyid the assoc active sending key */
+ if (keyid == stcb->asoc.authinfo.active_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* are there other refcount holders on the key? */
+ if (skey->refcount == 1) {
+ /* no other users, send a notification for this key */
+ sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb, keyid, 0,
+ SCTP_SO_LOCKED);
+ }
+
+ /* mark the key as deactivated */
+ skey->deactivated = 1;
+
+ return (0);
+}
+
+/*-
+ * deactivates a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (inp == NULL)
+ return (-1);
+
+ /* is the keyid the active sending key on the endpoint */
+ if (keyid == inp->sctp_ep.default_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* endpoint keys are not refcounted */
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ return (0);
+}
+
+/*
+ * get local authentication parameters from cookie (from INIT-ACK)
+ */
+void
+sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+ uint32_t offset, uint32_t length)
+{
+ struct sctp_paramhdr *phdr, tmp_param;
+ uint16_t plen, ptype;
+ uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_random *p_random = NULL;
+ uint16_t random_len = 0;
+ uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_hmac_algo *hmacs = NULL;
+ uint16_t hmacs_len = 0;
+ uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_chunk_list *chunks = NULL;
+ uint16_t num_chunks = 0;
+ sctp_key_t *new_key;
+ uint32_t keylen;
+
+ /* convert to upper bound */
+ length += offset;
+
+ phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
+ while (phdr != NULL) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+
+ if ((plen < sizeof(struct sctp_paramhdr)) ||
+ (offset + plen > length))
+ break;
+
+ if (ptype == SCTP_RANDOM) {
+ if (plen > sizeof(random_store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)random_store, plen);
+ if (phdr == NULL)
+ return;
+ /* save the random and length for the key */
+ p_random = (struct sctp_auth_random *)phdr;
+ random_len = plen - sizeof(*p_random);
+ } else if (ptype == SCTP_HMAC_LIST) {
+ uint16_t num_hmacs;
+ uint16_t i;
+
+ if (plen > sizeof(hmacs_store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)hmacs_store, plen);
+ if (phdr == NULL)
+ return;
+ /* save the hmacs list and num for the key */
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ hmacs_len = plen - sizeof(*hmacs);
+ num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+ if (stcb->asoc.local_hmacs != NULL)
+ sctp_free_hmaclist(stcb->asoc.local_hmacs);
+ stcb->asoc.local_hmacs = sctp_alloc_hmaclist(num_hmacs);
+ if (stcb->asoc.local_hmacs != NULL) {
+ for (i = 0; i < num_hmacs; i++) {
+ (void)sctp_auth_add_hmacid(stcb->asoc.local_hmacs,
+ ntohs(hmacs->hmac_ids[i]));
+ }
+ }
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i;
+
+ if (plen > sizeof(chunks_store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)chunks_store, plen);
+ if (phdr == NULL)
+ return;
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ /* save chunks list and num for the key */
+ if (stcb->asoc.local_auth_chunks != NULL)
+ sctp_clear_chunklist(stcb->asoc.local_auth_chunks);
+ else
+ stcb->asoc.local_auth_chunks = sctp_alloc_chunklist();
+ for (i = 0; i < num_chunks; i++) {
+ (void)sctp_auth_add_chunk(chunks->chunk_types[i],
+ stcb->asoc.local_auth_chunks);
+ }
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if (offset + sizeof(struct sctp_paramhdr) > length)
+ break;
+ phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *)&tmp_param);
+ }
+ /* concatenate the full random key */
+ keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
+ if (chunks != NULL) {
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* copy in the RANDOM */
+ if (p_random != NULL) {
+ keylen = sizeof(*p_random) + random_len;
+ memcpy(new_key->key, p_random, keylen);
+ } else {
+ keylen = 0;
+ }
+ /* append in the AUTH chunks */
+ if (chunks != NULL) {
+ memcpy(new_key->key + keylen, chunks,
+ sizeof(*chunks) + num_chunks);
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ /* append in the HMACs */
+ if (hmacs != NULL) {
+ memcpy(new_key->key + keylen, hmacs,
+ sizeof(*hmacs) + hmacs_len);
+ }
+ }
+ if (stcb->asoc.authinfo.random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.random);
+ stcb->asoc.authinfo.random = new_key;
+ stcb->asoc.authinfo.random_len = random_len;
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+ /* negotiate what HMAC to use for the peer */
+ stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+ stcb->asoc.local_hmacs);
+
+ /* copy defaults from the endpoint */
+ /* FIX ME: put in cookie? */
+ stcb->asoc.authinfo.active_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
+ /* copy out the shared key list (by reference) from the endpoint */
+ (void)sctp_copy_skeylist(&stcb->sctp_ep->sctp_ep.shared_keys,
+ &stcb->asoc.shared_keys);
+}
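+
+/*-
+ * Layout sketch of the key assembled above: the parameters are kept
+ * verbatim, headers included, and concatenated as
+ *
+ *	RANDOM param || CHUNK_LIST param (if any) || HMAC_LIST param
+ *
+ * matching the order sctp_initialize_auth_params() below uses when it
+ * builds the local key material.
+ */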
+
+/*
+ * compute and fill in the HMAC digest for a packet
+ */
+void
+sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
+{
+ uint32_t digestlen;
+ sctp_sharedkey_t *skey;
+ sctp_key_t *key;
+
+ if ((stcb == NULL) || (auth == NULL))
+ return;
+
+ /* zero the digest + chunk padding */
+ digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+ memset(auth->hmac, 0, SCTP_SIZE32(digestlen));
+
+ /* is the desired key cached? */
+ if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
+ (stcb->asoc.authinfo.assoc_key == NULL)) {
+ if (stcb->asoc.authinfo.assoc_key != NULL) {
+ /* free the old cached key */
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
+ }
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ /* the only way skey is NULL is if null key id 0 is used */
+ if (skey != NULL)
+ key = skey->key;
+ else
+ key = NULL;
+ /* compute a new assoc key and cache it */
+ stcb->asoc.authinfo.assoc_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, key);
+ stcb->asoc.authinfo.assoc_keyid = keyid;
+ SCTPDBG(SCTP_DEBUG_AUTH1, "caching key id %u\n",
+ stcb->asoc.authinfo.assoc_keyid);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ sctp_print_key(stcb->asoc.authinfo.assoc_key,
+ "Assoc Key");
+#endif
+ }
+
+ /* set in the active key id */
+ auth->shared_key_id = htons(keyid);
+
+ /* compute and fill in the digest */
+ (void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
+ m, auth_offset, auth->hmac);
+}
+
+
+static void
+sctp_zero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
+{
+ struct mbuf *m_tmp;
+ uint8_t *data;
+
+ /* sanity check */
+ if (m == NULL)
+ return;
+
+ /* find the correct starting mbuf and offset (get start position) */
+ m_tmp = m;
+ while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+ m_offset -= SCTP_BUF_LEN(m_tmp);
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ /* now use the rest of the mbuf chain */
+ while ((m_tmp != NULL) && (size > 0)) {
+ data = mtod(m_tmp, uint8_t *) + m_offset;
+ if (size > (uint32_t)(SCTP_BUF_LEN(m_tmp) - m_offset)) {
+ memset(data, 0, SCTP_BUF_LEN(m_tmp) - m_offset);
+ size -= SCTP_BUF_LEN(m_tmp) - m_offset;
+ } else {
+ memset(data, 0, size);
+ size = 0;
+ }
+ /* clear the offset since it's only for the first mbuf */
+ m_offset = 0;
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+}
+
+/*-
+ * process the incoming Authentication chunk
+ * return codes:
+ * -1 on any authentication error
+ * 0 on successful authentication verification
+ */
+int
+sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
+ struct mbuf *m, uint32_t offset)
+{
+ uint16_t chunklen;
+ uint16_t shared_key_id;
+ uint16_t hmac_id;
+ sctp_sharedkey_t *skey;
+ uint32_t digestlen;
+ uint8_t digest[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* auth is checked for NULL by caller */
+ chunklen = ntohs(auth->ch.chunk_length);
+ if (chunklen < sizeof(*auth)) {
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ return (-1);
+ }
+ SCTP_STAT_INCR(sctps_recvauth);
+
+ /* get the auth params */
+ shared_key_id = ntohs(auth->shared_key_id);
+ hmac_id = ntohs(auth->hmac_id);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP AUTH Chunk: shared key %u, HMAC id %u\n",
+ shared_key_id, hmac_id);
+
+#if defined(__Userspace__)
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ return (0);
+#endif
+#endif
+ /* is the indicated HMAC supported? */
+ if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) {
+ struct mbuf *op_err;
+ struct sctp_error_auth_invalid_hmac *cause;
+
+ SCTP_STAT_INCR(sctps_recvivalhmacid);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: unsupported HMAC id %u\n",
+ hmac_id);
+ /*
+ * report this in an Error Chunk: Unsupported HMAC
+ * Identifier
+ */
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_auth_invalid_hmac),
+ 0, M_NOWAIT, 1, MT_HEADER);
+ if (op_err != NULL) {
+ /* pre-reserve some space */
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ /* fill in the error */
+ cause = mtod(op_err, struct sctp_error_auth_invalid_hmac *);
+ cause->cause.code = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
+ cause->cause.length = htons(sizeof(struct sctp_error_auth_invalid_hmac));
+			cause->hmac_id = htons(hmac_id);
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_auth_invalid_hmac);
+ /* queue it */
+ sctp_queue_op_err(stcb, op_err);
+ }
+ return (-1);
+ }
+ /* get the indicated shared key, if available */
+ if ((stcb->asoc.authinfo.recv_key == NULL) ||
+ (stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
+ /* find the shared key on the assoc first */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
+ shared_key_id);
+ /* if the shared key isn't found, discard the chunk */
+ if (skey == NULL) {
+ SCTP_STAT_INCR(sctps_recvivalkeyid);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: unknown key id %u\n",
+ shared_key_id);
+ return (-1);
+ }
+ /* generate a notification if this is a new key id */
+ if (stcb->asoc.authinfo.recv_keyid != shared_key_id)
+ /*
+ * sctp_ulp_notify(SCTP_NOTIFY_AUTH_NEW_KEY, stcb,
+ * shared_key_id, (void
+ * *)stcb->asoc.authinfo.recv_keyid);
+ */
+ sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY,
+ shared_key_id, stcb->asoc.authinfo.recv_keyid,
+ SCTP_SO_NOT_LOCKED);
+ /* compute a new recv assoc key and cache it */
+ if (stcb->asoc.authinfo.recv_key != NULL)
+ sctp_free_key(stcb->asoc.authinfo.recv_key);
+ stcb->asoc.authinfo.recv_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, skey->key);
+ stcb->asoc.authinfo.recv_keyid = shared_key_id;
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ sctp_print_key(stcb->asoc.authinfo.recv_key, "Recv Key");
+#endif
+ }
+ /* validate the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_id);
+ if (chunklen < (sizeof(*auth) + digestlen)) {
+ /* invalid digest length */
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: chunk too short for HMAC\n");
+ return (-1);
+ }
+ /* save a copy of the digest, zero the pseudo header, and validate */
+ memcpy(digest, auth->hmac, digestlen);
+ sctp_zero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
+ (void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key,
+ m, offset, computed_digest);
+
+ /* compare the computed digest with the one in the AUTH chunk */
+ if (timingsafe_bcmp(digest, computed_digest, digestlen) != 0) {
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: HMAC digest check failed\n");
+ return (-1);
+ }
+ return (0);
+}
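+
+/*-
+ * Verification sketch of the steps above: save the received digest,
+ * zero the digest field inside the packet (sctp_fill_hmac_digest_m()
+ * hashes an all-zero digest on the sending side too), recompute the
+ * HMAC over the mbuf chain from the AUTH chunk onward, and compare
+ * with timingsafe_bcmp() so the check does not leak how many leading
+ * bytes matched.
+ */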
+
+/*
+ * Generate NOTIFICATION
+ */
+void
+sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
+ uint16_t keyid, uint16_t alt_keyid, int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_authkey_event *auth;
+ struct sctp_queued_to_read *control;
+
+ if ((stcb == NULL) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /* If the socket is gone we are out of here */
+ return;
+ }
+
+ if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_AUTHEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_authkey_event),
+ 0, M_NOWAIT, 1, MT_HEADER);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+
+ SCTP_BUF_LEN(m_notify) = 0;
+ auth = mtod(m_notify, struct sctp_authkey_event *);
+ memset(auth, 0, sizeof(struct sctp_authkey_event));
+ auth->auth_type = SCTP_AUTHENTICATION_EVENT;
+ auth->auth_flags = 0;
+ auth->auth_length = sizeof(*auth);
+ auth->auth_keynumber = keyid;
+ auth->auth_altkeynumber = alt_keyid;
+ auth->auth_indication = indication;
+ auth->auth_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(*auth);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0, m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+/*-
+ * validates the AUTHentication related parameters in an INIT/INIT-ACK
+ * Note: currently only used for INIT as INIT-ACK is handled inline
+ * with sctp_load_addresses_from_init()
+ */
+int
+sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
+{
+ struct sctp_paramhdr *phdr, param_buf;
+ uint16_t ptype, plen;
+ int peer_supports_asconf = 0;
+ int peer_supports_auth = 0;
+ int got_random = 0, got_hmacs = 0, got_chklist = 0;
+ uint8_t saw_asconf = 0;
+ uint8_t saw_asconf_ack = 0;
+
+ /* go through each of the params. */
+ phdr = sctp_get_next_param(m, offset, &param_buf, sizeof(param_buf));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+
+ if (offset + plen > limit) {
+ break;
+ }
+ if (plen < sizeof(struct sctp_paramhdr)) {
+ break;
+ }
+ if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+ /* A supported extension chunk */
+ struct sctp_supported_chunk_types_param *pr_supported;
+ uint8_t local_store[SCTP_SMALL_CHUNK_STORE];
+ int num_ent, i;
+
+ if (plen > sizeof(local_store)) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&local_store,
+ plen);
+ if (phdr == NULL) {
+ return (-1);
+ }
+ pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+ num_ent = plen - sizeof(struct sctp_paramhdr);
+ for (i = 0; i < num_ent; i++) {
+ switch (pr_supported->chunk_types[i]) {
+ case SCTP_ASCONF:
+ case SCTP_ASCONF_ACK:
+ peer_supports_asconf = 1;
+ break;
+ default:
+ /* one we don't care about */
+ break;
+ }
+ }
+ } else if (ptype == SCTP_RANDOM) {
+ /* enforce the random length */
+ if (plen != (sizeof(struct sctp_auth_random) +
+ SCTP_AUTH_RANDOM_SIZE_REQUIRED)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: invalid RANDOM len\n");
+ return (-1);
+ }
+ got_random = 1;
+ } else if (ptype == SCTP_HMAC_LIST) {
+ struct sctp_auth_hmac_algo *hmacs;
+ uint8_t store[SCTP_PARAM_BUFFER_SIZE];
+ int num_hmacs;
+
+ if (plen > sizeof(store)) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store,
+ plen);
+ if (phdr == NULL) {
+ return (-1);
+ }
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ num_hmacs = (plen - sizeof(*hmacs)) / sizeof(hmacs->hmac_ids[0]);
+ /* validate the hmac list */
+ if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: invalid HMAC param\n");
+ return (-1);
+ }
+ got_hmacs = 1;
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ struct sctp_auth_chunk_list *chunks;
+ uint8_t chunks_store[SCTP_SMALL_CHUNK_STORE];
+ int i, num_chunks;
+
+ if (plen > sizeof(chunks_store)) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)chunks_store,
+ plen);
+ if (phdr == NULL) {
+ return (-1);
+ }
+ /*-
+ * Flip through the list and mark that the
+ * peer supports asconf/asconf_ack.
+ */
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ for (i = 0; i < num_chunks; i++) {
+ /* record asconf/asconf-ack if listed */
+ if (chunks->chunk_types[i] == SCTP_ASCONF)
+ saw_asconf = 1;
+ if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+ saw_asconf_ack = 1;
+
+ }
+ if (num_chunks)
+ got_chklist = 1;
+ }
+
+ offset += SCTP_SIZE32(plen);
+ if (offset >= limit) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset, &param_buf,
+ sizeof(param_buf));
+ }
+ /* validate authentication required parameters */
+ if (got_random && got_hmacs) {
+ peer_supports_auth = 1;
+ } else {
+ peer_supports_auth = 0;
+ }
+ if (!peer_supports_auth && got_chklist) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: peer sent chunk list w/o AUTH\n");
+ return (-1);
+ }
+ if (peer_supports_asconf && !peer_supports_auth) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: peer supports ASCONF but not AUTH\n");
+ return (-1);
+ } else if ((peer_supports_asconf) && (peer_supports_auth) &&
+ ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+ return (-2);
+ }
+ return (0);
+}
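+
+/*-
+ * Caller-side sketch (the handling shown is an assumption, not lifted
+ * from the INIT processing code): 0 means the AUTH parameters are
+ * consistent, -1 means they are malformed, and -2 means the peer
+ * supports ASCONF and AUTH but left ASCONF or ASCONF-ACK out of its
+ * required-chunk list.
+ *
+ *	switch (sctp_validate_init_auth_params(m, offset, limit)) {
+ *	case 0:
+ *		break;
+ *	case -2:
+ *	default:
+ *		goto abort;
+ *	}
+ */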
+
+void
+sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ uint16_t chunks_len = 0;
+ uint16_t hmacs_len = 0;
+ uint16_t random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+ sctp_key_t *new_key;
+ uint16_t keylen;
+
+ /* initialize hmac list from endpoint */
+ stcb->asoc.local_hmacs = sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (stcb->asoc.local_hmacs != NULL) {
+ hmacs_len = stcb->asoc.local_hmacs->num_algo *
+ sizeof(stcb->asoc.local_hmacs->hmac[0]);
+ }
+ /* initialize auth chunks list from endpoint */
+ stcb->asoc.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ if (stcb->asoc.local_auth_chunks != NULL) {
+ int i;
+ for (i = 0; i < 256; i++) {
+ if (stcb->asoc.local_auth_chunks->chunks[i])
+ chunks_len++;
+ }
+ }
+ /* copy defaults from the endpoint */
+ stcb->asoc.authinfo.active_keyid = inp->sctp_ep.default_keyid;
+
+ /* copy out the shared key list (by reference) from the endpoint */
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &stcb->asoc.shared_keys);
+
+ /* now set the concatenated key (random + chunks + hmacs) */
+ /* key includes parameter headers */
+ keylen = (3 * sizeof(struct sctp_paramhdr)) + random_len + chunks_len +
+ hmacs_len;
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ struct sctp_paramhdr *ph;
+ int plen;
+ /* generate and copy in the RANDOM */
+ ph = (struct sctp_paramhdr *)new_key->key;
+ ph->param_type = htons(SCTP_RANDOM);
+ plen = sizeof(*ph) + random_len;
+ ph->param_length = htons(plen);
+ SCTP_READ_RANDOM(new_key->key + sizeof(*ph), random_len);
+ keylen = plen;
+
+ /* append in the AUTH chunks */
+ /* NOTE: currently we always have chunks to list */
+ ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+ ph->param_type = htons(SCTP_CHUNK_LIST);
+ plen = sizeof(*ph) + chunks_len;
+ ph->param_length = htons(plen);
+ keylen += sizeof(*ph);
+ if (stcb->asoc.local_auth_chunks) {
+ int i;
+ for (i = 0; i < 256; i++) {
+ if (stcb->asoc.local_auth_chunks->chunks[i])
+ new_key->key[keylen++] = i;
+ }
+ }
+
+ /* append in the HMACs */
+ ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+ ph->param_type = htons(SCTP_HMAC_LIST);
+ plen = sizeof(*ph) + hmacs_len;
+ ph->param_length = htons(plen);
+ keylen += sizeof(*ph);
+ (void)sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+ new_key->key + keylen);
+ }
+ if (stcb->asoc.authinfo.random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.random);
+ stcb->asoc.authinfo.random = new_key;
+ stcb->asoc.authinfo.random_len = random_len;
+}
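+
+/*-
+ * Resulting key layout (all three parameter headers included, the
+ * lengths written unpadded, exactly as built above):
+ *
+ *	[RANDOM hdr][random_len random bytes]
+ *	[CHUNK_LIST hdr][one byte per listed chunk type]
+ *	[HMAC_LIST hdr][16-bit big-endian HMAC ids]
+ */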
+
+
+#ifdef SCTP_HMAC_TEST
+/*
+ * HMAC and key concatenation tests
+ */
+static void
+sctp_print_digest(uint8_t *digest, uint32_t digestlen, const char *str)
+{
+ uint32_t i;
+
+ SCTP_PRINTF("\n%s: 0x", str);
+ if (digest == NULL)
+ return;
+
+ for (i = 0; i < digestlen; i++)
+ SCTP_PRINTF("%02x", digest[i]);
+}
+
+static int
+sctp_test_hmac(const char *str, uint16_t hmac_id, uint8_t *key,
+ uint32_t keylen, uint8_t *text, uint32_t textlen,
+ uint8_t *digest, uint32_t digestlen)
+{
+ uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ SCTP_PRINTF("\n%s:", str);
+ sctp_hmac(hmac_id, key, keylen, text, textlen, computed_digest);
+ sctp_print_digest(digest, digestlen, "Expected digest");
+ sctp_print_digest(computed_digest, digestlen, "Computed digest");
+ if (memcmp(digest, computed_digest, digestlen) != 0) {
+ SCTP_PRINTF("\nFAILED");
+ return (-1);
+ } else {
+ SCTP_PRINTF("\nPASSED");
+ return (0);
+ }
+}
+
+
+/*
+ * RFC 2202: HMAC-SHA1 test cases
+ */
+void
+sctp_test_hmac_sha1(void)
+{
+ uint8_t *digest;
+ uint8_t key[128];
+ uint32_t keylen;
+ uint8_t text[128];
+ uint32_t textlen;
+ uint32_t digestlen = 20;
+ int failed = 0;
+
+ /*-
+ * test_case = 1
+ * key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+ * key_len = 20
+ * data = "Hi There"
+ * data_len = 8
+ * digest = 0xb617318655057264e28bc0b6fb378c8ef146be00
+ */
+ keylen = 20;
+ memset(key, 0x0b, keylen);
+ textlen = 8;
+ strcpy(text, "Hi There");
+ digest = "\xb6\x17\x31\x86\x55\x05\x72\x64\xe2\x8b\xc0\xb6\xfb\x37\x8c\x8e\xf1\x46\xbe\x00";
+ if (sctp_test_hmac("SHA1 test case 1", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /*-
+ * test_case = 2
+ * key = "Jefe"
+ * key_len = 4
+ * data = "what do ya want for nothing?"
+ * data_len = 28
+ * digest = 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
+ */
+ keylen = 4;
+ strcpy(key, "Jefe");
+ textlen = 28;
+ strcpy(text, "what do ya want for nothing?");
+ digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79";
+ if (sctp_test_hmac("SHA1 test case 2", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /*-
+ * test_case = 3
+ * key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * key_len = 20
+ * data = 0xdd repeated 50 times
+ * data_len = 50
+ * digest = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
+ */
+ keylen = 20;
+ memset(key, 0xaa, keylen);
+ textlen = 50;
+ memset(text, 0xdd, textlen);
+ digest = "\x12\x5d\x73\x42\xb9\xac\x11\xcd\x91\xa3\x9a\xf4\x8a\xa1\x7b\x4f\x63\xf1\x75\xd3";
+ if (sctp_test_hmac("SHA1 test case 3", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /*-
+ * test_case = 4
+ * key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+ * key_len = 25
+ * data = 0xcd repeated 50 times
+ * data_len = 50
+ * digest = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
+ */
+ keylen = 25;
+ memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
+ textlen = 50;
+ memset(text, 0xcd, textlen);
+ digest = "\x4c\x90\x07\xf4\x02\x62\x50\xc6\xbc\x84\x14\xf9\xbf\x50\xc8\x6c\x2d\x72\x35\xda";
+ if (sctp_test_hmac("SHA1 test case 4", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /*-
+ * test_case = 5
+ * key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+ * key_len = 20
+ * data = "Test With Truncation"
+ * data_len = 20
+ * digest = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
+ * digest-96 = 0x4c1a03424b55e07fe7f27be1
+ */
+ keylen = 20;
+ memset(key, 0x0c, keylen);
+ textlen = 20;
+ strcpy(text, "Test With Truncation");
+ digest = "\x4c\x1a\x03\x42\x4b\x55\xe0\x7f\xe7\xf2\x7b\xe1\xd5\x8b\xb9\x32\x4a\x9a\x5a\x04";
+ if (sctp_test_hmac("SHA1 test case 5", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /*-
+ * test_case = 6
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key - Hash Key First"
+ * data_len = 54
+ * digest = 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
+ */
+ keylen = 80;
+ memset(key, 0xaa, keylen);
+ textlen = 54;
+ strcpy(text, "Test Using Larger Than Block-Size Key - Hash Key First");
+ digest = "\xaa\x4a\xe5\xe1\x52\x72\xd0\x0e\x95\x70\x56\x37\xce\x8a\x3b\x55\xed\x40\x21\x12";
+ if (sctp_test_hmac("SHA1 test case 6", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /*-
+ * test_case = 7
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
+ * data_len = 73
+ * digest = 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
+ */
+ keylen = 80;
+ memset(key, 0xaa, keylen);
+ textlen = 73;
+ strcpy(text, "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data");
+ digest = "\xe8\xe9\x9d\x0f\x45\x23\x7d\x78\x6d\x6b\xba\xa7\x96\x5c\x78\x08\xbb\xff\x1a\x91";
+ if (sctp_test_hmac("SHA1 test case 7", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /* done with all tests */
+ if (failed)
+ SCTP_PRINTF("\nSHA1 test results: %d cases failed", failed);
+ else
+ SCTP_PRINTF("\nSHA1 test results: all test cases passed");
+}
+
+/*
+ * test assoc key concatenation
+ */
+static int
+sctp_test_key_concatenation(sctp_key_t *key1, sctp_key_t *key2,
+ sctp_key_t *expected_key)
+{
+ sctp_key_t *key;
+ int ret_val;
+
+ sctp_show_key(key1, "\nkey1");
+ sctp_show_key(key2, "\nkey2");
+ key = sctp_compute_hashkey(key1, key2, NULL);
+ sctp_show_key(expected_key, "\nExpected");
+ sctp_show_key(key, "\nComputed");
+	if (memcmp(key->key, expected_key->key, expected_key->keylen) != 0) {
+ SCTP_PRINTF("\nFAILED");
+ ret_val = -1;
+ } else {
+ SCTP_PRINTF("\nPASSED");
+ ret_val = 0;
+ }
+ sctp_free_key(key1);
+ sctp_free_key(key2);
+ sctp_free_key(expected_key);
+ sctp_free_key(key);
+ return (ret_val);
+}
+
+
+void
+sctp_test_authkey(void)
+{
+ sctp_key_t *key1, *key2, *expected_key;
+ int failed = 0;
+
+ /* test case 1 */
+ key1 = sctp_set_key("\x01\x01\x01\x01", 4);
+ key2 = sctp_set_key("\x01\x02\x03\x04", 4);
+ expected_key = sctp_set_key("\x01\x01\x01\x01\x01\x02\x03\x04", 8);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 2 */
+ key1 = sctp_set_key("\x00\x00\x00\x01", 4);
+ key2 = sctp_set_key("\x02", 1);
+ expected_key = sctp_set_key("\x00\x00\x00\x01\x02", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 3 */
+ key1 = sctp_set_key("\x01", 1);
+ key2 = sctp_set_key("\x00\x00\x00\x02", 4);
+ expected_key = sctp_set_key("\x01\x00\x00\x00\x02", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 4 */
+ key1 = sctp_set_key("\x00\x00\x00\x01", 4);
+ key2 = sctp_set_key("\x01", 1);
+ expected_key = sctp_set_key("\x01\x00\x00\x00\x01", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 5 */
+ key1 = sctp_set_key("\x01", 1);
+ key2 = sctp_set_key("\x00\x00\x00\x01", 4);
+ expected_key = sctp_set_key("\x01\x00\x00\x00\x01", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 6 */
+ key1 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11);
+ key2 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11);
+ expected_key = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 7 */
+ key1 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11);
+ key2 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11);
+ expected_key = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* done with all tests */
+ if (failed)
+ SCTP_PRINTF("\nKey concatenation test results: %d cases failed", failed);
+ else
+ SCTP_PRINTF("\nKey concatenation test results: all test cases passed");
+}
+
+
+#if defined(STANDALONE_HMAC_TEST)
+int
+main(void)
+{
+ sctp_test_hmac_sha1();
+ sctp_test_authkey();
+}
+
+#endif /* STANDALONE_HMAC_TEST */
+
+#endif /* SCTP_HMAC_TEST */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.h
new file mode 100644
index 000000000..3dc5a59e2
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_auth.h
@@ -0,0 +1,216 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 338749 2018-09-18 10:53:07Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_AUTH_H_
+#define _NETINET_SCTP_AUTH_H_
+
+#include <netinet/sctp_os.h>
+
+/* digest lengths */
+#define SCTP_AUTH_DIGEST_LEN_SHA1 20
+#define SCTP_AUTH_DIGEST_LEN_SHA256 32
+#define SCTP_AUTH_DIGEST_LEN_MAX SCTP_AUTH_DIGEST_LEN_SHA256
+
+/* random sizes */
+#define SCTP_AUTH_RANDOM_SIZE_DEFAULT 32
+#define SCTP_AUTH_RANDOM_SIZE_REQUIRED 32
+
+/* union of all supported HMAC algorithm contexts */
+typedef union sctp_hash_context {
+ SCTP_SHA1_CTX sha1;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+ SCTP_SHA256_CTX sha256;
+#endif
+} sctp_hash_context_t;
+
+typedef struct sctp_key {
+ uint32_t keylen;
+ uint8_t key[];
+} sctp_key_t;
+
+typedef struct sctp_shared_key {
+ LIST_ENTRY(sctp_shared_key) next;
+ sctp_key_t *key; /* key text */
+ uint32_t refcount; /* reference count */
+ uint16_t keyid; /* shared key ID */
+ uint8_t deactivated; /* key is deactivated */
+} sctp_sharedkey_t;
+
+LIST_HEAD(sctp_keyhead, sctp_shared_key);
+
+/* authentication chunks list */
+typedef struct sctp_auth_chklist {
+ uint8_t chunks[256];
+ uint8_t num_chunks;
+} sctp_auth_chklist_t;
+
+/* hmac algos supported list */
+typedef struct sctp_hmaclist {
+ uint16_t max_algo; /* max algorithms allocated */
+ uint16_t num_algo; /* num algorithms used */
+ uint16_t hmac[];
+} sctp_hmaclist_t;
+
+/* authentication info */
+typedef struct sctp_authinformation {
+ sctp_key_t *random; /* local random key (concatenated) */
+ uint32_t random_len; /* local random number length for param */
+ sctp_key_t *peer_random;/* peer's random key (concatenated) */
+ sctp_key_t *assoc_key; /* cached concatenated send key */
+ sctp_key_t *recv_key; /* cached concatenated recv key */
+ uint16_t active_keyid; /* active send keyid */
+ uint16_t assoc_keyid; /* current send keyid (cached) */
+ uint16_t recv_keyid; /* last recv keyid (cached) */
+} sctp_authinfo_t;
+
+
+
+/*
+ * Macros
+ */
+#define sctp_auth_is_required_chunk(chunk, list) ((list == NULL) ? (0) : (list->chunks[chunk] != 0))
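+
+/*-
+ * Usage sketch: the list is indexed directly by chunk type, so the
+ * lookup is a single array probe; a non-zero result means the chunk
+ * must travel inside an AUTH chunk (bundle_with_auth below is a
+ * hypothetical caller flag):
+ *
+ *	if (sctp_auth_is_required_chunk(SCTP_ASCONF,
+ *	                                stcb->asoc.local_auth_chunks))
+ *		bundle_with_auth = 1;
+ */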
+
+/*
+ * function prototypes
+ */
+
+/* socket option api functions */
+extern sctp_auth_chklist_t *sctp_alloc_chunklist(void);
+extern void sctp_free_chunklist(sctp_auth_chklist_t *chklist);
+extern void sctp_clear_chunklist(sctp_auth_chklist_t *chklist);
+extern sctp_auth_chklist_t *sctp_copy_chunklist(sctp_auth_chklist_t *chklist);
+extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t *list);
+extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t *list);
+extern size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list);
+extern int sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list,
+ uint8_t *ptr);
+extern int sctp_pack_auth_chunks(const sctp_auth_chklist_t *list,
+ uint8_t *ptr);
+extern int sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks,
+ sctp_auth_chklist_t *list);
+
+/* key handling */
+extern sctp_key_t *sctp_alloc_key(uint32_t keylen);
+extern void sctp_free_key(sctp_key_t *key);
+extern void sctp_print_key(sctp_key_t *key, const char *str);
+extern void sctp_show_key(sctp_key_t *key, const char *str);
+extern sctp_key_t *sctp_generate_random_key(uint32_t keylen);
+extern sctp_key_t *sctp_set_key(uint8_t *key, uint32_t keylen);
+extern sctp_key_t *sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2,
+ sctp_key_t *shared);
+
+/* shared key handling */
+extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
+extern void sctp_free_sharedkey(sctp_sharedkey_t *skey);
+extern sctp_sharedkey_t *sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
+ uint16_t key_id);
+extern int sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+ sctp_sharedkey_t *new_skey);
+extern int sctp_copy_skeylist(const struct sctp_keyhead *src,
+ struct sctp_keyhead *dest);
+/* ref counts on shared keys, by key id */
+extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid,
+ int so_locked);
+
+
+/* hmac list handling */
+extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint16_t num_hmacs);
+extern void sctp_free_hmaclist(sctp_hmaclist_t *list);
+extern int sctp_auth_add_hmacid(sctp_hmaclist_t *list, uint16_t hmac_id);
+extern sctp_hmaclist_t *sctp_copy_hmaclist(sctp_hmaclist_t *list);
+extern sctp_hmaclist_t *sctp_default_supported_hmaclist(void);
+extern uint16_t sctp_negotiate_hmacid(sctp_hmaclist_t *peer,
+ sctp_hmaclist_t *local);
+extern int sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr);
+extern int sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs,
+ uint32_t num_hmacs);
+
+extern sctp_authinfo_t *sctp_alloc_authinfo(void);
+extern void sctp_free_authinfo(sctp_authinfo_t *authinfo);
+
+/* keyed-HMAC functions */
+extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
+extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
+extern uint32_t sctp_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+ uint8_t *text, uint32_t textlen, uint8_t *digest);
+extern uint32_t sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t *key,
+ uint8_t *text, uint32_t textlen, uint8_t *digest);
+extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id);
+
+/* mbuf versions */
+extern uint32_t sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+ struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer);
+extern uint32_t sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key,
+ struct mbuf *m, uint32_t m_offset, uint8_t *digest);
+
+/*
+ * authentication routines
+ */
+extern void sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+
+extern void sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+ uint32_t offset, uint32_t length);
+extern void sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t key_id);
+extern struct mbuf *sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+ struct sctp_auth_chunk **auth_ret, uint32_t *offset,
+ struct sctp_tcb *stcb, uint8_t chunk);
+extern int sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
+ struct mbuf *m, uint32_t offset);
+extern void sctp_notify_authentication(struct sctp_tcb *stcb,
+ uint32_t indication, uint16_t keyid, uint16_t alt_keyid, int so_locked);
+extern int sctp_validate_init_auth_params(struct mbuf *m, int offset,
+ int limit);
+extern void sctp_initialize_auth_params(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb);
+
+/* test functions */
+#ifdef SCTP_HMAC_TEST
+extern void sctp_test_hmac_sha1(void);
+extern void sctp_test_authkey(void);
+#endif
+#endif /* _NETINET_SCTP_AUTH_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.c
new file mode 100644
index 000000000..40e308f8f
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.c
@@ -0,0 +1,996 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 358080 2020-02-18 19:41:55Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_indata.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/unistd.h>
+#endif
+
+/* Declare all of our malloc named types */
+MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
+MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
+MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
+MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
+MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
+MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
+MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
+MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
+MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
+MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
+MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
+MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
+MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
+MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
+MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
+MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
+MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
+MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
+MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
+MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");
+
+/* Global NON-VNET structure that controls the iterator */
+struct iterator_control sctp_it_ctl;
+
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+static void
+sctp_cleanup_itqueue(void)
+{
+ struct sctp_iterator *it, *nit;
+
+ TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ SCTP_FREE(it, SCTP_M_ITER);
+ }
+}
+#endif
+#if defined(__Userspace__)
+/* __Userspace__ TODO: if we use a thread-based iterator,
+ * then the implementation of wakeup will need to change.
+ * Currently we are using timeo_cond for ident so_timeo,
+ * but that is not sufficient if we need to use another ident
+ * like wakeup(&sctppcbinfo.iterator_running);
+ */
+#endif
+
+void
+sctp_wakeup_iterator(void)
+{
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(_WIN32)
+ WakeAllConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+ pthread_cond_broadcast(&sctp_it_ctl.iterator_wakeup);
+#endif
+#else
+ wakeup(&sctp_it_ctl.iterator_running);
+#endif
+}
+
+#if defined(__Userspace__)
+static void *
+#else
+static void
+#endif
+sctp_iterator_thread(void *v SCTP_UNUSED)
+{
+#if defined(__Userspace__)
+ sctp_userspace_set_threadname("SCTP iterator");
+#endif
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ /* In FreeBSD this thread never terminates. */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ for (;;) {
+#else
+ while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) == 0) {
+#endif
+#if !defined(__Userspace__)
+ msleep(&sctp_it_ctl.iterator_running,
+#if defined(__FreeBSD__)
+ &sctp_it_ctl.ipi_iterator_wq_mtx,
+#elif defined(__APPLE__)
+ sctp_it_ctl.ipi_iterator_wq_mtx,
+#endif
+ 0, "waiting_for_work", 0);
+#else
+#if defined(_WIN32)
+ SleepConditionVariableCS(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx, INFINITE);
+#else
+ pthread_cond_wait(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx);
+#endif
+#endif
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+ break;
+ }
+#endif
+ sctp_iterator_worker();
+ }
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ /* Now this thread needs to be terminated */
+ sctp_cleanup_itqueue();
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_EXITED;
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__Userspace__)
+ sctp_wakeup_iterator();
+ return (NULL);
+#else
+ wakeup(&sctp_it_ctl.iterator_flags);
+ thread_terminate(current_thread());
+#ifdef INVARIANTS
+ panic("Hmm. thread_terminate() continues...");
+#endif
+#endif
+#endif
+}
+
+void
+sctp_startup_iterator(void)
+{
+ if (sctp_it_ctl.thread_proc) {
+ /* You only get one */
+ return;
+ }
+ /* Initialize global locks here, thus only once. */
+ SCTP_ITERATOR_LOCK_INIT();
+ SCTP_IPI_ITERATOR_WQ_INIT();
+ TAILQ_INIT(&sctp_it_ctl.iteratorhead);
+#if defined(__Userspace__)
+ if (sctp_userspace_thread_create(&sctp_it_ctl.thread_proc, &sctp_iterator_thread)) {
+ SCTP_PRINTF("ERROR: Creating sctp_iterator_thread failed.\n");
+ } else {
+ SCTP_BASE_VAR(iterator_thread_started) = 1;
+ }
+#elif defined(__FreeBSD__)
+ kproc_create(sctp_iterator_thread,
+ (void *)NULL,
+ &sctp_it_ctl.thread_proc,
+ RFPROC,
+ SCTP_KTHREAD_PAGES,
+ SCTP_KTRHEAD_NAME);
+#elif defined(__APPLE__)
+ kernel_thread_start((thread_continue_t)sctp_iterator_thread, NULL, &sctp_it_ctl.thread_proc);
+#endif
+}
+
+#ifdef INET6
+
+#if defined(__Userspace__)
+/* __Userspace__ TODO: struct in6_ifaddr is defined in sys/netinet6/in6_var.h;
+ * ip6_use_deprecated is defined as "int ip6_use_deprecated = 1;" in /src/sys/netinet6/in6_proto.c.
+ */
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+ return; /* stub */
+}
+#else
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa->ifa;
+ ifa->flags = ifa6->ia6_flags;
+ if (!MODULE_GLOBAL(ip6_use_deprecated)) {
+ if (ifa->flags &
+ IN6_IFF_DEPRECATED) {
+ ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+ } else {
+ ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+ }
+ } else {
+ ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+ }
+ if (ifa->flags &
+ (IN6_IFF_DETACHED |
+ IN6_IFF_ANYCAST |
+ IN6_IFF_NOTREADY)) {
+ ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+ } else {
+ ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+ }
+}
+#endif /* __Userspace__ */
+#endif /* INET6 */
+
+
+#if !defined(__Userspace__)
+static uint32_t
+sctp_is_desired_interface_type(struct ifnet *ifn)
+{
+ int result;
+
+ /* check the interface type to see if it's one we care about */
+#if defined(__APPLE__) && !defined(__Userspace__)
+	switch (ifnet_type(ifn)) {
+#else
+ switch (ifn->if_type) {
+#endif
+ case IFT_ETHER:
+ case IFT_ISO88023:
+ case IFT_ISO88024:
+ case IFT_ISO88025:
+ case IFT_ISO88026:
+ case IFT_STARLAN:
+ case IFT_P10:
+ case IFT_P80:
+ case IFT_HY:
+ case IFT_FDDI:
+ case IFT_XETHER:
+ case IFT_ISDNBASIC:
+ case IFT_ISDNPRIMARY:
+ case IFT_PTPSERIAL:
+ case IFT_OTHER:
+ case IFT_PPP:
+ case IFT_LOOP:
+ case IFT_SLIP:
+ case IFT_GIF:
+ case IFT_L2VLAN:
+ case IFT_STF:
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ case IFT_IP:
+ case IFT_IPOVERCDLC:
+ case IFT_IPOVERCLAW:
+ case IFT_PROPVIRTUAL: /* NetGraph Virtual too */
+ case IFT_VIRTUALIPADDRESS:
+#endif
+ result = 1;
+ break;
+ default:
+ result = 0;
+ }
+
+ return (result);
+}
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+int
+sctp_is_vmware_interface(struct ifnet *ifn)
+{
+ return (strncmp(ifnet_name(ifn), "vmnet", 5) == 0);
+}
+#endif
+
+#if defined(_WIN32) && defined(__Userspace__)
+#ifdef MALLOC
+#undef MALLOC
+#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
+#endif
+#ifdef FREE
+#undef FREE
+#define FREE(x) HeapFree(GetProcessHeap(), 0, (x))
+#endif
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+#if defined(INET) || defined(INET6)
+ struct sctp_ifa *sctp_ifa;
+ DWORD Err, AdapterAddrsSize;
+ PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt;
+ PIP_ADAPTER_UNICAST_ADDRESS pUnicast;
+#endif
+
+#ifdef INET
+ AdapterAddrsSize = 0;
+
+ if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+ if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTP_PRINTF("GetAdaptersAddresses(AF_INET) sizing failed with error code %d\n", Err);
+ SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+ return;
+ }
+ }
+
+ /* Allocate memory from sizing information */
+ if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+ SCTP_PRINTF("Memory allocation error!\n");
+ return;
+ }
+ /* Get actual adapter information */
+ if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTP_PRINTF("GetAdaptersAddresses(AF_INET) failed with error code %d\n", Err);
+ FREE(pAdapterAddrs);
+ return;
+ }
+ /* Enumerate through each returned adapter and save its information */
+ for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+ if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
+ for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
+ if (IN4_ISLINKLOCAL_ADDRESS(&(((struct sockaddr_in *)(pUnicast->Address.lpSockaddr))->sin_addr))) {
+ continue;
+ }
+ sctp_ifa = sctp_add_addr_to_vrf(0,
+ NULL,
+ pAdapt->IfIndex,
+ (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
+ pAdapt->AdapterName,
+ NULL,
+ pUnicast->Address.lpSockaddr,
+ pAdapt->Flags,
+ 0);
+ if (sctp_ifa) {
+ sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ }
+ }
+ }
+ FREE(pAdapterAddrs);
+#endif
+#ifdef INET6
+ AdapterAddrsSize = 0;
+
+ if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+ if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTP_PRINTF("GetAdaptersAddresses(AF_INET6) sizing failed with error code %d\n", Err);
+ SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+ return;
+ }
+ }
+ /* Allocate memory from sizing information */
+ if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+ SCTP_PRINTF("Memory allocation error!\n");
+ return;
+ }
+ /* Get actual adapter information */
+ if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTP_PRINTF("GetAdaptersAddresses(AF_INET6) failed with error code %d\n", Err);
+ FREE(pAdapterAddrs);
+ return;
+ }
+ /* Enumerate through each returned adapter and save its information */
+ for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+ if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
+ for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
+ sctp_ifa = sctp_add_addr_to_vrf(0,
+ NULL,
+ pAdapt->Ipv6IfIndex,
+ (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
+ pAdapt->AdapterName,
+ NULL,
+ pUnicast->Address.lpSockaddr,
+ pAdapt->Flags,
+ 0);
+ if (sctp_ifa) {
+ sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ }
+ }
+ }
+ FREE(pAdapterAddrs);
+#endif
+}
+#elif defined(__Userspace__)
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+#if defined(INET) || defined(INET6)
+ int rc;
+ struct ifaddrs *ifa, *ifas;
+ struct sctp_ifa *sctp_ifa;
+ uint32_t ifa_flags;
+
+ rc = getifaddrs(&ifas);
+ if (rc != 0) {
+ return;
+ }
+ for (ifa = ifas; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr == NULL) {
+ continue;
+ }
+#if !defined(INET)
+ if (ifa->ifa_addr->sa_family != AF_INET6) {
+ /* non inet6 skip */
+ continue;
+ }
+#elif !defined(INET6)
+ if (ifa->ifa_addr->sa_family != AF_INET) {
+ /* non inet skip */
+ continue;
+ }
+#else
+ if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+ /* non inet/inet6 skip */
+ continue;
+ }
+#endif
+#if defined(INET6)
+ if ((ifa->ifa_addr->sa_family == AF_INET6) &&
+ IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+			/* skip unspecified addresses */
+ continue;
+ }
+#endif
+#if defined(INET)
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+ continue;
+ }
+#endif
+ ifa_flags = 0;
+ sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+ NULL,
+ if_nametoindex(ifa->ifa_name),
+ 0,
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_addr,
+ ifa_flags,
+ 0);
+ if (sctp_ifa) {
+ sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ }
+ freeifaddrs(ifas);
+#endif
+}
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* Here we must apply ANY locks needed by the
+	 * IFN we access, and also make sure we lock
+	 * any IFA that exists as we float through the
+	 * list of IFAs.
+	 */
+ struct ifnet **ifnetlist;
+ uint32_t i, j, count;
+ char name[SCTP_IFNAMSIZ];
+ struct ifnet *ifn;
+ struct ifaddr **ifaddrlist;
+ struct ifaddr *ifa;
+ struct in6_ifaddr *ifa6;
+ struct sctp_ifa *sctp_ifa;
+ uint32_t ifa_flags;
+
+ if (ifnet_list_get(IFNET_FAMILY_ANY, &ifnetlist, &count) != 0) {
+ return;
+ }
+ for (i = 0; i < count; i++) {
+ ifn = ifnetlist[i];
+ if (SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) && sctp_is_vmware_interface(ifn)) {
+ continue;
+ }
+ if (sctp_is_desired_interface_type(ifn) == 0) {
+ /* non desired type */
+ continue;
+ }
+ if (ifnet_get_address_list(ifn, &ifaddrlist) != 0) {
+ continue;
+ }
+ for (j = 0; ifaddrlist[j] != NULL; j++) {
+ ifa = ifaddrlist[j];
+ if (ifa->ifa_addr == NULL) {
+ continue;
+ }
+ if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+ /* non inet/inet6 skip */
+ continue;
+ }
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+					/* skip unspecified addresses */
+ continue;
+ }
+ } else {
+ if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) {
+ continue;
+ }
+ }
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ ifa6 = (struct in6_ifaddr *)ifa;
+ ifa_flags = ifa6->ia6_flags;
+ } else {
+ ifa_flags = 0;
+ }
+ SCTP_SNPRINTF(name, SCTP_IFNAMSIZ, "%s%d", ifnet_name(ifn), ifnet_unit(ifn));
+ sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+ (void *)ifn, /* XXX */
+ ifnet_index(ifn),
+ ifnet_type(ifn),
+ name,
+ (void *)ifa, /* XXX */
+ ifa->ifa_addr,
+ ifa_flags,
+ 0);
+ if (sctp_ifa) {
+ sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ }
+ ifnet_free_address_list(ifaddrlist);
+ }
+ ifnet_list_free(ifnetlist);
+}
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* Here we must apply ANY locks needed by the
+	 * IFN we access, and also make sure we lock
+	 * any IFA that exists as we float through the
+	 * list of IFAs.
+	 */
+ struct epoch_tracker et;
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ struct sctp_ifa *sctp_ifa;
+ uint32_t ifa_flags;
+#ifdef INET6
+ struct in6_ifaddr *ifa6;
+#endif
+
+ IFNET_RLOCK();
+ NET_EPOCH_ENTER(et);
+ CK_STAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
+ if (sctp_is_desired_interface_type(ifn) == 0) {
+ /* non desired type */
+ continue;
+ }
+ CK_STAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr == NULL) {
+ continue;
+ }
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+					/* skip unspecified addresses */
+ continue;
+ }
+ break;
+#endif
+ default:
+ continue;
+ }
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifa_flags = 0;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ifa6 = (struct in6_ifaddr *)ifa;
+ ifa_flags = ifa6->ia6_flags;
+ break;
+#endif
+ default:
+ ifa_flags = 0;
+ break;
+ }
+ sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+ (void *)ifn,
+ ifn->if_index,
+ ifn->if_type,
+ ifn->if_xname,
+ (void *)ifa,
+ ifa->ifa_addr,
+ ifa_flags,
+ 0);
+ if (sctp_ifa) {
+ sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ }
+ }
+ NET_EPOCH_EXIT(et);
+ IFNET_RUNLOCK();
+}
+#endif
+
+void
+sctp_init_vrf_list(int vrfid)
+{
+ if (vrfid > SCTP_MAX_VRF_ID)
+ /* can't do that */
+ return;
+
+ /* Don't care about return here */
+ (void)sctp_allocate_vrf(vrfid);
+
+	/* Now we need to build all the ifns
+	 * for this vrf and their addresses.
+	 */
+ sctp_init_ifns_for_vrf(vrfid);
+}
+
+void
+sctp_addr_change(struct ifaddr *ifa, int cmd)
+{
+#if defined(__Userspace__)
+ return;
+#else
+ uint32_t ifa_flags = 0;
+
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+ return;
+ }
+	/* BSD only has one VRF; if this changes,
+	 * we will need to hook in the right
+	 * things here to get the id to pass to
+	 * the address management routine.
+	 */
+ if (SCTP_BASE_VAR(first_time) == 0) {
+		/* Special test to see if my ::1 will show up with this */
+ SCTP_BASE_VAR(first_time) = 1;
+ sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
+ }
+
+ if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
+ /* don't know what to do with this */
+ return;
+ }
+
+ if (ifa->ifa_addr == NULL) {
+ return;
+ }
+ if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
+ /* non desired type */
+ return;
+ }
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+ return;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
+ if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+			/* skip unspecified addresses */
+ return;
+ }
+ break;
+#endif
+ default:
+ /* non inet/inet6 skip */
+ return;
+ }
+ if (cmd == RTM_ADD) {
+ (void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
+#if defined(__APPLE__) && !defined(__Userspace__)
+ ifnet_index(ifa->ifa_ifp), ifnet_type(ifa->ifa_ifp), ifnet_name(ifa->ifa_ifp),
+#else
+ ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
+#endif
+ (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
+	} else {
+ sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
+#if defined(__APPLE__) && !defined(__Userspace__)
+ ifnet_index(ifa->ifa_ifp),
+ ifnet_name(ifa->ifa_ifp));
+#else
+ ifa->ifa_ifp->if_index,
+ ifa->ifa_ifp->if_xname);
+#endif
+
+ /* We don't bump refcount here so when it completes
+ * the final delete will happen.
+ */
+ }
+#endif
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+void
+sctp_addr_change_event_handler(void *arg __unused, struct ifaddr *ifa, int cmd)
+{
+ sctp_addr_change(ifa, cmd);
+}
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+void
+sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add)
+{
+ struct ifnet **ifnetlist;
+ struct ifaddr **ifaddrlist;
+ uint32_t i, j, count;
+
+ if (ifnet_list_get(IFNET_FAMILY_ANY, &ifnetlist, &count) != 0) {
+ return;
+ }
+ for (i = 0; i < count; i++) {
+ if (!(*pred)(ifnetlist[i])) {
+ continue;
+ }
+ if (ifnet_get_address_list(ifnetlist[i], &ifaddrlist) != 0) {
+ continue;
+ }
+ for (j = 0; ifaddrlist[j] != NULL; j++) {
+ sctp_addr_change(ifaddrlist[j], add ? RTM_ADD : RTM_DELETE);
+ }
+ ifnet_free_address_list(ifaddrlist);
+ }
+ ifnet_list_free(ifnetlist);
+ return;
+}
+#endif
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
+ int how, int allonebuf, int type)
+{
+ struct mbuf *m = NULL;
+#if defined(__FreeBSD__) || defined(__Userspace__)
+#if defined(__Userspace__)
+ m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0, allonebuf);
+#else
+ m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
+#endif
+ if (m == NULL) {
+ /* bad, no memory */
+ return (m);
+ }
+#if !defined(__Userspace__)
+ if (allonebuf) {
+ if (SCTP_BUF_SIZE(m) < space_needed) {
+ m_freem(m);
+ return (NULL);
+ }
+ KASSERT(SCTP_BUF_NEXT(m) == NULL, ("%s: no chain allowed", __FUNCTION__));
+ }
+#endif
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mb(m, SCTP_MBUF_IALLOC);
+ }
+#endif
+#else
+ int mbuf_threshold;
+ unsigned int size;
+
+ if (want_header) {
+ MGETHDR(m, how, type);
+ size = MHLEN;
+ } else {
+ MGET(m, how, type);
+ size = MLEN;
+ }
+ if (m == NULL) {
+ return (NULL);
+ }
+ if (allonebuf == 0) {
+ mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
+ } else {
+ mbuf_threshold = 1;
+ }
+
+ if (space_needed > (unsigned int)(((mbuf_threshold - 1) * MLEN) + MHLEN)) {
+ MCLGET(m, how);
+ if (m == NULL) {
+ return (NULL);
+ }
+ if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+ sctp_m_freem(m);
+ return (NULL);
+ }
+ size = SCTP_BUF_EXTEND_SIZE(m);
+ }
+ if (allonebuf != 0 && size < space_needed) {
+ m_freem(m);
+ return (NULL);
+ }
+ SCTP_BUF_LEN(m) = 0;
+ SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mb(m, SCTP_MBUF_IALLOC);
+ }
+#endif
+#endif
+ return (m);
+}
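+
+/*
+ * Illustrative usage sketch (not part of the imported source): request a
+ * single contiguous mbuf with a packet header, large enough for an SCTP
+ * common header; NULL is returned if no such buffer can be provided.
+ *
+ *	struct mbuf *m;
+ *
+ *	m = sctp_get_mbuf_for_msg(sizeof(struct sctphdr), 1, M_NOWAIT, 1, MT_DATA);
+ *	if (m == NULL) {
+ *		(handle the allocation failure)
+ *	}
+ */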
+
+
+#ifdef SCTP_PACKET_LOGGING
+void
+sctp_packet_log(struct mbuf *m)
+{
+ int *lenat, thisone;
+ void *copyto;
+ uint32_t *tick_tock;
+ int length;
+ int total_len;
+ int grabbed_lock = 0;
+ int value, newval, thisend, thisbegin;
+ /*
+ * Buffer layout.
+ * -sizeof this entry (total_len)
+ * -previous end (value)
+ * -ticks of log (ticks)
+ * o -ip packet
+ * o -as logged
+ * - where this started (thisbegin)
+ * x <--end points here
+ */
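+	/*
+	 * Illustrative arithmetic (not from the original source): for a
+	 * logged packet of length 100 bytes, total_len =
+	 * SCTP_SIZE32(100 + 4 * sizeof(int)) = 116 with 4-byte ints (already
+	 * a multiple of 4), so the record occupies bytes
+	 * [thisbegin, thisbegin + 116) of packet_log_buffer, and the trailing
+	 * int at thisend - sizeof(int) points back to thisbegin.
+	 */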
+ length = SCTP_HEADER_LEN(m);
+ total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
+ /* Log a packet to the buffer. */
+	if (total_len > SCTP_PACKET_LOG_SIZE) {
+		/* Can't log this packet; the buffer is not big enough. */
+ return;
+ }
+ if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
+ return;
+ }
+ atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+ try_again:
+ if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+ SCTP_IP_PKTLOG_LOCK();
+ grabbed_lock = 1;
+ again_locked:
+ value = SCTP_BASE_VAR(packet_log_end);
+ newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+ if (newval >= SCTP_PACKET_LOG_SIZE) {
+ /* we wrapped */
+ thisbegin = 0;
+ thisend = total_len;
+ } else {
+ thisbegin = SCTP_BASE_VAR(packet_log_end);
+ thisend = newval;
+ }
+ if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+ goto again_locked;
+ }
+ } else {
+ value = SCTP_BASE_VAR(packet_log_end);
+ newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+ if (newval >= SCTP_PACKET_LOG_SIZE) {
+ /* we wrapped */
+ thisbegin = 0;
+ thisend = total_len;
+ } else {
+ thisbegin = SCTP_BASE_VAR(packet_log_end);
+ thisend = newval;
+ }
+ if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+ goto try_again;
+ }
+ }
+ /* Sanity check */
+ if (thisend >= SCTP_PACKET_LOG_SIZE) {
+ SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
+ thisbegin,
+ thisend,
+ SCTP_BASE_VAR(packet_log_writers),
+ grabbed_lock,
+ SCTP_BASE_VAR(packet_log_end));
+ SCTP_BASE_VAR(packet_log_end) = 0;
+ goto no_log;
+
+ }
+ lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
+ *lenat = total_len;
+ lenat++;
+ *lenat = value;
+ lenat++;
+ tick_tock = (uint32_t *)lenat;
+ lenat++;
+ *tick_tock = sctp_get_tick_count();
+ copyto = (void *)lenat;
+ thisone = thisend - sizeof(int);
+ lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
+ *lenat = thisbegin;
+ if (grabbed_lock) {
+ SCTP_IP_PKTLOG_UNLOCK();
+ grabbed_lock = 0;
+ }
+ m_copydata(m, 0, length, (caddr_t)copyto);
+ no_log:
+ if (grabbed_lock) {
+ SCTP_IP_PKTLOG_UNLOCK();
+ }
+ atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+}
+
+
+int
+sctp_copy_out_packet_log(uint8_t *target, int length)
+{
+	/* We wind through the packet log starting at
+	 * the beginning, copying up to length bytes out.
+	 * We return the number of bytes copied.
+	 */
+ int tocopy, this_copy;
+ int *lenat;
+ int did_delay = 0;
+
+ tocopy = length;
+ if (length < (int)(2 * sizeof(int))) {
+ /* not enough room */
+ return (0);
+ }
+ if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+ atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
+ again:
+ if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
+			/* We delay here for just a moment, hoping the writer(s) that were
+			 * present when we entered will have left, and we only have
+			 * locking ones that will contend with us for the lock. This
+			 * does not assure 100% access, but it's good enough for
+			 * a logging facility like this.
+			 */
+ did_delay = 1;
+ DELAY(10);
+ goto again;
+ }
+ }
+ SCTP_IP_PKTLOG_LOCK();
+ lenat = (int *)target;
+ *lenat = SCTP_BASE_VAR(packet_log_end);
+ lenat++;
+ this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
+ memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
+ if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+ atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
+ SCTP_PKTLOG_WRITERS_NEED_LOCK);
+ }
+ SCTP_IP_PKTLOG_UNLOCK();
+ return (this_copy + sizeof(int));
+}
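+
+/*
+ * Illustrative usage sketch (not part of the imported source): a reader
+ * supplies a buffer of at least SCTP_PACKET_LOG_SIZE + sizeof(int) bytes;
+ * the first int written to "target" is the current write offset
+ * (packet_log_end), followed by the raw log buffer contents.
+ *
+ *	uint8_t buf[SCTP_PACKET_LOG_SIZE + sizeof(int)];
+ *	int n;
+ *
+ *	n = sctp_copy_out_packet_log(buf, (int)sizeof(buf));
+ */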
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.h
new file mode 100644
index 000000000..05d1b3fee
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_bsd_addr.h
@@ -0,0 +1,73 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 353480 2019-10-13 18:17:08Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_BSD_ADDR_H_
+#define _NETINET_SCTP_BSD_ADDR_H_
+
+#include <netinet/sctp_pcb.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+extern struct iterator_control sctp_it_ctl;
+void sctp_wakeup_iterator(void);
+
+void sctp_startup_iterator(void);
+
+
+#ifdef INET6
+void sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa);
+#endif
+
+#ifdef SCTP_PACKET_LOGGING
+
+void sctp_packet_log(struct mbuf *m);
+int sctp_copy_out_packet_log(uint8_t *target, int length);
+
+#endif
+
+void sctp_addr_change(struct ifaddr *ifa, int cmd);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+
+void sctp_addr_change_event_handler(void *, struct ifaddr *, int);
+#endif
+
+void sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add);
+
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.c
new file mode 100644
index 000000000..4c9be7568
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.c
@@ -0,0 +1,249 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__Userspace__)
+#include <sys/types.h>
+#if !defined(_WIN32)
+#include <sys/wait.h>
+#include <unistd.h>
+#include <pthread.h>
+#endif
+#if defined(__native_client__)
+#include <sys/select.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <user_atomic.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#else
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_callout.h>
+#include <netinet/sctp_pcb.h>
+#endif
+#include <netinet/sctputil.h>
+
+/*
+ * Callout/timer routines for OSes that don't have them
+ */
+#if defined(__APPLE__) || defined(__Userspace__)
+static uint32_t ticks = 0;
+#else
+extern int ticks;
+#endif
+
+uint32_t
+sctp_get_tick_count(void)
+{
+	uint32_t ret;
+
+	SCTP_TIMERQ_LOCK();
+	ret = ticks;
+	SCTP_TIMERQ_UNLOCK();
+	return (ret);
+}
+
+/*
+ * SCTP_TIMERQ_LOCK protects:
+ * - SCTP_BASE_INFO(callqueue)
+ * - sctp_os_timer_next: next timer to check
+ */
+static sctp_os_timer_t *sctp_os_timer_next = NULL;
+
+void
+sctp_os_timer_init(sctp_os_timer_t *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+int
+sctp_os_timer_start(sctp_os_timer_t *c, uint32_t to_ticks, void (*ftn) (void *),
+ void *arg)
+{
+ int ret = 0;
+
+ /* paranoia */
+ if ((c == NULL) || (ftn == NULL))
+ return (ret);
+
+ SCTP_TIMERQ_LOCK();
+ /* check to see if we're rescheduling a timer */
+ if (c->c_flags & SCTP_CALLOUT_PENDING) {
+ ret = 1;
+ if (c == sctp_os_timer_next) {
+ sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+ }
+ TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+ /*
+ * part of the normal "stop a pending callout" process
+ * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
+ * flags. We don't bother since we are setting these
+ * below and we still hold the lock.
+ */
+ }
+
+ /*
+ * We could unlock/splx here and lock/spl at the TAILQ_INSERT_TAIL,
+ * but there's no point since doing this setup doesn't take much time.
+ */
+ if (to_ticks == 0)
+ to_ticks = 1;
+
+ c->c_arg = arg;
+ c->c_flags = (SCTP_CALLOUT_ACTIVE | SCTP_CALLOUT_PENDING);
+ c->c_func = ftn;
+ c->c_time = ticks + to_ticks;
+ TAILQ_INSERT_TAIL(&SCTP_BASE_INFO(callqueue), c, tqe);
+ SCTP_TIMERQ_UNLOCK();
+ return (ret);
+}
+
+int
+sctp_os_timer_stop(sctp_os_timer_t *c)
+{
+ SCTP_TIMERQ_LOCK();
+ /*
+ * Don't attempt to delete a callout that's not on the queue.
+ */
+ if (!(c->c_flags & SCTP_CALLOUT_PENDING)) {
+ c->c_flags &= ~SCTP_CALLOUT_ACTIVE;
+ SCTP_TIMERQ_UNLOCK();
+ return (0);
+ }
+ c->c_flags &= ~(SCTP_CALLOUT_ACTIVE | SCTP_CALLOUT_PENDING);
+ if (c == sctp_os_timer_next) {
+ sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+ }
+ TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+ SCTP_TIMERQ_UNLOCK();
+ return (1);
+}
+
+void
+sctp_handle_tick(uint32_t elapsed_ticks)
+{
+ sctp_os_timer_t *c;
+ void (*c_func)(void *);
+ void *c_arg;
+
+ SCTP_TIMERQ_LOCK();
+ /* update our tick count */
+ ticks += elapsed_ticks;
+ c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
+ while (c) {
+ if (SCTP_UINT32_GE(ticks, c->c_time)) {
+ sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+ TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+ c_func = c->c_func;
+ c_arg = c->c_arg;
+ c->c_flags &= ~SCTP_CALLOUT_PENDING;
+ SCTP_TIMERQ_UNLOCK();
+ c_func(c_arg);
+ SCTP_TIMERQ_LOCK();
+ c = sctp_os_timer_next;
+ } else {
+ c = TAILQ_NEXT(c, tqe);
+ }
+ }
+ sctp_os_timer_next = NULL;
+ SCTP_TIMERQ_UNLOCK();
+}
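+
+/*
+ * Illustrative note (not from the original source): on the userspace build
+ * the timer thread below calls sctp_handle_tick(sctp_msecs_to_ticks(10))
+ * roughly every 10 ms. Each expired callout is removed from the queue and
+ * its c_func(c_arg) is invoked with the timer-queue lock dropped, so a
+ * callback may safely rearm or stop callouts, including itself.
+ */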
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+void
+sctp_timeout(void *arg SCTP_UNUSED)
+{
+ sctp_handle_tick(SCTP_BASE_VAR(sctp_main_timer_ticks));
+ sctp_start_main_timer();
+}
+#endif
+
+#if defined(__Userspace__)
+#define TIMEOUT_INTERVAL 10
+
+void *
+user_sctp_timer_iterate(void *arg)
+{
+ sctp_userspace_set_threadname("SCTP timer");
+ for (;;) {
+#if defined(_WIN32)
+ Sleep(TIMEOUT_INTERVAL);
+#else
+ struct timespec amount, remaining;
+
+ remaining.tv_sec = 0;
+ remaining.tv_nsec = TIMEOUT_INTERVAL * 1000 * 1000;
+ do {
+ amount = remaining;
+ } while (nanosleep(&amount, &remaining) == -1);
+#endif
+ if (atomic_cmpset_int(&SCTP_BASE_VAR(timer_thread_should_exit), 1, 1)) {
+ break;
+ }
+ sctp_handle_tick(sctp_msecs_to_ticks(TIMEOUT_INTERVAL));
+ }
+ return (NULL);
+}
+
+void
+sctp_start_timer_thread(void)
+{
+ /*
+ * No need to do SCTP_TIMERQ_LOCK_INIT();
+ * here, it is being done in sctp_pcb_init()
+ */
+ int rc;
+
+ rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(timer_thread), user_sctp_timer_iterate);
+ if (rc) {
+		SCTP_PRINTF("ERROR: return code from sctp_userspace_thread_create() is %d\n", rc);
+ } else {
+ SCTP_BASE_VAR(timer_thread_started) = 1;
+ }
+}
+
+void
+sctp_stop_timer_thread(void)
+{
+ atomic_cmpset_int(&SCTP_BASE_VAR(timer_thread_should_exit), 0, 1);
+ if (SCTP_BASE_VAR(timer_thread_started)) {
+#if defined(_WIN32)
+ WaitForSingleObject(SCTP_BASE_VAR(timer_thread), INFINITE);
+ CloseHandle(SCTP_BASE_VAR(timer_thread));
+#else
+ pthread_join(SCTP_BASE_VAR(timer_thread), NULL);
+#endif
+ }
+}
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.h
new file mode 100644
index 000000000..81fd8530d
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_callout.h
@@ -0,0 +1,119 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#ifndef _NETINET_SCTP_CALLOUT_
+#define _NETINET_SCTP_CALLOUT_
+
+/*
+ * NOTE: the following macros are required for locking the callout
+ * queue, along with a lock/mutex in the OS-specific headers and
+ * implementation files:
+ * - SCTP_TIMERQ_LOCK()
+ * - SCTP_TIMERQ_UNLOCK()
+ * - SCTP_TIMERQ_LOCK_INIT()
+ * - SCTP_TIMERQ_LOCK_DESTROY()
+ */
+
+#define _SCTP_NEEDS_CALLOUT_ 1
+
+#define SCTP_TICKS_PER_FASTTIMO 20 /* called about every 20ms */
+
+#if defined(__Userspace__)
+#if defined(_WIN32)
+#define SCTP_TIMERQ_LOCK() EnterCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_UNLOCK() LeaveCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_LOCK_INIT() InitializeCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_LOCK_DESTROY() DeleteCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#else
+#ifdef INVARIANTS
+#define SCTP_TIMERQ_LOCK() KASSERT(pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx)) == 0, ("%s: timer_mtx already locked", __func__))
+#define SCTP_TIMERQ_UNLOCK() KASSERT(pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx)) == 0, ("%s: timer_mtx not locked", __func__))
+#else
+#define SCTP_TIMERQ_LOCK() (void)pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_UNLOCK() (void)pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx))
+#endif
+#define SCTP_TIMERQ_LOCK_INIT() (void)pthread_mutex_init(&SCTP_BASE_VAR(timer_mtx), &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_TIMERQ_LOCK_DESTROY() (void)pthread_mutex_destroy(&SCTP_BASE_VAR(timer_mtx))
+#endif
+#endif
+
+uint32_t sctp_get_tick_count(void);
+
+TAILQ_HEAD(calloutlist, sctp_callout);
+
+struct sctp_callout {
+ TAILQ_ENTRY(sctp_callout) tqe;
+ uint32_t c_time; /* ticks to the event */
+ void *c_arg; /* function argument */
+ void (*c_func)(void *); /* function to call */
+ int c_flags; /* state of this entry */
+};
+typedef struct sctp_callout sctp_os_timer_t;
+
+#define SCTP_CALLOUT_ACTIVE 0x0002 /* callout is currently active */
+#define SCTP_CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */
+
+void sctp_os_timer_init(sctp_os_timer_t *tmr);
+/* Returns 1 if pending timer was rescheduled, 0 otherwise. */
+int sctp_os_timer_start(sctp_os_timer_t *, uint32_t, void (*)(void *), void *);
+/* Returns 1 if pending timer was stopped, 0 otherwise. */
+int sctp_os_timer_stop(sctp_os_timer_t *);
+void sctp_handle_tick(uint32_t);
+
+#define SCTP_OS_TIMER_INIT sctp_os_timer_init
+/*
+ * NOTE: The next two shouldn't be called directly outside of sctp_timer_start()
+ * and sctp_timer_stop(), since they don't handle incrementing/decrementing
+ * relevant reference counts.
+ */
+#define SCTP_OS_TIMER_START sctp_os_timer_start
+#define SCTP_OS_TIMER_STOP sctp_os_timer_stop
+/* MT FIXME: Is the following correct? */
+#define SCTP_OS_TIMER_STOP_DRAIN SCTP_OS_TIMER_STOP
+#define SCTP_OS_TIMER_PENDING(tmr) ((tmr)->c_flags & SCTP_CALLOUT_PENDING)
+#define SCTP_OS_TIMER_ACTIVE(tmr) ((tmr)->c_flags & SCTP_CALLOUT_ACTIVE)
+#define SCTP_OS_TIMER_DEACTIVATE(tmr) ((tmr)->c_flags &= ~SCTP_CALLOUT_ACTIVE)
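+
+/*
+ * Illustrative usage sketch (not part of the imported source); per the note
+ * above, real code should only arm and disarm callouts via
+ * sctp_timer_start()/sctp_timer_stop(). "my_expiry" is a hypothetical
+ * callback:
+ *
+ *	static sctp_os_timer_t my_tmr;
+ *	static void my_expiry(void *arg) { (void)arg; }
+ *
+ *	SCTP_OS_TIMER_INIT(&my_tmr);
+ *	(void)SCTP_OS_TIMER_START(&my_tmr, SCTP_TICKS_PER_FASTTIMO, my_expiry, NULL);
+ *	if (SCTP_OS_TIMER_PENDING(&my_tmr)) {
+ *		(void)SCTP_OS_TIMER_STOP(&my_tmr);
+ *	}
+ */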
+
+#if defined(__Userspace__)
+void sctp_start_timer_thread(void);
+void sctp_stop_timer_thread(void);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+void sctp_timeout(void *);
+#endif
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_cc_functions.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_cc_functions.c
new file mode 100644
index 000000000..dcf629857
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_cc_functions.c
@@ -0,0 +1,2508 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 359405 2020-03-28 20:25:45Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_asconf.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/sctp_kdtrace.h>
+#endif
+
+#define SHIFT_MPTCP_MULTI_N 40
+#define SHIFT_MPTCP_MULTI_Z 16
+#define SHIFT_MPTCP_MULTI 8
+
+static void
+sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
+{
+ if ((assoc->max_cwnd > 0) &&
+ (net->cwnd > assoc->max_cwnd) &&
+ (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
+		net->cwnd = assoc->max_cwnd;
+ if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+ net->cwnd = net->mtu - sizeof(struct sctphdr);
+ }
+ }
+}
+
+static void
+sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *assoc;
+ uint32_t cwnd_in_mtu;
+
+ assoc = &stcb->asoc;
+ cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
+ if (cwnd_in_mtu == 0) {
+ /* Using 0 means that the value of RFC 4960 is used. */
+ net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+ } else {
+ /*
+ * We take the minimum of the burst limit and the
+ * initial congestion window.
+ */
+ if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
+ cwnd_in_mtu = assoc->max_burst;
+ net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
+ }
+ if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+ (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+ /* In case of resource pooling initialize appropriately */
+ net->cwnd /= assoc->numnets;
+ if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+ net->cwnd = net->mtu - sizeof(struct sctphdr);
+ }
+ }
+ sctp_enforce_cwnd_limit(assoc, net);
+ net->ssthresh = assoc->peers_rwnd;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, init,
+ stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+ 0, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+ (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+ }
+}
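+
+/*
+ * Illustrative arithmetic (not from the original source): with
+ * sctp_initial_cwnd == 0 and net->mtu == 1500, the RFC 4960 rule above gives
+ * cwnd = min(4 * 1500, max(2 * 1500, SCTP_INITIAL_CWND)); assuming
+ * SCTP_INITIAL_CWND is 4380, the RFC 4960 value, this is
+ * min(6000, max(3000, 4380)) = 4380 bytes.
+ */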
+
+static void
+sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+ uint32_t t_ssthresh, t_cwnd;
+ uint64_t t_ucwnd_sbw;
+
+ /* MT FIXME: Don't compute this over and over again */
+ t_ssthresh = 0;
+ t_cwnd = 0;
+ t_ucwnd_sbw = 0;
+ if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+ (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ t_ssthresh += net->ssthresh;
+ t_cwnd += net->cwnd;
+ if (net->lastsa > 0) {
+ t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
+ }
+ }
+ if (t_ucwnd_sbw == 0) {
+ t_ucwnd_sbw = 1;
+ }
+ }
+
+	/*-
+	 * CMT fast recovery code. Needs debugging:
+	 * ((sctp_cmt_on_off > 0) && (net->fast_retran_loss_recovery == 0))
+	 */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) ||
+ (asoc->sctp_cmt_on_off > 0)) {
+			/* out of an RFC 2582 fast recovery window? */
+			if (net->net_ack > 0) {
+				/*
+				 * Per section 7.2.3, check whether any
+				 * destinations had a fast retransmit
+				 * to them. If so, we need to
+				 * adjust ssthresh and cwnd.
+				 */
+ struct sctp_tmit_chunk *lchk;
+ int old_cwnd = net->cwnd;
+
+ if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+ (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+ if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
+ net->ssthresh = (uint32_t)(((uint64_t)4 *
+ (uint64_t)net->mtu *
+ (uint64_t)net->ssthresh) /
+ (uint64_t)t_ssthresh);
+
+ }
+ if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
+ uint32_t srtt;
+
+ srtt = net->lastsa;
+						/* lastsa >> 3; we don't need to divide ... */
+ if (srtt == 0) {
+ srtt = 1;
+ }
+ /* Short Version => Equal to Contel Version MBe */
+ net->ssthresh = (uint32_t) (((uint64_t)4 *
+ (uint64_t)net->mtu *
+ (uint64_t)net->cwnd) /
+ ((uint64_t)srtt *
+ t_ucwnd_sbw));
+						/* INCREASE FACTOR */
+ }
+ if ((net->cwnd > t_cwnd / 2) &&
+ (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+ net->ssthresh = net->cwnd - t_cwnd / 2;
+ }
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ }
+ } else {
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
+ }
+ net->cwnd = net->ssthresh;
+ sctp_enforce_cwnd_limit(asoc, net);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, fr,
+ stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+ old_cwnd, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_FR);
+ }
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+ }
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+}
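+
+/*
+ * Illustrative arithmetic (not from the original source): under RPV1 with
+ * two destinations whose ssthresh values are 30,000 and 10,000 bytes and
+ * net->mtu == 1500, t_ssthresh = 40,000, so the formula above gives the
+ * first destination a new ssthresh of (4 * 1500 * 30,000) / 40,000 = 4,500
+ * bytes, subject to the net->mtu floor enforced afterwards.
+ */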
+
+/* Defines for instantaneous bw decisions */
+#define SCTP_INST_LOOSING 1 /* Losing to other flows */
+#define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
+#define SCTP_INST_GAINING 3 /* Gaining, step down possible */
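+
+/*
+ * Illustrative note (not from the original source): cc_bw_limit() below
+ * classifies the instantaneous bandwidth of the last RTT (inst_bw) against
+ * the longer-term measurement nbw: inst_bw > nbw means SCTP_INST_GAINING,
+ * inst_bw + (inst_bw >> bw_shift) < nbw means SCTP_INST_LOOSING, and
+ * anything in between is SCTP_INST_NEUTRAL. For example, nbw = 1,000,000
+ * and inst_bw = 950,000 with bw_shift = 6 gives 950,000 + 14,843 <
+ * 1,000,000, i.e. SCTP_INST_LOOSING.
+ */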
+
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static int
+cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
+ uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
+#else
+static int
+cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw,
+ uint64_t rtt_offset, uint8_t inst_ind)
+#endif
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t oth, probepoint;
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ probepoint = (((uint64_t)net->cwnd) << 32);
+#endif
+ if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
+	/*
+	 * rtt increased; we don't update bw,
+	 * so we don't update the rtt either.
+	 */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Probe point 5 */
+ probepoint |= ((5 << 16) | 1);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
+ if (net->cc_mod.rtcc.last_step_state == 5)
+ net->cc_mod.rtcc.step_cnt++;
+ else
+ net->cc_mod.rtcc.step_cnt = 1;
+ net->cc_mod.rtcc.last_step_state = 5;
+ if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
+ ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
+ ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
+ /* Try a step down */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ oth = net->cc_mod.rtcc.vol_reduce;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.step_cnt;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.last_step_state;
+ SDT_PROBE5(sctp, cwnd, net, rttstep,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ oth,
+ probepoint);
+#endif
+ if (net->cwnd > (4 * net->mtu)) {
+ net->cwnd -= net->mtu;
+ net->cc_mod.rtcc.vol_reduce++;
+ } else {
+ net->cc_mod.rtcc.step_cnt = 0;
+ }
+ }
+ }
+ return (1);
+ }
+	if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
+		/*
+		 * rtt decreased, so there could be more room.
+		 * We update both the bw and the rtt here to
+		 * lock this in as a good step down.
+		 */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Probe point 6 */
+ probepoint |= ((6 << 16) | 0);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ oth = net->cc_mod.rtcc.vol_reduce;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.step_cnt;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.last_step_state;
+ SDT_PROBE5(sctp, cwnd, net, rttstep,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ oth,
+ probepoint);
+#endif
+ if ((net->cc_mod.rtcc.last_step_state == 5) &&
+ (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
+ /* Step down worked */
+ net->cc_mod.rtcc.step_cnt = 0;
+ return (1);
+ } else {
+ net->cc_mod.rtcc.last_step_state = 6;
+ net->cc_mod.rtcc.step_cnt = 0;
+ }
+ }
+ net->cc_mod.rtcc.lbw = nbw;
+ net->cc_mod.rtcc.lbw_rtt = net->rtt;
+ net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
+ if (inst_ind == SCTP_INST_GAINING)
+ return (1);
+ else if (inst_ind == SCTP_INST_NEUTRAL)
+ return (1);
+ else
+ return (0);
+ }
+	/* OK, bw and rtt remained the same; no update to either. */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Probe point 7 */
+ probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
+ if (net->cc_mod.rtcc.last_step_state == 5)
+ net->cc_mod.rtcc.step_cnt++;
+ else
+ net->cc_mod.rtcc.step_cnt = 1;
+ net->cc_mod.rtcc.last_step_state = 5;
+ if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
+ ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
+ ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
+ /* Try a step down */
+ if (net->cwnd > (4 * net->mtu)) {
+ net->cwnd -= net->mtu;
+ net->cc_mod.rtcc.vol_reduce++;
+ return (1);
+ } else {
+ net->cc_mod.rtcc.step_cnt = 0;
+ }
+ }
+ }
+ if (inst_ind == SCTP_INST_GAINING)
+ return (1);
+ else if (inst_ind == SCTP_INST_NEUTRAL)
+ return (1);
+ else
+ return ((int)net->cc_mod.rtcc.ret_from_eq);
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static int
+cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
+ uint64_t vtag, uint8_t inst_ind)
+#else
+static int
+cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
+ uint8_t inst_ind)
+#endif
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t oth, probepoint;
+#endif
+
+	/* Bandwidth decreased. */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ probepoint = (((uint64_t)net->cwnd) << 32);
+#endif
+	if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
+		/* rtt increased */
+		/* Did we add more? */
+		if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
+		    (inst_ind != SCTP_INST_LOOSING)) {
+			/* Maybe we caused it; back off? */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* PROBE POINT 1 */
+ probepoint |= ((1 << 16) | 1);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if (net->cc_mod.rtcc.ret_from_eq) {
+ /* Switch over to CA if we are less aggressive */
+				net->ssthresh = net->cwnd - 1;
+ net->partial_bytes_acked = 0;
+ }
+ return (1);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Probe point 2 */
+ probepoint |= ((2 << 16) | 0);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ /* Someone else - fight for more? */
+ if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ oth = net->cc_mod.rtcc.vol_reduce;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.step_cnt;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.last_step_state;
+ SDT_PROBE5(sctp, cwnd, net, rttstep,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ oth,
+ probepoint);
+#endif
+			/* Did we voluntarily give up some? If so,
+			 * take one back.
+			 */
+ if ((net->cc_mod.rtcc.vol_reduce) &&
+ (inst_ind != SCTP_INST_GAINING)) {
+ net->cwnd += net->mtu;
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ net->cc_mod.rtcc.vol_reduce--;
+ }
+ net->cc_mod.rtcc.last_step_state = 2;
+ net->cc_mod.rtcc.step_cnt = 0;
+ }
+ goto out_decision;
+	} else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
+ /* bw & rtt decreased */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Probe point 3 */
+ probepoint |= ((3 << 16) | 0);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ oth = net->cc_mod.rtcc.vol_reduce;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.step_cnt;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.last_step_state;
+ SDT_PROBE5(sctp, cwnd, net, rttstep,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ oth,
+ probepoint);
+#endif
+ if ((net->cc_mod.rtcc.vol_reduce) &&
+ (inst_ind != SCTP_INST_GAINING)) {
+ net->cwnd += net->mtu;
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ net->cc_mod.rtcc.vol_reduce--;
+ }
+ net->cc_mod.rtcc.last_step_state = 3;
+ net->cc_mod.rtcc.step_cnt = 0;
+ }
+ goto out_decision;
+ }
+ /* The bw decreased but rtt stayed the same */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Probe point 4 */
+ probepoint |= ((4 << 16) | 0);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ oth = net->cc_mod.rtcc.vol_reduce;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.step_cnt;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.last_step_state;
+ SDT_PROBE5(sctp, cwnd, net, rttstep,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ oth,
+ probepoint);
+#endif
+ if ((net->cc_mod.rtcc.vol_reduce) &&
+ (inst_ind != SCTP_INST_GAINING)) {
+ net->cwnd += net->mtu;
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ net->cc_mod.rtcc.vol_reduce--;
+ }
+ net->cc_mod.rtcc.last_step_state = 4;
+ net->cc_mod.rtcc.step_cnt = 0;
+ }
+out_decision:
+ net->cc_mod.rtcc.lbw = nbw;
+ net->cc_mod.rtcc.lbw_rtt = net->rtt;
+ net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
+ if (inst_ind == SCTP_INST_GAINING) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static int
+cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
+#else
+static int
+cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw)
+#endif
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t oth, probepoint;
+
+#endif
+ /* BW increased, so update and
+ * return 0, since all actions in
+ * our table say to do the normal CC
+ * update. Note that we pay no attention to
+ * the inst_ind since our overall sum is increasing.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* PROBE POINT 0 */
+ probepoint = (((uint64_t)net->cwnd) << 32);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ oth = net->cc_mod.rtcc.vol_reduce;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.step_cnt;
+ oth <<= 16;
+ oth |= net->cc_mod.rtcc.last_step_state;
+ SDT_PROBE5(sctp, cwnd, net, rttstep,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | nbw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ oth,
+ probepoint);
+#endif
+ net->cc_mod.rtcc.last_step_state = 0;
+ net->cc_mod.rtcc.step_cnt = 0;
+ net->cc_mod.rtcc.vol_reduce = 0;
+ }
+ net->cc_mod.rtcc.lbw = nbw;
+ net->cc_mod.rtcc.lbw_rtt = net->rtt;
+ net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
+ return (0);
+}
+
+/* RTCC algorithm to limit the growth of cwnd; returns
+ * true if cwnd growth should NOT be allowed.
+ */
+static int
+cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
+{
+ uint64_t bw_offset, rtt_offset;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t probepoint, rtt, vtag;
+#endif
+ uint64_t bytes_for_this_rtt, inst_bw;
+ uint64_t div, inst_off;
+ int bw_shift;
+ uint8_t inst_ind;
+ int ret;
+ /*-
+ * Here we need to see if we want
+ * to limit cwnd growth due to increase
+ * in overall rtt but no increase in bw.
+ * We use the following table to figure
+ * out what we should do. When we return
+ * 0, cc update goes on as planned. If we
+ * return 1, then no cc update happens and cwnd
+ * stays where it is at.
+ * ----------------------------------
+ * BW | RTT | Action
+ * *********************************
+ * INC | INC | return 0
+ * ----------------------------------
+ * INC | SAME | return 0
+ * ----------------------------------
+ * INC | DECR | return 0
+ * ----------------------------------
+ * SAME | INC | return 1
+ * ----------------------------------
+ * SAME | SAME | return 1
+ * ----------------------------------
+ * SAME | DECR | return 0
+ * ----------------------------------
+ * DECR | INC | return 0 or 1 based on if we caused.
+ * ----------------------------------
+ * DECR | SAME | return 0
+ * ----------------------------------
+ * DECR | DECR | return 0
+ * ----------------------------------
+ *
+	 * We are a bit fuzzy on what an increase or
+	 * decrease is. For BW it is the same if
+	 * it did not change within 1/64th. For
+	 * RTT it stayed the same if it did not
+	 * change within 1/32nd.
+ */
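+	/*
+	 * Worked example (illustrative numbers; assumes the sysctl shifts
+	 * match the 1/64 and 1/32 fractions stated above, i.e. a bw shift
+	 * of 6 and an rtt shift of 5): with lbw = 6,400,000 the band is
+	 * lbw >> 6 = 100,000, so nbw above 6,500,000 counts as an
+	 * increase, nbw below 6,300,000 as a decrease, and anything in
+	 * between as "same"; with lbw_rtt = 64,000 usec the rtt band is
+	 * 64,000 >> 5 = 2,000 usec.
+	 */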
+ bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ rtt = stcb->asoc.my_vtag;
+ vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
+ probepoint = (((uint64_t)net->cwnd) << 32);
+ rtt = net->rtt;
+#endif
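+	/*
+	 * Bit layout of the probe arguments built above: vtag packs
+	 * [63..32] = my_vtag, [31..16] = local port, [15..0] = remote
+	 * port; probepoint packs [63..32] = cwnd, [31..16] = a probe
+	 * location id (0xb/0xc/0xd below) and [15..0] = inst_ind.
+	 */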
+ if (net->cc_mod.rtcc.rtt_set_this_sack) {
+ net->cc_mod.rtcc.rtt_set_this_sack = 0;
+ bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
+ net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
+ if (net->rtt) {
+ div = net->rtt / 1000;
+ if (div) {
+ inst_bw = bytes_for_this_rtt / div;
+ inst_off = inst_bw >> bw_shift;
+ if (inst_bw > nbw)
+ inst_ind = SCTP_INST_GAINING;
+ else if ((inst_bw+inst_off) < nbw)
+ inst_ind = SCTP_INST_LOOSING;
+ else
+ inst_ind = SCTP_INST_NEUTRAL;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ probepoint |= ((0xb << 16) | inst_ind);
+#endif
+ } else {
+ inst_ind = net->cc_mod.rtcc.last_inst_ind;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
+				/* Can't determine; do not change */
+ probepoint |= ((0xc << 16) | inst_ind);
+#endif
+ }
+ } else {
+ inst_ind = net->cc_mod.rtcc.last_inst_ind;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ inst_bw = bytes_for_this_rtt;
+			/* Can't determine; do not change */
+ probepoint |= ((0xd << 16) | inst_ind);
+#endif
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((nbw << 32) | inst_bw),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ } else {
+ /* No rtt measurement, use last one */
+ inst_ind = net->cc_mod.rtcc.last_inst_ind;
+ }
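+	/*
+	 * Units note: net->rtt is in microseconds, so div above is the
+	 * RTT in milliseconds and inst_bw, like nbw, is in bytes per
+	 * millisecond. E.g. 28,000 bytes acked over a 20,000 usec RTT
+	 * gives div = 20 and inst_bw = 1,400 bytes/ms (~11.2 Mbit/s).
+	 */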
+ bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
+ if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ret = cc_bw_increase(stcb, net, nbw, vtag);
+#else
+ ret = cc_bw_increase(stcb, net, nbw);
+#endif
+ goto out;
+ }
+ rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
+ if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
+#else
+ ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, inst_ind);
+#endif
+ goto out;
+ }
+ /* If we reach here then
+ * we are in a situation where
+ * the bw stayed the same.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
+#else
+ ret = cc_bw_same(stcb, net, nbw, rtt_offset, inst_ind);
+#endif
+out:
+ net->cc_mod.rtcc.last_inst_ind = inst_ind;
+ return (ret);
+}
+
+static void
+sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
+{
+ struct sctp_nets *net;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ int old_cwnd;
+#endif
+ uint32_t t_ssthresh, t_cwnd, incr;
+ uint64_t t_ucwnd_sbw;
+ uint64_t t_path_mptcp;
+ uint64_t mptcp_like_alpha;
+ uint32_t srtt;
+ uint64_t max_path;
+
+ /* MT FIXME: Don't compute this over and over again */
+ t_ssthresh = 0;
+ t_cwnd = 0;
+ t_ucwnd_sbw = 0;
+ t_path_mptcp = 0;
+ mptcp_like_alpha = 1;
+ if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+ (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
+ (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
+ max_path = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ t_ssthresh += net->ssthresh;
+ t_cwnd += net->cwnd;
+			/* lastsa>>3; we don't need to divide ... */
+ srtt = net->lastsa;
+ if (srtt > 0) {
+ uint64_t tmp;
+
+ t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
+ t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
+ (((uint64_t)net->mtu) * (uint64_t)srtt);
+ tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
+ ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
+ if (tmp > max_path) {
+ max_path = tmp;
+ }
+ }
+ }
+ if (t_path_mptcp > 0) {
+ mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
+ } else {
+ mptcp_like_alpha = 1;
+ }
+ }
+ if (t_ssthresh == 0) {
+ t_ssthresh = 1;
+ }
+ if (t_ucwnd_sbw == 0) {
+ t_ucwnd_sbw = 1;
+ }
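+	/*
+	 * Ignoring the fixed-point shifts, the loop above computes the
+	 * MPTCP-style coupling factor
+	 *
+	 *   alpha = max_i(cwnd_i / (mtu_i * srtt_i^2)) /
+	 *           (sum_i(cwnd_i / (mtu_i * srtt_i)))^2
+	 *
+	 * which caps the aggregate growth of all paths at roughly what a
+	 * single flow on the best path would achieve.
+	 */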
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+ SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+ }
+ continue;
+ }
+#ifdef JANA_CMT_FAST_RECOVERY
+ /* CMT fast recovery code
+ */
+ /*
+ if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+ @@@ Do something
+ }
+ else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+ */
+#endif
+
+ if (asoc->fast_retran_loss_recovery &&
+ (will_exit == 0) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * If we are in loss recovery we skip any cwnd
+ * update
+ */
+ return;
+ }
+ /*
+ * Did any measurements go on for this network?
+ */
+ if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
+ uint64_t nbw;
+ /*
+ * At this point our bw_bytes has been updated
+ * by incoming sack information.
+ *
+ * But our bw may not yet be set.
+ *
+ */
+ if ((net->cc_mod.rtcc.new_tot_time/1000) > 0) {
+ nbw = net->cc_mod.rtcc.bw_bytes/(net->cc_mod.rtcc.new_tot_time/1000);
+ } else {
+ nbw = net->cc_mod.rtcc.bw_bytes;
+ }
+ if (net->cc_mod.rtcc.lbw) {
+ if (cc_bw_limit(stcb, net, nbw)) {
+ /* Hold here, no update */
+ continue;
+ }
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t vtag, probepoint;
+
+ probepoint = (((uint64_t)net->cwnd) << 32);
+ probepoint |= ((0xa << 16) | 0);
+ vtag = (net->rtt << 32) |
+ (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+ (stcb->rport);
+
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ nbw,
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ net->cc_mod.rtcc.lbw = nbw;
+ net->cc_mod.rtcc.lbw_rtt = net->rtt;
+ if (net->cc_mod.rtcc.rtt_set_this_sack) {
+ net->cc_mod.rtcc.rtt_set_this_sack = 0;
+ net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
+ }
+ }
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved ||
+ ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+ /* If the cumulative ack moved we can proceed */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >= net->cwnd) {
+ uint32_t limit;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ old_cwnd = net->cwnd;
+#endif
+ switch (asoc->sctp_cmt_on_off) {
+ case SCTP_CMT_RPV1:
+ limit = (uint32_t)(((uint64_t)net->mtu *
+ (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+ (uint64_t)net->ssthresh) /
+ (uint64_t)t_ssthresh);
+ incr = (uint32_t)(((uint64_t)net->net_ack *
+ (uint64_t)net->ssthresh) /
+ (uint64_t)t_ssthresh);
+ if (incr > limit) {
+ incr = limit;
+ }
+ if (incr == 0) {
+ incr = 1;
+ }
+ break;
+ case SCTP_CMT_RPV2:
+ /* lastsa>>3; we don't need to divide ...*/
+ srtt = net->lastsa;
+ if (srtt == 0) {
+ srtt = 1;
+ }
+ limit = (uint32_t)(((uint64_t)net->mtu *
+ (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+ (uint64_t)net->cwnd) /
+ ((uint64_t)srtt * t_ucwnd_sbw));
+ /* INCREASE FACTOR */
+ incr = (uint32_t)(((uint64_t)net->net_ack *
+ (uint64_t)net->cwnd) /
+ ((uint64_t)srtt * t_ucwnd_sbw));
+ /* INCREASE FACTOR */
+ if (incr > limit) {
+ incr = limit;
+ }
+ if (incr == 0) {
+ incr = 1;
+ }
+ break;
+ case SCTP_CMT_MPTCP:
+ limit = (uint32_t)(((uint64_t)net->mtu *
+ mptcp_like_alpha *
+ (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
+ SHIFT_MPTCP_MULTI);
+ incr = (uint32_t)(((uint64_t)net->net_ack *
+ mptcp_like_alpha) >>
+ SHIFT_MPTCP_MULTI);
+ if (incr > limit) {
+ incr = limit;
+ }
+ if (incr > net->net_ack) {
+ incr = net->net_ack;
+ }
+ if (incr > net->mtu) {
+ incr = net->mtu;
+ }
+ break;
+ default:
+ incr = net->net_ack;
+ if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
+ incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
+ }
+ break;
+ }
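+					/*
+					 * Worked example for the RPV1 branch
+					 * (illustrative numbers): with
+					 * net_ack = 1400, net->ssthresh = 20000,
+					 * t_ssthresh = 60000, mtu = 1500 and an
+					 * ABC L of 2, limit = (1500 * 2 * 20000) /
+					 * 60000 = 1000 and incr = (1400 * 20000) /
+					 * 60000 = 466; each path grows in proportion
+					 * to its share of the total ssthresh.
+					 */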
+ net->cwnd += incr;
+ sctp_enforce_cwnd_limit(asoc, net);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, incr,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, ack,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+ }
+ }
+ } else {
+ /* We are in congestion avoidance */
+ /*
+ * Add to pba
+ */
+ net->partial_bytes_acked += net->net_ack;
+
+ if ((net->flight_size + net->net_ack >= net->cwnd) &&
+ (net->partial_bytes_acked >= net->cwnd)) {
+ net->partial_bytes_acked -= net->cwnd;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ old_cwnd = net->cwnd;
+#endif
+ switch (asoc->sctp_cmt_on_off) {
+ case SCTP_CMT_RPV1:
+ incr = (uint32_t)(((uint64_t)net->mtu *
+ (uint64_t)net->ssthresh) /
+ (uint64_t)t_ssthresh);
+ if (incr == 0) {
+ incr = 1;
+ }
+ break;
+ case SCTP_CMT_RPV2:
+ /* lastsa>>3; we don't need to divide ... */
+ srtt = net->lastsa;
+ if (srtt == 0) {
+ srtt = 1;
+ }
+ incr = (uint32_t)((uint64_t)net->mtu *
+ (uint64_t)net->cwnd /
+ ((uint64_t)srtt *
+ t_ucwnd_sbw));
+ /* INCREASE FACTOR */
+ if (incr == 0) {
+ incr = 1;
+ }
+ break;
+ case SCTP_CMT_MPTCP:
+ incr = (uint32_t)((mptcp_like_alpha *
+ (uint64_t) net->cwnd) >>
+ SHIFT_MPTCP_MULTI);
+ if (incr > net->mtu) {
+ incr = net->mtu;
+ }
+ break;
+ default:
+ incr = net->mtu;
+ break;
+ }
+ net->cwnd += incr;
+ sctp_enforce_cwnd_limit(asoc, net);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, ack,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+ }
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+ }
+ }
+ }
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static void
+sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
+#else
+static void
+sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net)
+#endif
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ int old_cwnd;
+
+ old_cwnd = net->cwnd;
+#endif
+ net->cwnd = net->mtu;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, ack,
+ stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+ old_cwnd, net->cwnd);
+#endif
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+ (void *)net, net->cwnd);
+}
+
+
+static void
+sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+ uint32_t t_ssthresh, t_cwnd;
+ uint64_t t_ucwnd_sbw;
+
+ /* MT FIXME: Don't compute this over and over again */
+ t_ssthresh = 0;
+ t_cwnd = 0;
+ if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+ (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+ struct sctp_nets *lnet;
+ uint32_t srtt;
+
+ t_ucwnd_sbw = 0;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ t_ssthresh += lnet->ssthresh;
+ t_cwnd += lnet->cwnd;
+ srtt = lnet->lastsa;
+ /* lastsa>>3; we don't need to divide ... */
+ if (srtt > 0) {
+ t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
+ }
+ }
+ if (t_ssthresh < 1) {
+ t_ssthresh = 1;
+ }
+ if (t_ucwnd_sbw < 1) {
+ t_ucwnd_sbw = 1;
+ }
+ if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
+ net->ssthresh = (uint32_t)(((uint64_t)4 *
+ (uint64_t)net->mtu *
+ (uint64_t)net->ssthresh) /
+ (uint64_t)t_ssthresh);
+ } else {
+ uint64_t cc_delta;
+
+ srtt = net->lastsa;
+ /* lastsa>>3; we don't need to divide ... */
+ if (srtt == 0) {
+ srtt = 1;
+ }
+ cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
+ if (cc_delta < t_cwnd) {
+ net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
+ } else {
+ net->ssthresh = net->mtu;
+ }
+ }
+ if ((net->cwnd > t_cwnd / 2) &&
+ (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+ net->ssthresh = net->cwnd - t_cwnd / 2;
+ }
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ }
+ } else {
+ net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
+ }
+ net->cwnd = net->mtu;
+ net->partial_bytes_acked = 0;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, to,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+ }
+}
+
+static void
+sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
+ int in_window, int num_pkt_lost, int use_rtcc)
+{
+ int old_cwnd = net->cwnd;
+ if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
+ /* Data center Congestion Control */
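+		/* A DCTCP-like reading of the code below: the first
+		 * CE-marked TSN in a window (in_window == 0) returns cwnd
+		 * to its value at the time the marked TSN was sent, minus
+		 * one MTU per lost packet, instead of a blind halving;
+		 * later marks within the same window shave num_pkt_lost
+		 * further MTUs off both cwnd and ssthresh.
+		 */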
+ if (in_window == 0) {
+ /* Go to CA with the cwnd at the point we sent
+ * the TSN that was marked with a CE.
+ */
+ if (net->ecn_prev_cwnd < net->cwnd) {
+ /* Restore to prev cwnd */
+ net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
+ } else {
+ /* Just cut in 1/2 */
+ net->cwnd /= 2;
+ }
+ /* Drop to CA */
+ net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+ } else {
+ /* Further tuning down required over the drastic original cut */
+ net->ssthresh -= (net->mtu * num_pkt_lost);
+ net->cwnd -= (net->mtu * num_pkt_lost);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+
+ }
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ } else {
+ if (in_window == 0) {
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ /* here back off the timer as well, to slow us down */
+ net->RTO <<= 1;
+ }
+ net->cwnd = net->ssthresh;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, ecn,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+ }
+ }
+
+}
+
+static void
+sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+ uint32_t *bottle_bw, uint32_t *on_queue)
+{
+ uint32_t bw_avail;
+ unsigned int incr;
+ int old_cwnd = net->cwnd;
+
+ /* get bottle neck bw */
+ *bottle_bw = ntohl(cp->bottle_bw);
+	/* and what's on queue */
+ *on_queue = ntohl(cp->current_onq);
+ /*
+	 * adjust the on-queue value if our flight size is larger; it could be
+	 * that the router has not yet gotten the data "in-flight" to it
+ */
+ if (*on_queue < net->flight_size) {
+ *on_queue = net->flight_size;
+ }
+	/* rtt is measured in microseconds, bottle_bw in bytes per second */
+ bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
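+	/*
+	 * bw_avail is thus the bandwidth-delay product in bytes, e.g. a
+	 * 1,250,000 bytes/sec (10 Mbit/s) bottleneck and a 40,000 usec
+	 * RTT give bw_avail = (1,250,000 * 40,000) / 1,000,000 = 50,000
+	 * bytes.
+	 */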
+ if (bw_avail > *bottle_bw) {
+ /*
+		 * Cap the growth to no more than the bottleneck.
+		 * This can happen as RTT slides up due to queues.
+		 * It also means that with more than a 1 second
+		 * RTT and an empty queue you will be limited to
+		 * bottle_bw bytes per second, even though other
+		 * points with 1/2 the RTT could get more out...
+ */
+ bw_avail = *bottle_bw;
+ }
+ if (*on_queue > bw_avail) {
+ /*
+		 * No room for anything else, so don't allow anything
+		 * else to be "added to the fire".
+ */
+ int seg_inflight, seg_onqueue, my_portion;
+
+ net->partial_bytes_acked = 0;
+ /* how much are we over queue size? */
+ incr = *on_queue - bw_avail;
+ if (stcb->asoc.seen_a_sack_this_pkt) {
+ /*
+ * undo any cwnd adjustment that the sack
+ * might have made
+ */
+ net->cwnd = net->prev_cwnd;
+ }
+ /* Now how much of that is mine? */
+ seg_inflight = net->flight_size / net->mtu;
+ seg_onqueue = *on_queue / net->mtu;
+ my_portion = (incr * seg_inflight) / seg_onqueue;
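+		/*
+		 * E.g. (illustrative numbers): on_queue = 60,000 and
+		 * bw_avail = 50,000 leave an overage of 10,000 bytes; with
+		 * flight_size = 30,000 and mtu = 1,500, seg_inflight = 20
+		 * of seg_onqueue = 40, so my_portion = (10,000 * 20) / 40 =
+		 * 5,000 bytes.
+		 */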
+
+		/* Have I made an adjustment already? */
+ if (net->cwnd > net->flight_size) {
+ /*
+			 * For this flight an adjustment was already made,
+			 * so we need to decrease the portion by the share
+			 * covered by our previous adjustment.
+ */
+ int diff_adj;
+
+ diff_adj = net->cwnd - net->flight_size;
+ if (diff_adj > my_portion)
+ my_portion = 0;
+ else
+ my_portion -= diff_adj;
+ }
+ /*
+		 * back down to the previous cwnd (assume we have
+		 * had a sack before this packet), minus whatever
+		 * portion of the overage is my fault.
+ */
+ net->cwnd -= my_portion;
+
+ /* we will NOT back down more than 1 MTU */
+ if (net->cwnd <= net->mtu) {
+ net->cwnd = net->mtu;
+ }
+ /* force into CA */
+ net->ssthresh = net->cwnd - 1;
+ } else {
+ /*
+		 * Take 1/4 of the space left, or max_burst MTUs,
+		 * whichever is less.
+ */
+ incr = (bw_avail - *on_queue) >> 2;
+ if ((stcb->asoc.max_burst > 0) &&
+ (stcb->asoc.max_burst * net->mtu < incr)) {
+ incr = stcb->asoc.max_burst * net->mtu;
+ }
+ net->cwnd += incr;
+ }
+ if (net->cwnd > bw_avail) {
+ /* We can't exceed the pipe size */
+ net->cwnd = bw_avail;
+ }
+ if (net->cwnd < net->mtu) {
+ /* We always have 1 MTU */
+ net->cwnd = net->mtu;
+ }
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ if (net->cwnd - old_cwnd != 0) {
+ /* log only changes */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, pd,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_SAT);
+ }
+ }
+}
+
+static void
+sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int burst_limit)
+{
+ int old_cwnd = net->cwnd;
+
+ if (net->ssthresh < net->cwnd)
+ net->ssthresh = net->cwnd;
+ if (burst_limit) {
+ net->cwnd = (net->flight_size + (burst_limit * net->mtu));
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SDT_PROBE5(sctp, cwnd, net, bl,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
+ }
+ }
+}
+
+static void
+sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit)
+{
+	/* Passing zero as the last argument disables the rtcc algorithm */
+ sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
+}
+
+static void
+sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+ int in_window, int num_pkt_lost)
+{
+	/* Passing zero as the last argument disables the rtcc algorithm */
+ sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
+}
+
+/* Here starts the RTCCVAR type CC invented by RRS which
+ * is a slight mod to RFC2581. We reuse a common routine or
+ * two since these algorithms are so close and need to
+ * remain the same.
+ */
+static void
+sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+ int in_window, int num_pkt_lost)
+{
+ sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
+}
+
+
+static void
+sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
+ struct sctp_tmit_chunk *tp1)
+{
+ net->cc_mod.rtcc.bw_bytes += tp1->send_size;
+}
+
+static void
+sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
+ struct sctp_nets *net)
+{
+ if (net->cc_mod.rtcc.tls_needs_set > 0) {
+		/* We had a bw measurement going on */
+ struct timeval ltls;
+ SCTP_GETPTIME_TIMEVAL(&ltls);
+ timevalsub(&ltls, &net->cc_mod.rtcc.tls);
+ net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
+ }
+}
+
+static void
+sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t vtag, probepoint;
+
+#endif
+ if (net->cc_mod.rtcc.lbw) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Clear the old bw.. we went to 0 in-flight */
+ vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+ (stcb->rport);
+ probepoint = (((uint64_t)net->cwnd) << 32);
+ /* Probe point 8 */
+ probepoint |= ((8 << 16) | 0);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ ((net->cc_mod.rtcc.lbw << 32) | 0),
+ ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+ net->flight_size,
+ probepoint);
+#endif
+ net->cc_mod.rtcc.lbw_rtt = 0;
+ net->cc_mod.rtcc.cwnd_at_bw_set = 0;
+ net->cc_mod.rtcc.lbw = 0;
+ net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
+ net->cc_mod.rtcc.vol_reduce = 0;
+ net->cc_mod.rtcc.bw_tot_time = 0;
+ net->cc_mod.rtcc.bw_bytes = 0;
+ net->cc_mod.rtcc.tls_needs_set = 0;
+ if (net->cc_mod.rtcc.steady_step) {
+ net->cc_mod.rtcc.vol_reduce = 0;
+ net->cc_mod.rtcc.step_cnt = 0;
+ net->cc_mod.rtcc.last_step_state = 0;
+ }
+ if (net->cc_mod.rtcc.ret_from_eq) {
+ /* less aggressive one - reset cwnd too */
+ uint32_t cwnd_in_mtu, cwnd;
+
+ cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
+ if (cwnd_in_mtu == 0) {
+ /* Using 0 means that the value of RFC 4960 is used. */
+ cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+ } else {
+ /*
+ * We take the minimum of the burst limit and the
+ * initial congestion window.
+ */
+ if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
+ cwnd_in_mtu = stcb->asoc.max_burst;
+ cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
+ }
+ if (net->cwnd > cwnd) {
+				/* Only set if we did not just time out (i.e. cwnd is not down to 1 MTU) */
+ net->cwnd = cwnd;
+ }
+ }
+ }
+}
+
+static void
+sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint64_t vtag, probepoint;
+
+#endif
+ sctp_set_initial_cc_param(stcb, net);
+ stcb->asoc.use_precise_time = 1;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ probepoint = (((uint64_t)net->cwnd) << 32);
+ probepoint |= ((9 << 16) | 0);
+ vtag = (net->rtt << 32) |
+ (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+ (stcb->rport);
+ SDT_PROBE5(sctp, cwnd, net, rttvar,
+ vtag,
+ 0,
+ 0,
+ 0,
+ probepoint);
+#endif
+ net->cc_mod.rtcc.lbw_rtt = 0;
+ net->cc_mod.rtcc.cwnd_at_bw_set = 0;
+ net->cc_mod.rtcc.vol_reduce = 0;
+ net->cc_mod.rtcc.lbw = 0;
+ net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
+ net->cc_mod.rtcc.bw_tot_time = 0;
+ net->cc_mod.rtcc.bw_bytes = 0;
+ net->cc_mod.rtcc.tls_needs_set = 0;
+ net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
+ net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
+ net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
+ net->cc_mod.rtcc.step_cnt = 0;
+ net->cc_mod.rtcc.last_step_state = 0;
+}
+
+static int
+sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
+ struct sctp_cc_option *cc_opt)
+{
+ struct sctp_nets *net;
+ if (setorget == 1) {
+ /* a set */
+ if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
+ if ((cc_opt->aid_value.assoc_value != 0) &&
+ (cc_opt->aid_value.assoc_value != 1)) {
+ return (EINVAL);
+ }
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
+ }
+ } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
+ if ((cc_opt->aid_value.assoc_value != 0) &&
+ (cc_opt->aid_value.assoc_value != 1)) {
+ return (EINVAL);
+ }
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
+ }
+ } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
+ }
+ } else {
+ return (EINVAL);
+ }
+ } else {
+ /* a get */
+ if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ return (EFAULT);
+ }
+ cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
+ } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ return (EFAULT);
+ }
+ cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
+ } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ return (EFAULT);
+ }
+ cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
+ } else {
+ return (EINVAL);
+ }
+ }
+ return (0);
+}
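+
+/* Usage sketch (an illustration, not part of the imported file): with
+ * the RTCC module selected, an application reaches this handler
+ * through the SCTP_CC_OPTION socket option, e.g.:
+ *
+ *   struct sctp_cc_option opt;
+ *
+ *   memset(&opt, 0, sizeof(opt));
+ *   opt.option = SCTP_CC_OPT_STEADY_STEP;
+ *   opt.aid_value.assoc_value = 1;
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_CC_OPTION,
+ *                      &opt, (socklen_t)sizeof(opt));
+ */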
+
+static void
+sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
+ struct sctp_nets *net)
+{
+ if (net->cc_mod.rtcc.tls_needs_set == 0) {
+ SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
+ net->cc_mod.rtcc.tls_needs_set = 2;
+ }
+}
+
+static void
+sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit)
+{
+	/* Passing one as the last argument enables the rtcc algorithm */
+ sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
+}
+
+static void
+sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
+ struct sctp_nets *net,
+ struct timeval *now SCTP_UNUSED)
+{
+ net->cc_mod.rtcc.rtt_set_this_sack = 1;
+}
+
+/* Here starts Sally Floyd's HS-TCP */
+
+struct sctp_hs_raise_drop {
+ int32_t cwnd;
+ int8_t increase;
+ int8_t drop_percent;
+};
+
+#define SCTP_HS_TABLE_SIZE 73
+
+static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
+ {38, 1, 50}, /* 0 */
+ {118, 2, 44}, /* 1 */
+ {221, 3, 41}, /* 2 */
+ {347, 4, 38}, /* 3 */
+ {495, 5, 37}, /* 4 */
+ {663, 6, 35}, /* 5 */
+ {851, 7, 34}, /* 6 */
+ {1058, 8, 33}, /* 7 */
+ {1284, 9, 32}, /* 8 */
+ {1529, 10, 31}, /* 9 */
+ {1793, 11, 30}, /* 10 */
+ {2076, 12, 29}, /* 11 */
+ {2378, 13, 28}, /* 12 */
+ {2699, 14, 28}, /* 13 */
+ {3039, 15, 27}, /* 14 */
+ {3399, 16, 27}, /* 15 */
+ {3778, 17, 26}, /* 16 */
+ {4177, 18, 26}, /* 17 */
+ {4596, 19, 25}, /* 18 */
+ {5036, 20, 25}, /* 19 */
+ {5497, 21, 24}, /* 20 */
+ {5979, 22, 24}, /* 21 */
+ {6483, 23, 23}, /* 22 */
+ {7009, 24, 23}, /* 23 */
+ {7558, 25, 22}, /* 24 */
+ {8130, 26, 22}, /* 25 */
+ {8726, 27, 22}, /* 26 */
+ {9346, 28, 21}, /* 27 */
+ {9991, 29, 21}, /* 28 */
+ {10661, 30, 21}, /* 29 */
+ {11358, 31, 20}, /* 30 */
+ {12082, 32, 20}, /* 31 */
+ {12834, 33, 20}, /* 32 */
+ {13614, 34, 19}, /* 33 */
+ {14424, 35, 19}, /* 34 */
+ {15265, 36, 19}, /* 35 */
+ {16137, 37, 19}, /* 36 */
+ {17042, 38, 18}, /* 37 */
+ {17981, 39, 18}, /* 38 */
+ {18955, 40, 18}, /* 39 */
+ {19965, 41, 17}, /* 40 */
+ {21013, 42, 17}, /* 41 */
+ {22101, 43, 17}, /* 42 */
+ {23230, 44, 17}, /* 43 */
+ {24402, 45, 16}, /* 44 */
+ {25618, 46, 16}, /* 45 */
+ {26881, 47, 16}, /* 46 */
+ {28193, 48, 16}, /* 47 */
+ {29557, 49, 15}, /* 48 */
+ {30975, 50, 15}, /* 49 */
+ {32450, 51, 15}, /* 50 */
+ {33986, 52, 15}, /* 51 */
+ {35586, 53, 14}, /* 52 */
+ {37253, 54, 14}, /* 53 */
+ {38992, 55, 14}, /* 54 */
+ {40808, 56, 14}, /* 55 */
+ {42707, 57, 13}, /* 56 */
+ {44694, 58, 13}, /* 57 */
+ {46776, 59, 13}, /* 58 */
+ {48961, 60, 13}, /* 59 */
+ {51258, 61, 13}, /* 60 */
+ {53677, 62, 12}, /* 61 */
+ {56230, 63, 12}, /* 62 */
+ {58932, 64, 12}, /* 63 */
+ {61799, 65, 12}, /* 64 */
+ {64851, 66, 11}, /* 65 */
+ {68113, 67, 11}, /* 66 */
+ {71617, 68, 11}, /* 67 */
+ {75401, 69, 10}, /* 68 */
+ {79517, 70, 10}, /* 69 */
+ {84035, 71, 10}, /* 70 */
+ {89053, 72, 10}, /* 71 */
+ {94717, 73, 9} /* 72 */
+};
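+
+/* Reading the table (worked example): the cwnd column is compared
+ * against net->cwnd >> 10, i.e. cwnd in units of 1024 bytes. A cwnd
+ * of 2 MB gives cur_val = 2048; row 11 ({2076, 12, 29}) is the first
+ * whose cwnd entry exceeds that, so each slow-start step adds
+ * 12 << 10 = 12288 bytes and a loss event cuts cwnd by 29%.
+ */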
+
+static void
+sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int cur_val, i, indx, incr;
+ int old_cwnd = net->cwnd;
+
+ cur_val = net->cwnd >> 10;
+ indx = SCTP_HS_TABLE_SIZE - 1;
+
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* normal mode */
+ if (net->net_ack > net->mtu) {
+ net->cwnd += net->mtu;
+ } else {
+ net->cwnd += net->net_ack;
+ }
+ } else {
+ for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
+ if (cur_val < sctp_cwnd_adjust[i].cwnd) {
+ indx = i;
+ break;
+ }
+ }
+ net->last_hs_used = indx;
+ incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
+ net->cwnd += incr;
+ }
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
+ }
+}
+
+static void
+sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int cur_val, i, indx;
+ int old_cwnd = net->cwnd;
+
+ cur_val = net->cwnd >> 10;
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* normal mode */
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
+ net->cwnd = net->ssthresh;
+ } else {
+ /* drop by the proper amount */
+ net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
+ (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
+ net->cwnd = net->ssthresh;
+ /* now where are we */
+ indx = net->last_hs_used;
+ cur_val = net->cwnd >> 10;
+ /* reset where we are in the table */
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+			/* fell out of hs */
+ net->last_hs_used = 0;
+ } else {
+ for (i = indx; i >= 1; i--) {
+ if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
+ break;
+ }
+ }
+ net->last_hs_used = indx;
+ }
+ }
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
+ }
+}
+
+static void
+sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+ /*
+ * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
+ * (net->fast_retran_loss_recovery == 0)))
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) ||
+ (asoc->sctp_cmt_on_off > 0)) {
+ /* out of a RFC2582 Fast recovery window? */
+ if (net->net_ack > 0) {
+ /*
+				 * Per section 7.2.3, check whether any
+				 * destinations had a fast retransmit
+				 * to them. If so, what we need to do is
+				 * adjust ssthresh and cwnd.
+ */
+ struct sctp_tmit_chunk *lchk;
+
+ sctp_hs_cwnd_decrease(stcb, net);
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+ }
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+}
+
+static void
+sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
+{
+ struct sctp_nets *net;
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+ SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+ }
+ continue;
+ }
+#ifdef JANA_CMT_FAST_RECOVERY
+ /* CMT fast recovery code
+ */
+ /*
+ if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+ @@@ Do something
+ }
+ else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+ */
+#endif
+
+ if (asoc->fast_retran_loss_recovery &&
+ (will_exit == 0) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * If we are in loss recovery we skip any cwnd
+ * update
+ */
+ return;
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved ||
+ ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+ /* If the cumulative ack moved we can proceed */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >= net->cwnd) {
+ sctp_hs_cwnd_increase(stcb, net);
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+ }
+ }
+ } else {
+ /* We are in congestion avoidance */
+ net->partial_bytes_acked += net->net_ack;
+ if ((net->flight_size + net->net_ack >= net->cwnd) &&
+ (net->partial_bytes_acked >= net->cwnd)) {
+ net->partial_bytes_acked -= net->cwnd;
+ net->cwnd += net->mtu;
+ sctp_enforce_cwnd_limit(asoc, net);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+ }
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+ }
+ }
+ }
+}
+
+
+/*
+ * H-TCP congestion control. The algorithm is detailed in:
+ * R.N.Shorten, D.J.Leith:
+ * "H-TCP: TCP for high-speed and long-distance networks"
+ * Proc. PFLDnet, Argonne, 2004.
+ * http://www.hamilton.ie/net/htcp3.pdf
+ */
+
+
+static int use_rtt_scaling = 1;
+static int use_bandwidth_switch = 1;
+
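+/* Modular-arithmetic test that seq1 lies within the window
+ * [seq2, seq3], robust against 32-bit wraparound.
+ */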
+static inline int
+between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
+{
+ return (seq3 - seq2 >= seq1 - seq2);
+}
+
+static inline uint32_t
+htcp_cong_time(struct htcp *ca)
+{
+ return (sctp_get_tick_count() - ca->last_cong);
+}
+
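+/* Time since the last congestion event, expressed in minRTT units,
+ * i.e. roughly the number of congestion-free RTTs so far.
+ */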
+static inline uint32_t
+htcp_ccount(struct htcp *ca)
+{
+ return (ca->minRTT == 0 ? htcp_cong_time(ca) : htcp_cong_time(ca)/ca->minRTT);
+}
+
+static inline void
+htcp_reset(struct htcp *ca)
+{
+ ca->undo_last_cong = ca->last_cong;
+ ca->undo_maxRTT = ca->maxRTT;
+ ca->undo_old_maxB = ca->old_maxB;
+ ca->last_cong = sctp_get_tick_count();
+}
+
+#ifdef SCTP_NOT_USED
+
+static uint32_t
+htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
+ net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
+ net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
+ return (max(net->cwnd, ((net->ssthresh/net->mtu<<7)/net->cc_mod.htcp_ca.beta)*net->mtu));
+}
+
+#endif
+
+static inline void
+measure_rtt(struct sctp_nets *net)
+{
+ uint32_t srtt = net->lastsa>>SCTP_RTT_SHIFT;
+
+ /* keep track of minimum RTT seen so far, minRTT is zero at first */
+ if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
+ net->cc_mod.htcp_ca.minRTT = srtt;
+
+ /* max RTT */
+ if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
+ if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
+ net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
+ if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT+sctp_msecs_to_ticks(20))
+ net->cc_mod.htcp_ca.maxRTT = srtt;
+ }
+}
+
+static void
+measure_achieved_throughput(struct sctp_nets *net)
+{
+ uint32_t now = sctp_get_tick_count();
+
+ if (net->fast_retran_ip == 0)
+ net->cc_mod.htcp_ca.bytes_acked = net->net_ack;
+
+ if (!use_bandwidth_switch)
+ return;
+
+ /* achieved throughput calculations */
+ /* JRS - not 100% sure of this statement */
+ if (net->fast_retran_ip == 1) {
+ net->cc_mod.htcp_ca.bytecount = 0;
+ net->cc_mod.htcp_ca.lasttime = now;
+ return;
+ }
+
+ net->cc_mod.htcp_ca.bytecount += net->net_ack;
+ if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
+ (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
+ (net->cc_mod.htcp_ca.minRTT > 0)) {
+ uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount/net->mtu*hz/(now - net->cc_mod.htcp_ca.lasttime);
+
+ if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
+ /* just after backoff */
+ net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
+ } else {
+ net->cc_mod.htcp_ca.Bi = (3*net->cc_mod.htcp_ca.Bi + cur_Bi)/4;
+ if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
+ net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
+ if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
+ net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
+ }
+ net->cc_mod.htcp_ca.bytecount = 0;
+ net->cc_mod.htcp_ca.lasttime = now;
+ }
+}
+
+static inline void
+htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
+{
+ if (use_bandwidth_switch) {
+ uint32_t maxB = ca->maxB;
+ uint32_t old_maxB = ca->old_maxB;
+ ca->old_maxB = ca->maxB;
+
+ if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) {
+ ca->beta = BETA_MIN;
+ ca->modeswitch = 0;
+ return;
+ }
+ }
+
+ if (ca->modeswitch && minRTT > sctp_msecs_to_ticks(10) && maxRTT) {
+ ca->beta = (minRTT<<7)/maxRTT;
+ if (ca->beta < BETA_MIN)
+ ca->beta = BETA_MIN;
+ else if (ca->beta > BETA_MAX)
+ ca->beta = BETA_MAX;
+ } else {
+ ca->beta = BETA_MIN;
+ ca->modeswitch = 1;
+ }
+}
+
+static inline void
+htcp_alpha_update(struct htcp *ca)
+{
+ uint32_t minRTT = ca->minRTT;
+ uint32_t factor = 1;
+ uint32_t diff = htcp_cong_time(ca);
+
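+	/*
+	 * H-TCP's increase polynomial: with D seconds elapsed since the
+	 * last congestion event and D > 1, factor = 1 + 10*(D - 1) +
+	 * ((D - 1) / 2)^2, computed below in ticks. E.g. 3 seconds
+	 * after a backoff, D - 1 = 2 and factor = 1 + 20 + 1 = 22.
+	 */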
+ if (diff > (uint32_t)hz) {
+ diff -= hz;
+ factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/hz))/hz;
+ }
+
+ if (use_rtt_scaling && minRTT) {
+ uint32_t scale = (hz<<3)/(10*minRTT);
+ scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */
+ factor = (factor<<3)/scale;
+ if (!factor)
+ factor = 1;
+ }
+
+ ca->alpha = 2*factor*((1<<7)-ca->beta);
+ if (!ca->alpha)
+ ca->alpha = ALPHA_BASE;
+}
+
+/* After we have the rtt data to calculate beta, we'd still prefer to wait one
+ * rtt before we adjust our beta to ensure we are working from consistent
+ * data.
+ *
+ * This function should be called when we hit a congestion event, since only at
+ * that point do we really have a real sense of maxRTT (the queues en route
+ * were just getting too full).
+ */
+static void
+htcp_param_update(struct sctp_nets *net)
+{
+ uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
+ uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;
+
+ htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
+ htcp_alpha_update(&net->cc_mod.htcp_ca);
+
+ /* add slowly fading memory for maxRTT to accommodate routing changes etc */
+ if (minRTT > 0 && maxRTT > minRTT)
+ net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
+}
+
+static uint32_t
+htcp_recalc_ssthresh(struct sctp_nets *net)
+{
+ htcp_param_update(net);
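+	/*
+	 * ssthresh becomes cwnd scaled by beta (a <<7 fixed-point value
+	 * in [0.5, 0.8]), rounded to MTU granularity, with a 2-MTU
+	 * floor; e.g. cwnd = 100 MTUs and beta = 77 (~0.6) yield
+	 * (100 * 77) >> 7 = 60 MTUs.
+	 */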
+ return (max(((net->cwnd/net->mtu * net->cc_mod.htcp_ca.beta) >> 7)*net->mtu, 2U*net->mtu));
+}
+
+static void
+htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /*-
+ * How to handle these functions?
+ * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
+ * return;
+ */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >= net->cwnd) {
+ if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
+ net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+
+ } else {
+ net->cwnd += net->net_ack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+
+ }
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+ }
+ }
+ } else {
+ measure_rtt(net);
+
+ /* In dangerous area, increase slowly.
+ * In theory this is net->cwnd += alpha / net->cwnd
+ */
+ /* What is snd_cwnd_cnt?? */
+ if (((net->partial_bytes_acked/net->mtu * net->cc_mod.htcp_ca.alpha) >> 7)*net->mtu >= net->cwnd) {
+ /*-
+ * Does SCTP have a cwnd clamp?
+ * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
+ */
+ net->cwnd += net->mtu;
+ net->partial_bytes_acked = 0;
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ htcp_alpha_update(&net->cc_mod.htcp_ca);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+ }
+ } else {
+ net->partial_bytes_acked += net->net_ack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+ }
+
+ net->cc_mod.htcp_ca.bytes_acked = net->mtu;
+ }
+}
+
+#ifdef SCTP_NOT_USED
+/* Lower bound on congestion window. */
+static uint32_t
+htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ return (net->ssthresh);
+}
+#endif
+
+static void
+htcp_init(struct sctp_nets *net)
+{
+ memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
+ net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
+ net->cc_mod.htcp_ca.beta = BETA_MIN;
+ net->cc_mod.htcp_ca.bytes_acked = net->mtu;
+ net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
+}
+
+static void
+sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /*
+	 * We take the max of the burst limit times an MTU or the
+	 * INITIAL_CWND. We then limit this to 4 MTUs of sending.
+ */
+ net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+ net->ssthresh = stcb->asoc.peers_rwnd;
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ htcp_init(net);
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+ }
+}
+
+static void
+sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
+{
+ struct sctp_nets *net;
+
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+ SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+ }
+ continue;
+ }
+#ifdef JANA_CMT_FAST_RECOVERY
+ /* CMT fast recovery code
+ */
+ /*
+ if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+ @@@ Do something
+ }
+ else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+ */
+#endif
+
+ if (asoc->fast_retran_loss_recovery &&
+ will_exit == 0 &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * If we are in loss recovery we skip any cwnd
+ * update
+ */
+ return;
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved ||
+ ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+ htcp_cong_avoid(stcb, net);
+ measure_achieved_throughput(net);
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+ }
+ }
+ }
+}
+
+static void
+sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+ /*
+ * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
+ * (net->fast_retran_loss_recovery == 0)))
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) ||
+ (asoc->sctp_cmt_on_off > 0)) {
+ /* out of a RFC2582 Fast recovery window? */
+ if (net->net_ack > 0) {
+ /*
+				 * Per section 7.2.3, check whether any
+				 * destinations had a fast retransmit
+				 * to them. If so, what we need to do is
+				 * adjust ssthresh and cwnd.
+ */
+ struct sctp_tmit_chunk *lchk;
+ int old_cwnd = net->cwnd;
+
+ /* JRS - reset as if state were changed */
+ htcp_reset(&net->cc_mod.htcp_ca);
+ net->ssthresh = htcp_recalc_ssthresh(net);
+ net->cwnd = net->ssthresh;
+ sctp_enforce_cwnd_limit(asoc, net);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_FR);
+ }
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+ }
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+}
+
+static void
+sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ /* JRS - reset as if the state were being changed to timeout */
+ htcp_reset(&net->cc_mod.htcp_ca);
+ net->ssthresh = htcp_recalc_ssthresh(net);
+ net->cwnd = net->mtu;
+ net->partial_bytes_acked = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+ }
+}
+
+static void
+sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
+{
+	int old_cwnd = net->cwnd;
+
+	/* JRS - reset htcp as if state changed */
+ if (in_window == 0) {
+ htcp_reset(&net->cc_mod.htcp_ca);
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ net->ssthresh = htcp_recalc_ssthresh(net);
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ /* here back off the timer as well, to slow us down */
+ net->RTO <<= 1;
+ }
+ net->cwnd = net->ssthresh;
+ sctp_enforce_cwnd_limit(&stcb->asoc, net);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+ }
+}
+
+const struct sctp_cc_functions sctp_cc_functions[] = {
+{
+#if defined(_WIN32) && !defined(__MINGW32__)
+ sctp_set_initial_cc_param,
+ sctp_cwnd_update_after_sack,
+ sctp_cwnd_update_exit_pf_common,
+ sctp_cwnd_update_after_fr,
+ sctp_cwnd_update_after_timeout,
+ sctp_cwnd_update_after_ecn_echo,
+ sctp_cwnd_update_after_packet_dropped,
+ sctp_cwnd_update_after_output,
+#else
+ .sctp_set_initial_cc_param = sctp_set_initial_cc_param,
+ .sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
+ .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+ .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
+ .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
+ .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
+ .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+ .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+#endif
+},
+{
+#if defined(_WIN32) && !defined(__MINGW32__)
+ sctp_set_initial_cc_param,
+ sctp_hs_cwnd_update_after_sack,
+ sctp_cwnd_update_exit_pf_common,
+ sctp_hs_cwnd_update_after_fr,
+ sctp_cwnd_update_after_timeout,
+ sctp_cwnd_update_after_ecn_echo,
+ sctp_cwnd_update_after_packet_dropped,
+ sctp_cwnd_update_after_output,
+#else
+ .sctp_set_initial_cc_param = sctp_set_initial_cc_param,
+ .sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
+ .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+ .sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
+ .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
+ .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
+ .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+ .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+#endif
+},
+{
+#if defined(_WIN32) && !defined(__MINGW32__)
+ sctp_htcp_set_initial_cc_param,
+ sctp_htcp_cwnd_update_after_sack,
+ sctp_cwnd_update_exit_pf_common,
+ sctp_htcp_cwnd_update_after_fr,
+ sctp_htcp_cwnd_update_after_timeout,
+ sctp_htcp_cwnd_update_after_ecn_echo,
+ sctp_cwnd_update_after_packet_dropped,
+ sctp_cwnd_update_after_output,
+#else
+ .sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
+ .sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
+ .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+ .sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
+ .sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
+ .sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
+ .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+ .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+#endif
+},
+{
+#if defined(_WIN32) && !defined(__MINGW32__)
+ sctp_set_rtcc_initial_cc_param,
+ sctp_cwnd_update_rtcc_after_sack,
+ sctp_cwnd_update_exit_pf_common,
+ sctp_cwnd_update_after_fr,
+ sctp_cwnd_update_after_timeout,
+ sctp_cwnd_update_rtcc_after_ecn_echo,
+ sctp_cwnd_update_after_packet_dropped,
+ sctp_cwnd_update_after_output,
+ sctp_cwnd_update_rtcc_packet_transmitted,
+ sctp_cwnd_update_rtcc_tsn_acknowledged,
+ sctp_cwnd_new_rtcc_transmission_begins,
+ sctp_cwnd_prepare_rtcc_net_for_sack,
+ sctp_cwnd_rtcc_socket_option,
+ sctp_rtt_rtcc_calculated
+#else
+ .sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
+ .sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
+ .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+ .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
+ .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
+ .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
+ .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+ .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+ .sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
+ .sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
+ .sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
+ .sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
+ .sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
+ .sctp_rtt_calculated = sctp_rtt_rtcc_calculated
+#endif
+}
+};
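+
+/* Usage sketch (an illustration, not part of the imported file): the
+ * four entries above are indexed by the SCTP_PLUGGABLE_CC socket
+ * option in the order SCTP_CC_RFC2581 (0), SCTP_CC_HSTCP (1),
+ * SCTP_CC_HTCP (2) and SCTP_CC_RTCC (3), e.g.:
+ *
+ *   struct sctp_assoc_value av;
+ *
+ *   memset(&av, 0, sizeof(av));
+ *   av.assoc_value = SCTP_CC_HTCP;
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
+ *                      &av, (socklen_t)sizeof(av));
+ */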
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_constants.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_constants.h
new file mode 100644
index 000000000..5e2c5d160
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_constants.h
@@ -0,0 +1,1078 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_constants.h 363440 2020-07-23 01:35:24Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_CONSTANTS_H_
+#define _NETINET_SCTP_CONSTANTS_H_
+
+#if defined(_WIN32) && defined(__Userspace__)
+extern void getwintimeofday(struct timeval *tv);
+#endif
+
+/* IANA assigned port number for SCTP over UDP encapsulation */
+#define SCTP_OVER_UDP_TUNNELING_PORT 9899
+
+/* Number of packets to get before sack sent by default */
+#define SCTP_DEFAULT_SACK_FREQ 2
+
+/* Address limit - This variable is calculated
+ * based on a 65535 byte max IP packet. We take out 100 bytes
+ * for the cookie, 40 bytes for a v6 header and 32
+ * bytes for the init structure. A second init structure
+ * for the init-ack and then finally a third one for the
+ * embedded init. This yields 100+40+(3 * 32) = 236 bytes.
+ * This leaves 65299 bytes for addresses. We throw out the 299 bytes.
+ * Now whatever we send in the INIT we need to allow to get back in the
+ * INIT-ACK, plus all the values from the INIT and INIT-ACK
+ * listed in the cookie. Plus we need some overhead for
+ * parameters that may be copied into the COOKIE. If we
+ * allow 1080 addresses, and each side has 1080 V6 addresses,
+ * that will be 21600 bytes. In the INIT-ACK we will
+ * see the INIT-ACK's 21600 bytes plus 43200 in the cookie. This leaves
+ * about 500 bytes slack for misc things in the cookie.
+ */
+#define SCTP_ADDRESS_LIMIT 1080
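A quick sanity check of the arithmetic above, as a minimal sketch (not part of the upstream import; the 20 bytes per v6 address parameter is the figure implied by "1080 addresses ... 21600 bytes", i.e. a 4-byte parameter header plus a 16-byte address):

    #include <assert.h>

    int main(void) {
        int overhead = 100 + 40 + (3 * 32);   /* cookie + v6 header + 3 inits = 236 */
        int one_list = 1080 * 20;             /* 1080 v6 address params = 21600 bytes */
        /* INIT-ACK carries its own list plus both lists echoed in the cookie. */
        int slack = 65535 - overhead - (3 * one_list);
        assert(overhead == 236 && one_list == 21600);
        assert(slack > 0 && slack < 600);     /* "about 500 bytes of slack" (499) */
        return 0;
    }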
+
+/* We need at least 2k of space for ourselves;
+ * for INITs larger than that, we abort.
+ */
+#define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)
+
+/* Largest length of a chunk */
+#define SCTP_MAX_CHUNK_LENGTH 0xffff
+/* Largest length of an error cause */
+#define SCTP_MAX_CAUSE_LENGTH 0xffff
+/* Number of addresses where we just skip the counting */
+#define SCTP_COUNT_LIMIT 40
+
+#define SCTP_ZERO_COPY_TICK_DELAY (((100 * hz) + 999) / 1000)
+#define SCTP_ZERO_COPY_SENDQ_TICK_DELAY (((100 * hz) + 999) / 1000)
+
+/* Number of ticks to delay before running
+ * iterator on an address change.
+ */
+#define SCTP_ADDRESS_TICK_DELAY 2
+
+#define SCTP_VERSION_STRING "KAME-BSD 1.1"
+/* #define SCTP_AUDITING_ENABLED 1 used for debug/auditing */
+#define SCTP_AUDIT_SIZE 256
+
+
+#define SCTP_KTRHEAD_NAME "sctp_iterator"
+#define SCTP_KTHREAD_PAGES 0
+
+#define SCTP_MCORE_NAME "sctp_core_worker"
+
+
+/* If you support Multi-VRF, how big to
+ * make the initial array of VRFs.
+ */
+#define SCTP_DEFAULT_VRF_SIZE 4
+
+/* JRS - Values defined for the HTCP algorithm */
+#define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */
+#define BETA_MIN (1<<6) /* 0.5 with shift << 7 */
+#define BETA_MAX 102 /* 0.8 with shift << 7 */
+
+/* Places that CWND log can happen from */
+#define SCTP_CWND_LOG_FROM_FR 1
+#define SCTP_CWND_LOG_FROM_RTX 2
+#define SCTP_CWND_LOG_FROM_BRST 3
+#define SCTP_CWND_LOG_FROM_SS 4
+#define SCTP_CWND_LOG_FROM_CA 5
+#define SCTP_CWND_LOG_FROM_SAT 6
+#define SCTP_BLOCK_LOG_INTO_BLK 7
+#define SCTP_BLOCK_LOG_OUTOF_BLK 8
+#define SCTP_BLOCK_LOG_CHECK 9
+#define SCTP_STR_LOG_FROM_INTO_STRD 10
+#define SCTP_STR_LOG_FROM_IMMED_DEL 11
+#define SCTP_STR_LOG_FROM_INSERT_HD 12
+#define SCTP_STR_LOG_FROM_INSERT_MD 13
+#define SCTP_STR_LOG_FROM_INSERT_TL 14
+#define SCTP_STR_LOG_FROM_MARK_TSN 15
+#define SCTP_STR_LOG_FROM_EXPRS_DEL 16
+#define SCTP_FR_LOG_BIGGEST_TSNS 17
+#define SCTP_FR_LOG_STRIKE_TEST 18
+#define SCTP_FR_LOG_STRIKE_CHUNK 19
+#define SCTP_FR_T3_TIMEOUT 20
+#define SCTP_MAP_PREPARE_SLIDE 21
+#define SCTP_MAP_SLIDE_FROM 22
+#define SCTP_MAP_SLIDE_RESULT 23
+#define SCTP_MAP_SLIDE_CLEARED 24
+#define SCTP_MAP_SLIDE_NONE 25
+#define SCTP_FR_T3_MARK_TIME 26
+#define SCTP_FR_T3_MARKED 27
+#define SCTP_FR_T3_STOPPED 28
+#define SCTP_FR_MARKED 30
+#define SCTP_CWND_LOG_NOADV_SS 31
+#define SCTP_CWND_LOG_NOADV_CA 32
+#define SCTP_MAX_BURST_APPLIED 33
+#define SCTP_MAX_IFP_APPLIED 34
+#define SCTP_MAX_BURST_ERROR_STOP 35
+#define SCTP_INCREASE_PEER_RWND 36
+#define SCTP_DECREASE_PEER_RWND 37
+#define SCTP_SET_PEER_RWND_VIA_SACK 38
+#define SCTP_LOG_MBCNT_INCREASE 39
+#define SCTP_LOG_MBCNT_DECREASE 40
+#define SCTP_LOG_MBCNT_CHKSET 41
+#define SCTP_LOG_NEW_SACK 42
+#define SCTP_LOG_TSN_ACKED 43
+#define SCTP_LOG_TSN_REVOKED 44
+#define SCTP_LOG_LOCK_TCB 45
+#define SCTP_LOG_LOCK_INP 46
+#define SCTP_LOG_LOCK_SOCK 47
+#define SCTP_LOG_LOCK_SOCKBUF_R 48
+#define SCTP_LOG_LOCK_SOCKBUF_S 49
+#define SCTP_LOG_LOCK_CREATE 50
+#define SCTP_LOG_INITIAL_RTT 51
+#define SCTP_LOG_RTTVAR 52
+#define SCTP_LOG_SBALLOC 53
+#define SCTP_LOG_SBFREE 54
+#define SCTP_LOG_SBRESULT 55
+#define SCTP_FR_DUPED 56
+#define SCTP_FR_MARKED_EARLY 57
+#define SCTP_FR_CWND_REPORT 58
+#define SCTP_FR_CWND_REPORT_START 59
+#define SCTP_FR_CWND_REPORT_STOP 60
+#define SCTP_CWND_LOG_FROM_SEND 61
+#define SCTP_CWND_INITIALIZATION 62
+#define SCTP_CWND_LOG_FROM_T3 63
+#define SCTP_CWND_LOG_FROM_SACK 64
+#define SCTP_CWND_LOG_NO_CUMACK 65
+#define SCTP_CWND_LOG_FROM_RESEND 66
+#define SCTP_FR_LOG_CHECK_STRIKE 67
+#define SCTP_SEND_NOW_COMPLETES 68
+#define SCTP_CWND_LOG_FILL_OUTQ_CALLED 69
+#define SCTP_CWND_LOG_FILL_OUTQ_FILLS 70
+#define SCTP_LOG_FREE_SENT 71
+#define SCTP_NAGLE_APPLIED 72
+#define SCTP_NAGLE_SKIPPED 73
+#define SCTP_WAKESND_FROM_SACK 74
+#define SCTP_WAKESND_FROM_FWDTSN 75
+#define SCTP_NOWAKE_FROM_SACK 76
+#define SCTP_CWNDLOG_PRESEND 77
+#define SCTP_CWNDLOG_ENDSEND 78
+#define SCTP_AT_END_OF_SACK 79
+#define SCTP_REASON_FOR_SC 80
+#define SCTP_BLOCK_LOG_INTO_BLKA 81
+#define SCTP_ENTER_USER_RECV 82
+#define SCTP_USER_RECV_SACKS 83
+#define SCTP_SORECV_BLOCKSA 84
+#define SCTP_SORECV_BLOCKSB 85
+#define SCTP_SORECV_DONE 86
+#define SCTP_SACK_RWND_UPDATE 87
+#define SCTP_SORECV_ENTER 88
+#define SCTP_SORECV_ENTERPL 89
+#define SCTP_MBUF_INPUT 90
+#define SCTP_MBUF_IALLOC 91
+#define SCTP_MBUF_IFREE 92
+#define SCTP_MBUF_ICOPY 93
+#define SCTP_MBUF_SPLIT 94
+#define SCTP_SORCV_FREECTL 95
+#define SCTP_SORCV_DOESCPY 96
+#define SCTP_SORCV_DOESLCK 97
+#define SCTP_SORCV_DOESADJ 98
+#define SCTP_SORCV_BOTWHILE 99
+#define SCTP_SORCV_PASSBF 100
+#define SCTP_SORCV_ADJD 101
+#define SCTP_UNKNOWN_MAX 102
+#define SCTP_RANDY_STUFF 103
+#define SCTP_RANDY_STUFF1 104
+#define SCTP_STRMOUT_LOG_ASSIGN 105
+#define SCTP_STRMOUT_LOG_SEND 106
+#define SCTP_FLIGHT_LOG_DOWN_CA 107
+#define SCTP_FLIGHT_LOG_UP 108
+#define SCTP_FLIGHT_LOG_DOWN_GAP 109
+#define SCTP_FLIGHT_LOG_DOWN_RSND 110
+#define SCTP_FLIGHT_LOG_UP_RSND 111
+#define SCTP_FLIGHT_LOG_DOWN_RSND_TO 112
+#define SCTP_FLIGHT_LOG_DOWN_WP 113
+#define SCTP_FLIGHT_LOG_UP_REVOKE 114
+#define SCTP_FLIGHT_LOG_DOWN_PDRP 115
+#define SCTP_FLIGHT_LOG_DOWN_PMTU 116
+#define SCTP_SACK_LOG_NORMAL 117
+#define SCTP_SACK_LOG_EXPRESS 118
+#define SCTP_MAP_TSN_ENTERS 119
+#define SCTP_THRESHOLD_CLEAR 120
+#define SCTP_THRESHOLD_INCR 121
+#define SCTP_FLIGHT_LOG_DWN_WP_FWD 122
+#define SCTP_FWD_TSN_CHECK 123
+#define SCTP_LOG_MAX_TYPES 124
+/*
+ * To turn on various logging, you must first enable 'options KTR' and
+ * you might want to bump the entries with 'options KTR_ENTRIES=80000'.
+ * To get something to log, you define one of the logging defines
+ * (see LINT).
+ *
+ * This gets the code compiled in, but you still need to turn the
+ * logging flag on too in the sysctl (see sctp.h).
+ */
+
+#define SCTP_LOG_EVENT_UNKNOWN 0
+#define SCTP_LOG_EVENT_CWND 1
+#define SCTP_LOG_EVENT_BLOCK 2
+#define SCTP_LOG_EVENT_STRM 3
+#define SCTP_LOG_EVENT_FR 4
+#define SCTP_LOG_EVENT_MAP 5
+#define SCTP_LOG_EVENT_MAXBURST 6
+#define SCTP_LOG_EVENT_RWND 7
+#define SCTP_LOG_EVENT_MBCNT 8
+#define SCTP_LOG_EVENT_SACK 9
+#define SCTP_LOG_LOCK_EVENT 10
+#define SCTP_LOG_EVENT_RTT 11
+#define SCTP_LOG_EVENT_SB 12
+#define SCTP_LOG_EVENT_NAGLE 13
+#define SCTP_LOG_EVENT_WAKE 14
+#define SCTP_LOG_MISC_EVENT 15
+#define SCTP_LOG_EVENT_CLOSE 16
+#define SCTP_LOG_EVENT_MBUF 17
+#define SCTP_LOG_CHUNK_PROC 18
+#define SCTP_LOG_ERROR_RET 19
+
+#define SCTP_LOG_MAX_EVENT 20
+
+#define SCTP_LOCK_UNKNOWN 2
+
+
+/* number of associations by default for zone allocation */
+#define SCTP_MAX_NUM_OF_ASOC 40000
+/* how many addresses per assoc remote and local */
+#define SCTP_SCALE_FOR_ADDR 2
+
+/* default MULTIPLE_ASCONF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_MULTIPLE_ASCONFS 0
+
+/*
+ * Threshold for rwnd updates: we have to read (sb_hiwat >>
+ * SCTP_RWND_HIWAT_SHIFT) before we will look to see if we need to send a
+ * window update sack. When we look, we compare the last rwnd we sent vs. the
+ * current rwnd; the difference, too, must be greater than this value. Using 3
+ * divides the hiwat by 8, so for a 200k rwnd we need to read 24k, and for a
+ * 64k rwnd we need to read 8k. This seems about right.. I hope :-D.. We do
+ * set a minimum of one MTU on it, so if the rwnd is really small we will
+ * insist on a full MTU of 1500 bytes.
+ */
+#define SCTP_RWND_HIWAT_SHIFT 3
+
+/* How much of the rwnd a message must
+ * be taking up to start partial delivery.
+ * We calculate this by shifting the hi_water (recv_win)
+ * by the following amount: set to 1, partial delivery starts
+ * when a message holds 1/2 the rwnd; set to 2, when a message
+ * holds 1/4 the rwnd, etc.
+ */
+
+#define SCTP_PARTIAL_DELIVERY_SHIFT 1
+
+/*
+ * default HMAC for cookies, etc... use one of the AUTH HMAC id's
+ * SCTP_HMAC is the HMAC_ID to use
+ * SCTP_SIGNATURE_SIZE is the digest length
+ */
+#define SCTP_HMAC SCTP_AUTH_HMAC_ID_SHA1
+#define SCTP_SIGNATURE_SIZE SCTP_AUTH_DIGEST_LEN_SHA1
+#define SCTP_SIGNATURE_ALOC_SIZE SCTP_SIGNATURE_SIZE
+
+/*
+ * The SCTP protocol signature; this includes the version number encoded in
+ * the last 4 bits of the signature.
+ */
+#define PROTO_SIGNATURE_A 0x30000000
+#define SCTP_VERSION_NUMBER 0x3
+
+#define MAX_TSN 0xffffffff
+
+/* how many executions every N ticks */
+#define SCTP_ITERATOR_MAX_AT_ONCE 20
+
+/* number of clock ticks between iterator executions */
+#define SCTP_ITERATOR_TICKS 1
+
+/*
+ * option: If you comment out the following you will receive the old behavior
+ * of obeying cwnd for the fast retransmit algorithm. With this defined, a FR
+ * happens right away without waiting for the flightsize to drop below the
+ * cwnd value (which is reduced by the FR to 1/2 the inflight packets).
+ */
+#define SCTP_IGNORE_CWND_ON_FR 1
+
+/*
+ * Adds implementor's guide behavior to only use the newest highest update in
+ * SACK gap acks to figure out if you need to strike a chunk for FR.
+ */
+#define SCTP_NO_FR_UNLESS_SEGMENT_SMALLER 1
+
+/* default max I can burst out after a fast retransmit, 0 disables it */
+#define SCTP_DEF_MAX_BURST 4
+#define SCTP_DEF_HBMAX_BURST 4
+#define SCTP_DEF_FRMAX_BURST 4
+
+/* RTO calculation flag to say whether it
+ * is safe to determine local LAN or not.
+ */
+#define SCTP_RTT_FROM_NON_DATA 0
+#define SCTP_RTT_FROM_DATA 1
+
+#define PR_SCTP_UNORDERED_FLAG 0x0001
+
+/* IP hdr (20/40) + 12+2+2 (enet) + sctp common 12 */
+#define SCTP_FIRST_MBUF_RESV 68
+/* Packet transmit states in the sent field */
+#define SCTP_DATAGRAM_UNSENT 0
+#define SCTP_DATAGRAM_SENT 1
+#define SCTP_DATAGRAM_RESEND1 2 /* not used (in code, but may
+ * hit this value) */
+#define SCTP_DATAGRAM_RESEND2 3 /* not used (in code, but may
+ * hit this value) */
+#define SCTP_DATAGRAM_RESEND 4
+#define SCTP_DATAGRAM_ACKED 10010
+#define SCTP_DATAGRAM_MARKED 20010
+#define SCTP_FORWARD_TSN_SKIP 30010
+#define SCTP_DATAGRAM_NR_ACKED 40010
+
+/* chunk output send from locations */
+#define SCTP_OUTPUT_FROM_USR_SEND 0
+#define SCTP_OUTPUT_FROM_T3 1
+#define SCTP_OUTPUT_FROM_INPUT_ERROR 2
+#define SCTP_OUTPUT_FROM_CONTROL_PROC 3
+#define SCTP_OUTPUT_FROM_SACK_TMR 4
+#define SCTP_OUTPUT_FROM_SHUT_TMR 5
+#define SCTP_OUTPUT_FROM_HB_TMR 6
+#define SCTP_OUTPUT_FROM_SHUT_ACK_TMR 7
+#define SCTP_OUTPUT_FROM_ASCONF_TMR 8
+#define SCTP_OUTPUT_FROM_STRRST_TMR 9
+#define SCTP_OUTPUT_FROM_AUTOCLOSE_TMR 10
+#define SCTP_OUTPUT_FROM_EARLY_FR_TMR 11
+#define SCTP_OUTPUT_FROM_STRRST_REQ 12
+#define SCTP_OUTPUT_FROM_USR_RCVD 13
+#define SCTP_OUTPUT_FROM_COOKIE_ACK 14
+#define SCTP_OUTPUT_FROM_DRAIN 15
+#define SCTP_OUTPUT_FROM_CLOSING 16
+#define SCTP_OUTPUT_FROM_SOCKOPT 17
+
+/* SCTP chunk types are moved to sctp.h for application (NAT, FW) use */
+
+/* align to 32-bit sizes */
+#define SCTP_SIZE32(x) ((((x) + 3) >> 2) << 2)
+
+#define IS_SCTP_CONTROL(a) (((a)->chunk_type != SCTP_DATA) && ((a)->chunk_type != SCTP_IDATA))
+#define IS_SCTP_DATA(a) (((a)->chunk_type == SCTP_DATA) || ((a)->chunk_type == SCTP_IDATA))
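A minimal sketch (not part of the upstream import) of what SCTP_SIZE32() does: it rounds a length up to the next multiple of 4, the padding granularity SCTP requires for chunks and parameters:

    #include <assert.h>

    #define SCTP_SIZE32(x) ((((x) + 3) >> 2) << 2)   /* same definition as above */

    int main(void) {
        assert(SCTP_SIZE32(0) == 0);
        assert(SCTP_SIZE32(1) == 4);   /* rounds up */
        assert(SCTP_SIZE32(4) == 4);   /* already aligned */
        assert(SCTP_SIZE32(5) == 8);
        return 0;
    }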
+
+
+/* SCTP parameter types */
+/*************0x0000 series*************/
+#define SCTP_HEARTBEAT_INFO 0x0001
+#if defined(__Userspace__)
+#define SCTP_CONN_ADDRESS 0x0004
+#endif
+#define SCTP_IPV4_ADDRESS 0x0005
+#define SCTP_IPV6_ADDRESS 0x0006
+#define SCTP_STATE_COOKIE 0x0007
+#define SCTP_UNRECOG_PARAM 0x0008
+#define SCTP_COOKIE_PRESERVE 0x0009
+#define SCTP_HOSTNAME_ADDRESS 0x000b
+#define SCTP_SUPPORTED_ADDRTYPE 0x000c
+
+/* RFC 6525 */
+#define SCTP_STR_RESET_OUT_REQUEST 0x000d
+#define SCTP_STR_RESET_IN_REQUEST 0x000e
+#define SCTP_STR_RESET_TSN_REQUEST 0x000f
+#define SCTP_STR_RESET_RESPONSE 0x0010
+#define SCTP_STR_RESET_ADD_OUT_STREAMS 0x0011
+#define SCTP_STR_RESET_ADD_IN_STREAMS 0x0012
+
+#define SCTP_MAX_RESET_PARAMS 2
+#define SCTP_STREAM_RESET_TSN_DELTA 0x1000
+
+/*************0x4000 series*************/
+
+/*************0x8000 series*************/
+#define SCTP_ECN_CAPABLE 0x8000
+
+/* RFC 4895 */
+#define SCTP_RANDOM 0x8002
+#define SCTP_CHUNK_LIST 0x8003
+#define SCTP_HMAC_LIST 0x8004
+/* RFC 4820 */
+#define SCTP_PAD 0x8005
+/* RFC 5061 */
+#define SCTP_SUPPORTED_CHUNK_EXT 0x8008
+
+/*************0xC000 series*************/
+#define SCTP_PRSCTP_SUPPORTED 0xc000
+/* RFC 5061 */
+#define SCTP_ADD_IP_ADDRESS 0xc001
+#define SCTP_DEL_IP_ADDRESS 0xc002
+#define SCTP_ERROR_CAUSE_IND 0xc003
+#define SCTP_SET_PRIM_ADDR 0xc004
+#define SCTP_SUCCESS_REPORT 0xc005
+#define SCTP_ULP_ADAPTATION 0xc006
+/* behave-nat-draft */
+#define SCTP_HAS_NAT_SUPPORT 0xc007
+#define SCTP_NAT_VTAGS 0xc008
+
+/* bits for TOS field */
+#define SCTP_ECT0_BIT 0x02
+#define SCTP_ECT1_BIT 0x01
+#define SCTP_CE_BITS 0x03
+
+/* below turns off above */
+#define SCTP_FLEXIBLE_ADDRESS 0x20
+#define SCTP_NO_HEARTBEAT 0x40
+
+/* mask to get sticky */
+#define SCTP_STICKY_OPTIONS_MASK 0x0c
+
+
+/*
+ * SCTP states for internal state machine
+ */
+#define SCTP_STATE_EMPTY 0x0000
+#define SCTP_STATE_INUSE 0x0001
+#define SCTP_STATE_COOKIE_WAIT 0x0002
+#define SCTP_STATE_COOKIE_ECHOED 0x0004
+#define SCTP_STATE_OPEN 0x0008
+#define SCTP_STATE_SHUTDOWN_SENT 0x0010
+#define SCTP_STATE_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_STATE_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_STATE_SHUTDOWN_PENDING 0x0080
+#define SCTP_STATE_CLOSED_SOCKET 0x0100
+#define SCTP_STATE_ABOUT_TO_BE_FREED 0x0200
+#define SCTP_STATE_PARTIAL_MSG_LEFT 0x0400
+#define SCTP_STATE_WAS_ABORTED 0x0800
+#define SCTP_STATE_IN_ACCEPT_QUEUE 0x1000
+#define SCTP_STATE_MASK 0x007f
+
+#define SCTP_GET_STATE(_stcb) \
+ ((_stcb)->asoc.state & SCTP_STATE_MASK)
+#define SCTP_SET_STATE(_stcb, _state) \
+ sctp_set_state(_stcb, _state)
+#define SCTP_CLEAR_SUBSTATE(_stcb, _substate) \
+ (_stcb)->asoc.state &= ~(_substate)
+#define SCTP_ADD_SUBSTATE(_stcb, _substate) \
+ sctp_add_substate(_stcb, _substate)
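The state word packs a base state into the bits covered by SCTP_STATE_MASK (0x007f) and keeps substate flags above it, which is why reads must mask. A minimal sketch using the values defined above (a stand-in variable, not the real stcb):

    #include <assert.h>

    #define SCTP_STATE_OPEN             0x0008   /* copied from the defines above */
    #define SCTP_STATE_SHUTDOWN_PENDING 0x0080
    #define SCTP_STATE_MASK             0x007f

    int main(void) {
        unsigned int state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
        assert((state & SCTP_STATE_MASK) == SCTP_STATE_OPEN); /* base state survives */
        assert(state & SCTP_STATE_SHUTDOWN_PENDING);          /* flag sits above mask */
        return 0;
    }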
+
+/* SCTP reachability state for each address */
+#define SCTP_ADDR_REACHABLE 0x001
+#define SCTP_ADDR_NO_PMTUD 0x002
+#define SCTP_ADDR_NOHB 0x004
+#define SCTP_ADDR_BEING_DELETED 0x008
+#define SCTP_ADDR_NOT_IN_ASSOC 0x010
+#define SCTP_ADDR_OUT_OF_SCOPE 0x080
+#define SCTP_ADDR_UNCONFIRMED 0x200
+#define SCTP_ADDR_REQ_PRIMARY 0x400
+/* JRS 5/13/07 - Added potentially failed state for CMT PF */
+#define SCTP_ADDR_PF 0x800
+
+/* bound address types (e.g. valid address types to allow) */
+#define SCTP_BOUND_V6 0x01
+#define SCTP_BOUND_V4 0x02
+
+/*
+ * what is the default number of mbufs in a chain I allow before switching to
+ * a cluster
+ */
+#define SCTP_DEFAULT_MBUFS_IN_CHAIN 5
+
+/* How long a cookie lives in milliseconds */
+#define SCTP_DEFAULT_COOKIE_LIFE 60000
+
+/* Maximum the mapping array will grow to (TSN mapping array) */
+#define SCTP_MAPPING_ARRAY 512
+
+/* size of the initial malloc on the mapping array */
+#define SCTP_INITIAL_MAPPING_ARRAY 16
+/* how much we grow the mapping array each call */
+#define SCTP_MAPPING_ARRAY_INCR 32
+
+/*
+ * Here we define the timer types used by the implementation as arguments in
+ * the set/get timer type calls.
+ */
+#define SCTP_TIMER_INIT 0
+#define SCTP_TIMER_RECV 1
+#define SCTP_TIMER_SEND 2
+#define SCTP_TIMER_HEARTBEAT 3
+#define SCTP_TIMER_PMTU 4
+#define SCTP_TIMER_MAXSHUTDOWN 5
+#define SCTP_TIMER_SIGNATURE 6
+/*
+ * Number of timer types in the base SCTP structure used in the set/get
+ * calls; each has a base default.
+ */
+#define SCTP_NUM_TMRS 7
+
+/* timer types */
+#define SCTP_TIMER_TYPE_NONE 0
+#define SCTP_TIMER_TYPE_SEND 1
+#define SCTP_TIMER_TYPE_INIT 2
+#define SCTP_TIMER_TYPE_RECV 3
+#define SCTP_TIMER_TYPE_SHUTDOWN 4
+#define SCTP_TIMER_TYPE_HEARTBEAT 5
+#define SCTP_TIMER_TYPE_COOKIE 6
+#define SCTP_TIMER_TYPE_NEWCOOKIE 7
+#define SCTP_TIMER_TYPE_PATHMTURAISE 8
+#define SCTP_TIMER_TYPE_SHUTDOWNACK 9
+#define SCTP_TIMER_TYPE_ASCONF 10
+#define SCTP_TIMER_TYPE_SHUTDOWNGUARD 11
+#define SCTP_TIMER_TYPE_AUTOCLOSE 12
+#define SCTP_TIMER_TYPE_STRRESET 13
+#define SCTP_TIMER_TYPE_INPKILL 14
+#define SCTP_TIMER_TYPE_ASOCKILL 15
+#define SCTP_TIMER_TYPE_ADDR_WQ 16
+#define SCTP_TIMER_TYPE_PRIM_DELETED 17
+/* add new timers here - and increment LAST */
+#define SCTP_TIMER_TYPE_LAST 18
+
+#define SCTP_IS_TIMER_TYPE_VALID(t) (((t) > SCTP_TIMER_TYPE_NONE) && \
+ ((t) < SCTP_TIMER_TYPE_LAST))
+
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+/* Number of ticks to run the main timer at in msec */
+#define SCTP_MAIN_TIMER_DEFAULT 10
+#endif
+
+/* max number of TSN's dup'd that I will hold */
+#define SCTP_MAX_DUP_TSNS 20
+
+/*
+ * Here we define the types used when setting the retry amounts.
+ */
+/* How many drop re-attempts we make on INIT/COOKIE-ECHO */
+#define SCTP_RETRY_DROPPED_THRESH 4
+
+/*
+ * Maximum number of chunks a single association can have on it. Note that
+ * this is a squishy number, since the count can run over this if the user
+ * sends a large message down: the fragmented chunks don't count until
+ * AFTER the message is on queue; it would be the next send that blocks
+ * things. This number gets tuned up at boot in sctp_init() using the
+ * number of clusters as a base. This way high-bandwidth environments will
+ * not get impacted by lower-bandwidth peers sending a bunch of 1-byte chunks.
+ */
+#define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 512
+
+/*
+ * Basically the minimum amount of time before I do an early FR. Making this
+ * value too low will cause duplicate retransmissions.
+ */
+#define SCTP_MINFR_MSEC_TIMER 250
+/* The floor this value is allowed to fall to when starting a timer. */
+#define SCTP_MINFR_MSEC_FLOOR 20
+
+/* init timer def = 1 sec */
+#define SCTP_INIT_SEC 1
+
+/* send timer def = 1 seconds */
+#define SCTP_SEND_SEC 1
+
+/* recv timer def = 200ms */
+#define SCTP_RECV_MSEC 200
+
+/* 30 seconds + RTO (in ms) */
+#define SCTP_HB_DEFAULT_MSEC 30000
+
+/*
+ * This is how long a secret lives (NOT how long a cookie lives): how many
+ * ticks the current secret will remain valid.
+ */
+#define SCTP_DEFAULT_SECRET_LIFE_SEC 3600
+
+#define SCTP_RTO_UPPER_BOUND (60000) /* 60 sec in ms */
+#define SCTP_RTO_LOWER_BOUND (1000) /* 1 sec in ms */
+#define SCTP_RTO_INITIAL (3000) /* 3 sec in ms */
+
+
+#define SCTP_INP_KILL_TIMEOUT 20 /* number of ms to retry kill of inpcb */
+#define SCTP_ASOC_KILL_TIMEOUT 10 /* number of ms to retry kill of an asoc */
+
+#define SCTP_DEF_MAX_INIT 8
+#define SCTP_DEF_MAX_SEND 10
+#define SCTP_DEF_MAX_PATH_RTX 5
+#define SCTP_DEF_PATH_PF_THRESHOLD SCTP_DEF_MAX_PATH_RTX
+
+#define SCTP_DEF_PMTU_RAISE_SEC 600 /* 10 min between raise attempts */
+
+
+/* How many streams I request initially by default */
+#define SCTP_OSTREAM_INITIAL 10
+#define SCTP_ISTREAM_INITIAL 2048
+
+/*
+ * How many smallest_mtu's need to increase before a window update sack is
+ * sent, i.e. we send a window update when (incr * this > hiwat). Should be
+ * a power of 2.
+ */
+#define SCTP_MINIMAL_RWND (4096) /* minimal rwnd */
+
+#define SCTP_ADDRMAX 16
+
+/* SCTP DEBUG Switch parameters */
+#define SCTP_DEBUG_TIMER1 0x00000001
+#define SCTP_DEBUG_TIMER2 0x00000002 /* unused */
+#define SCTP_DEBUG_TIMER3 0x00000004 /* unused */
+#define SCTP_DEBUG_TIMER4 0x00000008
+#define SCTP_DEBUG_OUTPUT1 0x00000010
+#define SCTP_DEBUG_OUTPUT2 0x00000020
+#define SCTP_DEBUG_OUTPUT3 0x00000040
+#define SCTP_DEBUG_OUTPUT4 0x00000080
+#define SCTP_DEBUG_UTIL1 0x00000100
+#define SCTP_DEBUG_UTIL2 0x00000200 /* unused */
+#define SCTP_DEBUG_AUTH1 0x00000400
+#define SCTP_DEBUG_AUTH2 0x00000800 /* unused */
+#define SCTP_DEBUG_INPUT1 0x00001000
+#define SCTP_DEBUG_INPUT2 0x00002000
+#define SCTP_DEBUG_INPUT3 0x00004000
+#define SCTP_DEBUG_INPUT4 0x00008000 /* unused */
+#define SCTP_DEBUG_ASCONF1 0x00010000
+#define SCTP_DEBUG_ASCONF2 0x00020000
+#define SCTP_DEBUG_OUTPUT5 0x00040000 /* unused */
+#define SCTP_DEBUG_XXX 0x00080000 /* unused */
+#define SCTP_DEBUG_PCB1 0x00100000
+#define SCTP_DEBUG_PCB2 0x00200000 /* unused */
+#define SCTP_DEBUG_PCB3 0x00400000
+#define SCTP_DEBUG_PCB4 0x00800000
+#define SCTP_DEBUG_INDATA1 0x01000000
+#define SCTP_DEBUG_INDATA2 0x02000000 /* unused */
+#define SCTP_DEBUG_INDATA3 0x04000000 /* unused */
+#define SCTP_DEBUG_CRCOFFLOAD 0x08000000 /* unused */
+#define SCTP_DEBUG_USRREQ1 0x10000000 /* unused */
+#define SCTP_DEBUG_USRREQ2 0x20000000 /* unused */
+#define SCTP_DEBUG_PEEL1 0x40000000
+#if defined(__Userspace__)
+#define SCTP_DEBUG_USR 0x80000000
+#else
+#define SCTP_DEBUG_XXXXX 0x80000000 /* unused */
+#endif
+#define SCTP_DEBUG_ALL 0x7ff3ffff
+#define SCTP_DEBUG_NOISY 0x00040000
+
+/* What the sender needs to see to avoid SWS, or we consider the peer's rwnd 0 */
+#define SCTP_SWS_SENDER_DEF 1420
+
+/*
+ * SWS is scaled to the sb_hiwat of the socket. A value of 2 is hiwat/4, 1
+ * would be hiwat/2 etc.
+ */
+/* What the receiver needs to see in the sockbuf, or we tell the peer it's 1 */
+#define SCTP_SWS_RECEIVER_DEF 3000
+
+#define SCTP_INITIAL_CWND 4380
+
+#define SCTP_DEFAULT_MTU 1500 /* emergency default MTU */
+/* amount peer is obligated to have in rwnd or I will abort */
+#define SCTP_MIN_RWND 1500
+
+#define SCTP_DEFAULT_MAXSEGMENT 65535
+
+#define SCTP_CHUNK_BUFFER_SIZE 512
+#define SCTP_PARAM_BUFFER_SIZE 512
+
+/* small chunk store for looking at chunk_list in auth */
+#define SCTP_SMALL_CHUNK_STORE 260
+
+#define SCTP_HOW_MANY_SECRETS 2 /* how many secrets I keep */
+
+#define SCTP_NUMBER_OF_SECRETS 8 /* or 8 * 4 = 32 octets */
+#define SCTP_SECRET_SIZE 32 /* number of octets in 256 bits */
+
+
+/*
+ * SCTP upper layer notifications
+ */
+#define SCTP_NOTIFY_ASSOC_UP 1
+#define SCTP_NOTIFY_ASSOC_DOWN 2
+#define SCTP_NOTIFY_INTERFACE_DOWN 3
+#define SCTP_NOTIFY_INTERFACE_UP 4
+#define SCTP_NOTIFY_SENT_DG_FAIL 5
+#define SCTP_NOTIFY_UNSENT_DG_FAIL 6
+#define SCTP_NOTIFY_SPECIAL_SP_FAIL 7
+#define SCTP_NOTIFY_ASSOC_LOC_ABORTED 8
+#define SCTP_NOTIFY_ASSOC_REM_ABORTED 9
+#define SCTP_NOTIFY_ASSOC_RESTART 10
+#define SCTP_NOTIFY_PEER_SHUTDOWN 11
+#define SCTP_NOTIFY_ASCONF_ADD_IP 12
+#define SCTP_NOTIFY_ASCONF_DELETE_IP 13
+#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 14
+#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 15
+#define SCTP_NOTIFY_INTERFACE_CONFIRMED 16
+#define SCTP_NOTIFY_STR_RESET_RECV 17
+#define SCTP_NOTIFY_STR_RESET_SEND 18
+#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 19
+#define SCTP_NOTIFY_STR_RESET_FAILED_IN 20
+#define SCTP_NOTIFY_STR_RESET_DENIED_OUT 21
+#define SCTP_NOTIFY_STR_RESET_DENIED_IN 22
+#define SCTP_NOTIFY_AUTH_NEW_KEY 23
+#define SCTP_NOTIFY_AUTH_FREE_KEY 24
+#define SCTP_NOTIFY_NO_PEER_AUTH 25
+#define SCTP_NOTIFY_SENDER_DRY 26
+#define SCTP_NOTIFY_REMOTE_ERROR 27
+
+/* This is the value, for messages that are NOT completely
+ * copied down, at which we will start to split the message.
+ * So, with our default, we split only if the piece we
+ * want to take will fill up a full MTU (assuming
+ * a 1500-byte MTU).
+ */
+#define SCTP_DEFAULT_SPLIT_POINT_MIN 2904
+
+/* Maximum length of diagnostic information in error causes */
+#define SCTP_DIAG_INFO_LEN 128
+
+/* ABORT CODES and other tell-tale location
+ * codes are generated by adding the below
+ * to the instance id.
+ */
+
+/* File defines */
+#define SCTP_FROM_SCTP_INPUT 0x10000000
+#define SCTP_FROM_SCTP_PCB 0x20000000
+#define SCTP_FROM_SCTP_INDATA 0x30000000
+#define SCTP_FROM_SCTP_TIMER 0x40000000
+#define SCTP_FROM_SCTP_USRREQ 0x50000000
+#define SCTP_FROM_SCTPUTIL 0x60000000
+#define SCTP_FROM_SCTP6_USRREQ 0x70000000
+#define SCTP_FROM_SCTP_ASCONF 0x80000000
+#define SCTP_FROM_SCTP_OUTPUT 0x90000000
+#define SCTP_FROM_SCTP_PEELOFF 0xa0000000
+#define SCTP_FROM_SCTP_SYSCTL 0xb0000000
+#define SCTP_FROM_SCTP_CC_FUNCTIONS 0xc0000000
+
+/* Location ID's */
+#define SCTP_LOC_1 0x00000001
+#define SCTP_LOC_2 0x00000002
+#define SCTP_LOC_3 0x00000003
+#define SCTP_LOC_4 0x00000004
+#define SCTP_LOC_5 0x00000005
+#define SCTP_LOC_6 0x00000006
+#define SCTP_LOC_7 0x00000007
+#define SCTP_LOC_8 0x00000008
+#define SCTP_LOC_9 0x00000009
+#define SCTP_LOC_10 0x0000000a
+#define SCTP_LOC_11 0x0000000b
+#define SCTP_LOC_12 0x0000000c
+#define SCTP_LOC_13 0x0000000d
+#define SCTP_LOC_14 0x0000000e
+#define SCTP_LOC_15 0x0000000f
+#define SCTP_LOC_16 0x00000010
+#define SCTP_LOC_17 0x00000011
+#define SCTP_LOC_18 0x00000012
+#define SCTP_LOC_19 0x00000013
+#define SCTP_LOC_20 0x00000014
+#define SCTP_LOC_21 0x00000015
+#define SCTP_LOC_22 0x00000016
+#define SCTP_LOC_23 0x00000017
+#define SCTP_LOC_24 0x00000018
+#define SCTP_LOC_25 0x00000019
+#define SCTP_LOC_26 0x0000001a
+#define SCTP_LOC_27 0x0000001b
+#define SCTP_LOC_28 0x0000001c
+#define SCTP_LOC_29 0x0000001d
+#define SCTP_LOC_30 0x0000001e
+#define SCTP_LOC_31 0x0000001f
+#define SCTP_LOC_32 0x00000020
+#define SCTP_LOC_33 0x00000021
+#define SCTP_LOC_34 0x00000022
+#define SCTP_LOC_35 0x00000023
+#define SCTP_LOC_36 0x00000024
+#define SCTP_LOC_37 0x00000025
+
+/* Free assoc codes */
+#define SCTP_NORMAL_PROC 0
+#define SCTP_PCBFREE_NOFORCE 1
+#define SCTP_PCBFREE_FORCE 2
+
+/* From codes for adding addresses */
+#define SCTP_ADDR_IS_CONFIRMED 8
+#define SCTP_ADDR_DYNAMIC_ADDED 6
+#define SCTP_IN_COOKIE_PROC 100
+#define SCTP_ALLOC_ASOC 1
+#define SCTP_LOAD_ADDR_2 2
+#define SCTP_LOAD_ADDR_3 3
+#define SCTP_LOAD_ADDR_4 4
+#define SCTP_LOAD_ADDR_5 5
+
+#define SCTP_DONOT_SETSCOPE 0
+#define SCTP_DO_SETSCOPE 1
+
+
+/* This value determines the default for when
+ * we try to add more on the send queue, if
+ * there is room. This prevents us from cycling
+ * into the copy_resume routine too often if
+ * we have not got enough space to add a decently
+ * sized message. Note that if we have enough
+ * space to complete the message copy, we will always
+ * add to the message, no matter the size. It's
+ * only when we reach the point that we have some left
+ * to add and there is only room for part of it that we
+ * will use this threshold. It's also a sysctl.
+ */
+#define SCTP_DEFAULT_ADD_MORE 1452
+
+#ifndef SCTP_PCBHASHSIZE
+/* default number of association hash buckets in each endpoint */
+#define SCTP_PCBHASHSIZE 256
+#endif
+#ifndef SCTP_TCBHASHSIZE
+#define SCTP_TCBHASHSIZE 1024
+#endif
+
+#ifndef SCTP_CHUNKQUEUE_SCALE
+#define SCTP_CHUNKQUEUE_SCALE 10
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+/* clock variance is 1 ms */
+#define SCTP_CLOCK_GRANULARITY 1
+#else
+/* clock variance is 10 ms */
+#define SCTP_CLOCK_GRANULARITY 10
+#endif
+#define IP_HDR_SIZE 40 /* we use the size of an IPv6 header here; this
+			* overstates it a small amount for IPv4, but it
+			* simplifies the IPv6 addition */
+
+/* Argument magic number for sctp_inpcb_free() */
+
+/* third argument */
+#define SCTP_CALLED_DIRECTLY_NOCMPSET 0
+#define SCTP_CALLED_AFTER_CMPSET_OFCLOSE 1
+#define SCTP_CALLED_FROM_INPKILL_TIMER 2
+/* second argument */
+#define SCTP_FREE_SHOULD_USE_ABORT 1
+#define SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE 0
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132 /* the Official IANA number :-) */
+#endif /* !IPPROTO_SCTP */
+
+#define SCTP_MAX_DATA_BUNDLING 256
+
+/* Modular (serial number) comparison; see RFC 1982 for details. */
+#define SCTP_UINT16_GT(a, b) (((a < b) && ((uint16_t)(b - a) > (1U<<15))) || \
+ ((a > b) && ((uint16_t)(a - b) < (1U<<15))))
+#define SCTP_UINT16_GE(a, b) (SCTP_UINT16_GT(a, b) || (a == b))
+#define SCTP_UINT32_GT(a, b) (((a < b) && ((uint32_t)(b - a) > (1U<<31))) || \
+ ((a > b) && ((uint32_t)(a - b) < (1U<<31))))
+#define SCTP_UINT32_GE(a, b) (SCTP_UINT32_GT(a, b) || (a == b))
+
+#define SCTP_SSN_GT(a, b) SCTP_UINT16_GT(a, b)
+#define SCTP_SSN_GE(a, b) SCTP_UINT16_GE(a, b)
+#define SCTP_TSN_GT(a, b) SCTP_UINT32_GT(a, b)
+#define SCTP_TSN_GE(a, b) SCTP_UINT32_GE(a, b)
+#define SCTP_MID_GT(i, a, b) (((i) == 1) ? SCTP_UINT32_GT(a, b) : SCTP_UINT16_GT((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_GE(i, a, b) (((i) == 1) ? SCTP_UINT32_GE(a, b) : SCTP_UINT16_GE((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_EQ(i, a, b) (((i) == 1) ? a == b : (uint16_t)a == (uint16_t)b)
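These comparisons implement RFC 1982 serial number arithmetic, so ordering survives the 32-bit wraparound of TSNs. A minimal sketch (not part of the upstream import), reusing the SCTP_UINT32_GT definition from above:

    #include <assert.h>
    #include <stdint.h>

    #define SCTP_UINT32_GT(a, b) (((a < b) && ((uint32_t)(b - a) > (1U<<31))) || \
                                  ((a > b) && ((uint32_t)(a - b) < (1U<<31))))

    int main(void) {
        uint32_t old_tsn = 0xFFFFFFF0U, new_tsn = 0x00000005U;
        assert(SCTP_UINT32_GT(new_tsn, old_tsn));  /* wrapped, but still newer */
        assert(!SCTP_UINT32_GT(old_tsn, new_tsn));
        assert(SCTP_UINT32_GT(10U, 5U));           /* the ordinary, non-wrapped case */
        return 0;
    }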
+
+/* Mapping array manipulation routines */
+#define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
+#define SCTP_SET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] |= (0x01 << ((gap & 0x07))))
+#define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
+#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
+ if (tsn >= mapping_tsn) { \
+ gap = tsn - mapping_tsn; \
+ } else { \
+ gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
+ } \
+ } while (0)
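The mapping array is a plain bitmap with one bit per TSN, indexed by the gap from the array's base TSN (eight TSNs per byte). A sketch of the set/test path using the same shifts as the macros above (not part of the upstream import; the non-wrapping case only):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        uint8_t map[8];
        uint32_t mapping_tsn = 1000, tsn = 1013, gap;

        memset(map, 0, sizeof(map));
        gap = tsn - mapping_tsn;                           /* SCTP_CALC_TSN_TO_GAP */
        map[gap >> 3] |= (uint8_t)(0x01 << (gap & 0x07));  /* SCTP_SET_TSN_PRESENT */
        assert((map[gap >> 3] >> (gap & 0x07)) & 0x01);    /* SCTP_IS_TSN_PRESENT */
        return 0;
    }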
+
+
+#define SCTP_RETRAN_DONE -1
+#define SCTP_RETRAN_EXIT -2
+
+/*
+ * This value defines the number of vtag block time wait entries per list
+ * element. Each entry will take two 4-byte ints (and of course the overhead
+ * of the next pointer as well). Using 15 as an example will yield ((8 *
+ * 15) + 8) or 128 bytes of overhead for each timewait block that gets
+ * initialized. Increasing it to 31 would yield 256 bytes per block.
+ */
+#define SCTP_NUMBER_IN_VTAG_BLOCK 15
+/*
+ * If we use the STACK option, we have an array of this many head pointers.
+ * The tag is mod'd with the array size to find the bucket, and then all
+ * entries in that bucket must be searched to see if the tag is in timed
+ * wait. If so, we reject it.
+ */
+#define SCTP_STACK_VTAG_HASH_SIZE 32
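For example (a minimal sketch, not part of the upstream import), a vtag is mapped to its time-wait bucket like this before the per-bucket scan:

    #include <stdint.h>
    #include <stdio.h>

    #define SCTP_STACK_VTAG_HASH_SIZE 32   /* as defined above */

    int main(void) {
        uint32_t vtag = 0xDEADBEEFU;
        /* mod by the table size; equivalent to (vtag & 31) for a size of 32 */
        uint32_t bucket = vtag % SCTP_STACK_VTAG_HASH_SIZE;
        printf("vtag 0x%08x -> bucket %u\n", vtag, bucket);
        return 0;
    }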
+
+/*
+ * Number of seconds of time wait for a vtag.
+ */
+#define SCTP_TIME_WAIT 60
+
+/* How many microseconds is the cutoff for
+ * local LAN type RTTs.
+ * We allow 900us for the RTT.
+ */
+#define SCTP_LOCAL_LAN_RTT 900
+#define SCTP_LAN_UNKNOWN 0
+#define SCTP_LAN_LOCAL 1
+#define SCTP_LAN_INTERNET 2
+
+#define SCTP_SEND_BUFFER_SPLITTING 0x00000001
+#define SCTP_RECV_BUFFER_SPLITTING 0x00000002
+
+/* The system retains a cache of free chunks so as to
+ * cut down on calls to the memory allocation system. There
+ * is a per-association limit of free items and an overall
+ * system limit. If either one gets hit, then the resource
+ * stops being cached.
+ */
+
+#define SCTP_DEF_ASOC_RESC_LIMIT 10
+#define SCTP_DEF_SYSTEM_RESC_LIMIT 1000
+
+/*-
+ * defines for socket lock states.
+ * Used by __APPLE__
+ */
+#define SCTP_SO_LOCKED 1
+#define SCTP_SO_NOT_LOCKED 0
+
+
+/*-
+ * For address locks, do we hold the lock?
+ */
+#define SCTP_ADDR_LOCKED 1
+#define SCTP_ADDR_NOT_LOCKED 0
+
+#define IN4_ISPRIVATE_ADDRESS(a) \
+ ((((uint8_t *)&(a)->s_addr)[0] == 10) || \
+ ((((uint8_t *)&(a)->s_addr)[0] == 172) && \
+ (((uint8_t *)&(a)->s_addr)[1] >= 16) && \
+ (((uint8_t *)&(a)->s_addr)[1] <= 32)) || \
+ ((((uint8_t *)&(a)->s_addr)[0] == 192) && \
+ (((uint8_t *)&(a)->s_addr)[1] == 168)))
+
+#define IN4_ISLOOPBACK_ADDRESS(a) \
+ (((uint8_t *)&(a)->s_addr)[0] == 127)
+
+#define IN4_ISLINKLOCAL_ADDRESS(a) \
+ ((((uint8_t *)&(a)->s_addr)[0] == 169) && \
+ (((uint8_t *)&(a)->s_addr)[1] == 254))
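These macros inspect s_addr one octet at a time, so they behave the same regardless of host byte order. A small usage sketch (not part of the upstream import; it reuses the IN4_* macros defined directly above):

    #include <arpa/inet.h>
    #include <assert.h>

    int main(void) {
        struct in_addr a;

        assert(inet_pton(AF_INET, "10.1.2.3", &a) == 1);
        assert(IN4_ISPRIVATE_ADDRESS(&a));       /* RFC 1918 space */
        assert(inet_pton(AF_INET, "127.0.0.1", &a) == 1);
        assert(IN4_ISLOOPBACK_ADDRESS(&a));
        assert(inet_pton(AF_INET, "169.254.0.1", &a) == 1);
        assert(IN4_ISLINKLOCAL_ADDRESS(&a));
        return 0;
    }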
+
+/* Maximum size of optval for IPPROTO_SCTP level socket options. */
+#define SCTP_SOCKET_OPTION_LIMIT (64 * 1024)
+
+#if defined(__Userspace__)
+#if defined(_WIN32)
+#define SCTP_GETTIME_TIMEVAL(x) getwintimeofday(x)
+#define SCTP_GETPTIME_TIMEVAL(x) getwintimeofday(x) /* this doesn't seem to ever be used.. */
+#else
+#define SCTP_GETTIME_TIMEVAL(x) gettimeofday(x, NULL)
+#define SCTP_GETPTIME_TIMEVAL(x) gettimeofday(x, NULL)
+#endif
+#endif
+
+#if defined(_KERNEL)
+#define SCTP_GETTIME_TIMEVAL(x) (getmicrouptime(x))
+#define SCTP_GETPTIME_TIMEVAL(x) (microuptime(x))
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+#define sctp_sowwakeup(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup(so); \
+ } \
+} while (0)
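The macro above defers socket wakeups while SCTP_PCB_FLAGS_DONT_WAKE is set and records the missed one in SCTP_PCB_FLAGS_WAKEOUTPUT. A stripped-down sketch of the idea with hypothetical names (the real flag handling lives elsewhere in the PCB code):

    #define DONT_WAKE  0x1   /* hypothetical stand-ins for the PCB flags */
    #define WAKEOUTPUT 0x2

    /* Called once wakeups are allowed again: deliver any wakeup that a
     * macro like sctp_sowwakeup() deferred while DONT_WAKE was set. */
    static void allow_wakeups(unsigned int *flags)
    {
        *flags &= ~DONT_WAKE;
        if (*flags & WAKEOUTPUT) {
            *flags &= ~WAKEOUTPUT;
            /* sowwakeup(so);  -- the deferred wakeup fires here */
        }
    }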
+
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+#define sctp_sowwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ SOCKBUF_UNLOCK(&((so)->so_snd)); \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup_locked(so); \
+ } \
+} while (0)
+#else
+#define sctp_sowwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ SOCKBUF_UNLOCK(&((so)->so_snd)); \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup(so); \
+ } \
+} while (0)
+#endif
+
+#define sctp_sorwakeup(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ } else { \
+ sorwakeup(so); \
+ } \
+} while (0)
+
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+#define sctp_sorwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ SOCKBUF_UNLOCK(&((so)->so_rcv)); \
+ } else { \
+ sorwakeup_locked(so); \
+ } \
+} while (0)
+#else
+
+#define sctp_sorwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ SOCKBUF_UNLOCK(&((so)->so_rcv)); \
+ } else { \
+ sorwakeup(so); \
+ } \
+} while (0)
+#endif
+
+#endif /* _KERNEL || __Userspace__*/
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.c
new file mode 100644
index 000000000..0b5a06e06
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.c
@@ -0,0 +1,831 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 362498 2020-06-22 14:36:14Z tuexen $");
+
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/gsb_crc32.h>
+#include <sys/mbuf.h>
+
+#include <netinet/sctp.h>
+#include <netinet/sctp_crc32.h>
+#if defined(SCTP) || defined(SCTP_SUPPORT)
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#endif
+#else
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_crc32.h>
+#include <netinet/sctp_pcb.h>
+#endif
+
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+/**
+ *
+ * Routine Description:
+ *
+ * Computes the CRC32c checksum for the specified buffer using the slicing by 8
+ * algorithm over 64 bit quantities.
+ *
+ * Arguments:
+ *
+ * p_running_crc - pointer to the initial or final remainder value
+ * used in CRC computations. It should be set to
+ * non-NULL if the mode argument is equal to CONT or END
+ * p_buf - the packet buffer where crc computations are being performed
+ * length - the length of p_buf in bytes
+ * init_bytes - the number of initial bytes that need to be processed before
+ * aligning p_buf to multiples of 4 bytes
+ * mode - can be any of the following: BEGIN, CONT, END, BODY, ALIGN
+ *
+ * Return value:
+ *
+ * The computed CRC32c value
+ */
+
+
+/*
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ *
+ * This software program is licensed subject to the BSD License, available at
+ * http://www.opensource.org/licenses/bsd-license.html.
+ *
+ * Abstract:
+ *
+ * Tables for software CRC generation
+ */
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o32[256] =
+{
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o32
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o40[256] =
+{
+ 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
+ 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
+ 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
+ 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
+ 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
+ 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
+ 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
+ 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
+ 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
+ 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
+ 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
+ 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
+ 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
+ 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
+ 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
+ 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
+ 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
+ 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
+ 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
+ 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
+ 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
+ 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
+ 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
+ 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
+ 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
+ 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
+ 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
+ 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
+ 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
+ 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
+ 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
+ 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o40
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o48[256] =
+{
+ 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
+ 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
+ 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
+ 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
+ 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
+ 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
+ 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
+ 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
+ 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
+ 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
+ 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
+ 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
+ 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
+ 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
+ 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
+ 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
+ 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
+ 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
+ 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
+ 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
+ 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
+ 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
+ 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
+ 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
+ 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
+ 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
+ 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
+ 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
+ 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
+ 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
+ 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
+ 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o48
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o56[256] =
+{
+ 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
+ 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
+ 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
+ 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
+ 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
+ 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
+ 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
+ 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
+ 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
+ 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
+ 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
+ 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
+ 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
+ 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
+ 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
+ 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
+ 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
+ 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
+ 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
+ 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
+ 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
+ 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
+ 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
+ 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
+ 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
+ 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
+ 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
+ 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
+ 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
+ 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
+ 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
+ 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o56
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o64[256] =
+{
+ 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
+ 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
+ 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
+ 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
+ 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
+ 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
+ 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
+ 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
+ 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
+ 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
+ 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
+ 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
+ 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
+ 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
+ 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
+ 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
+ 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
+ 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
+ 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
+ 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
+ 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
+ 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
+ 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
+ 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
+ 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
+ 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
+ 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
+ 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
+ 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
+ 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
+ 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
+ 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o64
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o72[256] =
+{
+ 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
+ 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
+ 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
+ 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
+ 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
+ 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
+ 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
+ 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
+ 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
+ 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
+ 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
+ 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
+ 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
+ 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
+ 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
+ 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
+ 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
+ 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
+ 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
+ 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
+ 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
+ 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
+ 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
+ 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
+ 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
+ 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
+ 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
+ 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
+ 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
+ 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
+ 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
+ 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o72
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o80[256] =
+{
+ 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
+ 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
+ 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
+ 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
+ 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
+ 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
+ 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
+ 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
+ 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
+ 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
+ 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
+ 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
+ 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
+ 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
+ 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
+ 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
+ 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
+ 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
+ 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
+ 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
+ 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
+ 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
+ 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
+ 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
+ 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
+ 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
+ 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
+ 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
+ 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
+ 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
+ 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
+ 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o80
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o88[256] =
+{
+ 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
+ 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
+ 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
+ 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
+ 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
+ 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
+ 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
+ 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
+ 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
+ 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
+ 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
+ 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
+ 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
+ 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
+ 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
+ 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
+ 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
+ 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
+ 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
+ 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
+ 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
+ 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
+ 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
+ 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
+ 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
+ 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
+ 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
+ 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
+ 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
+ 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
+ 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
+ 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o88
+ */
+
+
+static uint32_t
+sctp_crc32c_sb8_64_bit(uint32_t crc,
+ const unsigned char *p_buf,
+ uint32_t length,
+ uint32_t init_bytes)
+{
+ uint32_t li;
+ uint32_t term1, term2;
+ uint32_t running_length;
+ uint32_t end_bytes;
+
+ running_length = ((length - init_bytes) / 8) * 8;
+ end_bytes = length - init_bytes - running_length;
+
+ for (li = 0; li < init_bytes; li++)
+ crc = sctp_crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^
+ (crc >> 8);
+ for (li = 0; li < running_length / 8; li++) {
+#if BYTE_ORDER == BIG_ENDIAN
+ crc ^= *p_buf++;
+ crc ^= (*p_buf++) << 8;
+ crc ^= (*p_buf++) << 16;
+ crc ^= (*p_buf++) << 24;
+#else
+ crc ^= *(const uint32_t *) p_buf;
+ p_buf += 4;
+#endif
+ term1 = sctp_crc_tableil8_o88[crc & 0x000000FF] ^
+ sctp_crc_tableil8_o80[(crc >> 8) & 0x000000FF];
+ term2 = crc >> 16;
+ crc = term1 ^
+ sctp_crc_tableil8_o72[term2 & 0x000000FF] ^
+ sctp_crc_tableil8_o64[(term2 >> 8) & 0x000000FF];
+
+#if BYTE_ORDER == BIG_ENDIAN
+ crc ^= sctp_crc_tableil8_o56[*p_buf++];
+ crc ^= sctp_crc_tableil8_o48[*p_buf++];
+ crc ^= sctp_crc_tableil8_o40[*p_buf++];
+ crc ^= sctp_crc_tableil8_o32[*p_buf++];
+#else
+ term1 = sctp_crc_tableil8_o56[(*(const uint32_t *) p_buf) & 0x000000FF] ^
+ sctp_crc_tableil8_o48[((*(const uint32_t *) p_buf) >> 8) & 0x000000FF];
+
+ term2 = (*(const uint32_t *) p_buf) >> 16;
+ crc = crc ^
+ term1 ^
+ sctp_crc_tableil8_o40[term2 & 0x000000FF] ^
+ sctp_crc_tableil8_o32[(term2 >> 8) & 0x000000FF];
+ p_buf += 4;
+#endif
+ }
+ for (li = 0; li < end_bytes; li++)
+ crc = sctp_crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^
+ (crc >> 8);
+ return (crc);
+}
+
+
+/**
+ *
+ * Routine Description:
+ *
+ * computes the CRC-32C of a buffer using the slicing-by-8 tables above;
+ * it hands sctp_crc32c_sb8_64_bit the number of bytes needed to reach
+ * a 32-bit boundary (consumed one at a time) before the main loop
+ * takes over eight bytes per iteration. Note that for an already
+ * aligned buffer to_even_word is 4, not 0, so four bytes still take
+ * the byte-wise path; harmless, just slightly suboptimal.
+ *
+ * Arguments:
+ *
+ * crc32c - the running CRC value; buffer, length - the data to add
+ *
+ * Return value:
+ *
+ * the updated CRC-32C value
+ */
+static uint32_t
+multitable_crc32c(uint32_t crc32c,
+ const unsigned char *buffer,
+ unsigned int length)
+{
+ uint32_t to_even_word;
+
+ if (length == 0) {
+ return (crc32c);
+ }
+ to_even_word = (4 - (((uintptr_t) buffer) & 0x3));
+ return (sctp_crc32c_sb8_64_bit(crc32c, buffer, length, to_even_word));
+}
+
+static const uint32_t sctp_crc_c[256] = {
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
+ 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
+ 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+ 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
+ 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
+ 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
+ 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
+ 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
+ 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
+ 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
+ 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
+ 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
+ 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
+ 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
+ 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
+ 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
+ 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
+ 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+ 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
+ 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
+ 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
+ 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
+ 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
+ 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
+ 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
+ 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
+ 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
+ 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
+ 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
+ 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+ 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
+ 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
+ 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
+};
+
+
+#define SCTP_CRC32C(c,d) (c=(c>>8)^sctp_crc_c[(c^(d))&0xFF])
+
+static uint32_t
+singletable_crc32c(uint32_t crc32c,
+ const unsigned char *buffer,
+ unsigned int length)
+{
+ unsigned int i;
+
+ for (i = 0; i < length; i++) {
+ SCTP_CRC32C(crc32c, buffer[i]);
+ }
+ return (crc32c);
+}
+
+#if defined(__Userspace__)
+uint32_t
+#else
+static uint32_t
+#endif
+calculate_crc32c(uint32_t crc32c,
+ const unsigned char *buffer,
+ unsigned int length)
+{
+ if (length < 4) {
+ return (singletable_crc32c(crc32c, buffer, length));
+ } else {
+ return (multitable_crc32c(crc32c, buffer, length));
+ }
+}
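+
+/*
+ * Illustrative, never-compiled sketch: a minimal self-test for the
+ * table-driven CRC-32C code above. 0xE3069283 is the well-known
+ * CRC-32C check value of the ASCII string "123456789"; the split call
+ * shows that the CRC state carries across incremental calls
+ * (exercising both the single-table and slicing-by-8 paths).
+ */
+#if 0
+static int
+sctp_crc32c_selftest_sketch(void)
+{
+	static const unsigned char vec[] = "123456789";
+	uint32_t crc;
+
+	/* One shot: seed with all ones; the final XOR is the complement. */
+	crc = calculate_crc32c(0xffffffff, vec, 9);
+	if ((~crc) != 0xe3069283U)
+		return (-1);
+	/* Incrementally: 3 bytes, then the remaining 6; state carries over. */
+	crc = calculate_crc32c(0xffffffff, vec, 3);
+	crc = calculate_crc32c(crc, vec + 3, 6);
+	return (((~crc) == 0xe3069283U) ? 0 : -1);
+}
+#endif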
+
+#endif
+#if defined(__Userspace__)
+uint32_t
+#else
+static uint32_t
+#endif
+sctp_finalize_crc32c(uint32_t crc32c)
+{
+ uint32_t result;
+#if BYTE_ORDER == BIG_ENDIAN
+ uint8_t byte0, byte1, byte2, byte3;
+#endif
+
+ /* Complement the result */
+ result = ~crc32c;
+#if BYTE_ORDER == BIG_ENDIAN
+ /*
+ * For BIG-ENDIAN platforms the result is in little-endian form. So we
+ * must swap the bytes to return the result in network byte order.
+ */
+ byte0 = result & 0x000000ff;
+ byte1 = (result >> 8) & 0x000000ff;
+ byte2 = (result >> 16) & 0x000000ff;
+ byte3 = (result >> 24) & 0x000000ff;
+ crc32c = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
+#else
+	/*
+	 * For LITTLE ENDIAN platforms the result is already in network
+	 * byte order.
+	 */
+ crc32c = result;
+#endif
+ return (crc32c);
+}
+
+/*
+ * Compute the SCTP checksum in network byte order for a given mbuf chain m
+ * which contains an SCTP packet starting at offset.
+ * Since this function is also called by ipfw, don't assume that
+ * it is compiled on a kernel with SCTP support.
+ */
+uint32_t
+sctp_calculate_cksum(struct mbuf *m, uint32_t offset)
+{
+ uint32_t base = 0xffffffff;
+
+ while (offset > 0) {
+ KASSERT(m != NULL, ("sctp_calculate_cksum, offset > length of mbuf chain"));
+ if (offset < (uint32_t)m->m_len) {
+ break;
+ }
+ offset -= m->m_len;
+ m = m->m_next;
+ }
+ if (offset > 0) {
+ base = calculate_crc32c(base,
+ (unsigned char *)(m->m_data + offset),
+ (unsigned int)(m->m_len - offset));
+ m = m->m_next;
+ }
+ while (m != NULL) {
+ base = calculate_crc32c(base,
+ (unsigned char *)m->m_data,
+ (unsigned int)m->m_len);
+ m = m->m_next;
+ }
+ base = sctp_finalize_crc32c(base);
+ return (base);
+}
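+
+/*
+ * Illustrative, never-compiled sketch of how the routine above is
+ * typically used on the receive side: the input path saves the
+ * checksum from the common header, zeroes the field in the mbuf,
+ * recomputes, and compares. The hypothetical helper below assumes the
+ * caller has already zeroed the field.
+ */
+#if 0
+static int
+sctp_cksum_matches_sketch(struct mbuf *m, uint32_t offset, uint32_t wire_cksum)
+{
+	/* Assumes sh->checksum has been zeroed in the mbuf already. */
+	return (sctp_calculate_cksum(m, offset) == wire_cksum);
+}
+#endif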
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP) || defined(SCTP_SUPPORT)
+
+VNET_DEFINE(struct sctp_base_info, system_base_info);
+
+/*
+ * Compute and insert the SCTP checksum in network byte order for a given
+ * mbuf chain m which contains an SCTP packet starting at offset.
+ */
+void
+sctp_delayed_cksum(struct mbuf *m, uint32_t offset)
+{
+ uint32_t checksum;
+
+ checksum = sctp_calculate_cksum(m, offset);
+ SCTP_STAT_DECR(sctps_sendhwcrc);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ offset += offsetof(struct sctphdr, checksum);
+
+ if (offset + sizeof(uint32_t) > (uint32_t)(m->m_pkthdr.len)) {
+#ifdef INVARIANTS
+ panic("sctp_delayed_cksum(): m->m_pkthdr.len: %d, offset: %u.",
+ m->m_pkthdr.len, offset);
+#else
+ SCTP_PRINTF("sctp_delayed_cksum(): m->m_pkthdr.len: %d, offset: %u.\n",
+ m->m_pkthdr.len, offset);
+#endif
+ return;
+ }
+ m_copyback(m, (int)offset, (int)sizeof(uint32_t), (caddr_t)&checksum);
+}
+#endif
+#endif
+
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.h
new file mode 100644
index 000000000..5e76ff21b
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_crc32.h
@@ -0,0 +1,56 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 362338 2020-06-18 19:32:34Z markj $");
+#endif
+
+#ifndef _NETINET_SCTP_CRC32_H_
+#define _NETINET_SCTP_CRC32_H_
+
+#if defined(_KERNEL)
+uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP) || defined(SCTP_SUPPORT)
+void sctp_delayed_cksum(struct mbuf *, uint32_t offset);
+#endif
+#endif
+#endif /* _KERNEL */
+#if defined(__Userspace__)
+uint32_t calculate_crc32c(uint32_t, const unsigned char *, unsigned int);
+uint32_t sctp_finalize_crc32c(uint32_t);
+uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
+#endif
+#endif /* _NETINET_SCTP_CRC32_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_header.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_header.h
new file mode 100644
index 000000000..602921d0e
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_header.h
@@ -0,0 +1,611 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_header.h 309682 2016-12-07 19:30:59Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_HEADER_H_
+#define _NETINET_SCTP_HEADER_H_
+
+#if defined(_WIN32) && !defined(__Userspace__)
+#include <packon.h>
+#endif
+#if !defined(_WIN32)
+#include <sys/time.h>
+#endif
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+
+#if !defined(_WIN32)
+#define SCTP_PACKED __attribute__((packed))
+#else
+#pragma pack (push, 1)
+#define SCTP_PACKED
+#endif
+
+/*
+ * Parameter structures
+ */
+struct sctp_ipv4addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_IPV4_PARAM_TYPE, len=8 */
+ uint32_t addr; /* IPV4 address */
+} SCTP_PACKED;
+
+#define SCTP_V6_ADDR_BYTES 16
+
+
+struct sctp_ipv6addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_IPV6_PARAM_TYPE, len=20 */
+ uint8_t addr[SCTP_V6_ADDR_BYTES]; /* IPV6 address */
+} SCTP_PACKED;
+
+/* Cookie Preservative */
+struct sctp_cookie_perserve_param {
+ struct sctp_paramhdr ph;/* type=SCTP_COOKIE_PRESERVE, len=8 */
+ uint32_t time; /* time in ms to extend cookie */
+} SCTP_PACKED;
+
+#define SCTP_ARRAY_MIN_LEN 1
+/* Host Name Address */
+struct sctp_host_name_param {
+ struct sctp_paramhdr ph;/* type=SCTP_HOSTNAME_ADDRESS */
+ char name[SCTP_ARRAY_MIN_LEN]; /* host name */
+} SCTP_PACKED;
+
+/*
+ * This is the maximum padded size of a supported-address parameter (s-a-p):
+ * paramhdr (4 bytes) + 3 address types (6 bytes) + 2 byte pad = 12
+ */
+#define SCTP_MAX_ADDR_PARAMS_SIZE 12
+/* supported address type */
+struct sctp_supported_addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_SUPPORTED_ADDRTYPE */
+ uint16_t addr_type[2]; /* array of supported address types */
+} SCTP_PACKED;
+
+/* heartbeat info parameter */
+struct sctp_heartbeat_info_param {
+ struct sctp_paramhdr ph;
+ uint32_t time_value_1;
+ uint32_t time_value_2;
+ uint32_t random_value1;
+ uint32_t random_value2;
+ uint8_t addr_family;
+ uint8_t addr_len;
+ /* make sure that this structure is 4 byte aligned */
+ uint8_t padding[2];
+ char address[SCTP_ADDRMAX];
+} SCTP_PACKED;
+
+
+/* draft-ietf-tsvwg-prsctp */
+/* PR-SCTP supported parameter */
+struct sctp_prsctp_supported_param {
+ struct sctp_paramhdr ph;
+} SCTP_PACKED;
+
+
+/* draft-ietf-tsvwg-addip-sctp */
+struct sctp_asconf_paramhdr { /* an ASCONF "parameter" */
+ struct sctp_paramhdr ph;/* a SCTP parameter header */
+ uint32_t correlation_id;/* correlation id for this param */
+} SCTP_PACKED;
+
+struct sctp_asconf_addr_param { /* an ASCONF address parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ struct sctp_ipv6addr_param addrp; /* max storage size */
+} SCTP_PACKED;
+
+
+struct sctp_asconf_tag_param { /* an ASCONF NAT-Vtag parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ uint32_t local_vtag;
+ uint32_t remote_vtag;
+} SCTP_PACKED;
+
+
+struct sctp_asconf_addrv4_param { /* an ASCONF address (v4) parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ struct sctp_ipv4addr_param addrp; /* max storage size */
+} SCTP_PACKED;
+
+#define SCTP_MAX_SUPPORTED_EXT 256
+
+struct sctp_supported_chunk_types_param {
+ struct sctp_paramhdr ph;/* type = 0x8008 len = x */
+ uint8_t chunk_types[];
+} SCTP_PACKED;
+
+
+/*
+ * Structures for DATA chunks
+ */
+struct sctp_data {
+ uint32_t tsn;
+ uint16_t sid;
+ uint16_t ssn;
+ uint32_t ppid;
+ /* user data follows */
+} SCTP_PACKED;
+
+struct sctp_data_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_data dp;
+} SCTP_PACKED;
+
+struct sctp_idata {
+ uint32_t tsn;
+ uint16_t sid;
+ uint16_t reserved; /* Where does the SSN go? */
+ uint32_t mid;
+ union {
+ uint32_t ppid;
+ uint32_t fsn; /* Fragment Sequence Number */
+ } ppid_fsn;
+ /* user data follows */
+} SCTP_PACKED;
+
+struct sctp_idata_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_idata dp;
+} SCTP_PACKED;
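+
+/*
+ * Note (illustrative): with I-DATA (RFC 8260) the 16-bit SSN of a
+ * plain DATA chunk is replaced by the 32-bit MID above, and the PPID
+ * field is shared with the FSN: only the first fragment carries the
+ * PPID, later fragments reuse the field as the fragment sequence
+ * number.
+ */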
+
+/*
+ * Structures for the control chunks
+ */
+
+/* Initiate (INIT)/Initiate Ack (INIT ACK) */
+struct sctp_init {
+ uint32_t initiate_tag; /* initiate tag */
+ uint32_t a_rwnd; /* a_rwnd */
+ uint16_t num_outbound_streams; /* OS */
+ uint16_t num_inbound_streams; /* MIS */
+ uint32_t initial_tsn; /* I-TSN */
+ /* optional param's follow */
+} SCTP_PACKED;
+#define SCTP_IDENTIFICATION_SIZE 16
+#define SCTP_ADDRESS_SIZE 4
+#if defined(__Userspace__)
+#define SCTP_RESERVE_SPACE 5
+#else
+#define SCTP_RESERVE_SPACE 6
+#endif
+/* state cookie header */
+struct sctp_state_cookie { /* this is our definition... */
+ uint8_t identification[SCTP_IDENTIFICATION_SIZE];/* id of who we are */
+ struct timeval time_entered; /* the time I built cookie */
+ uint32_t cookie_life; /* life I will award this cookie */
+ uint32_t tie_tag_my_vtag; /* my tag in old association */
+
+	uint32_t tie_tag_peer_vtag;	/* peer's tag in old association */
+	uint32_t peers_vtag;	/* peer's tag in INIT (for quick ref) */
+
+ uint32_t my_vtag; /* my tag in INIT-ACK (for quick ref) */
+ uint32_t address[SCTP_ADDRESS_SIZE]; /* 4 ints/128 bits */
+ uint32_t addr_type; /* address type */
+ uint32_t laddress[SCTP_ADDRESS_SIZE]; /* my local from address */
+ uint32_t laddr_type; /* my local from address type */
+ uint32_t scope_id; /* v6 scope id for link-locals */
+
+ uint16_t peerport; /* port address of the peer in the INIT */
+ uint16_t myport; /* my port address used in the INIT */
+ uint8_t ipv4_addr_legal;/* Are V4 addr legal? */
+ uint8_t ipv6_addr_legal;/* Are V6 addr legal? */
+#if defined(__Userspace__)
+ uint8_t conn_addr_legal;
+#endif
+ uint8_t local_scope; /* IPv6 local scope flag */
+ uint8_t site_scope; /* IPv6 site scope flag */
+
+ uint8_t ipv4_scope; /* IPv4 private addr scope */
+ uint8_t loopback_scope; /* loopback scope information */
+ uint8_t reserved[SCTP_RESERVE_SPACE]; /* Align to 64 bits */
+ /*
+ * at the end is tacked on the INIT chunk and the INIT-ACK chunk
+ * (minus the cookie).
+ */
+} SCTP_PACKED;
+
+/* state cookie parameter */
+struct sctp_state_cookie_param {
+ struct sctp_paramhdr ph;
+ struct sctp_state_cookie cookie;
+} SCTP_PACKED;
+
+struct sctp_init_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_init init;
+} SCTP_PACKED;
+
+struct sctp_init_msg {
+ struct sctphdr sh;
+ struct sctp_init_chunk msg;
+} SCTP_PACKED;
+
+/* ... used for both INIT and INIT ACK */
+#define sctp_init_ack sctp_init
+#define sctp_init_ack_chunk sctp_init_chunk
+#define sctp_init_ack_msg sctp_init_msg
+
+
+/* Selective Ack (SACK) */
+struct sctp_gap_ack_block {
+ uint16_t start; /* Gap Ack block start */
+ uint16_t end; /* Gap Ack block end */
+} SCTP_PACKED;
+
+struct sctp_sack {
+ uint32_t cum_tsn_ack; /* cumulative TSN Ack */
+ uint32_t a_rwnd; /* updated a_rwnd of sender */
+ uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
+ uint16_t num_dup_tsns; /* number of duplicate TSNs */
+ /* struct sctp_gap_ack_block's follow */
+ /* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_sack_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_sack sack;
+} SCTP_PACKED;
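+
+/*
+ * Note (illustrative): gap ack block start/end values are offsets
+ * relative to cum_tsn_ack (RFC 4960, Section 3.3.4). E.g. with
+ * cum_tsn_ack = 100 and a single block {start = 2, end = 3}, the
+ * receiver reports TSNs 102 and 103 as arrived while TSN 101 is
+ * still missing.
+ */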
+
+struct sctp_nr_sack {
+ uint32_t cum_tsn_ack; /* cumulative TSN Ack */
+ uint32_t a_rwnd; /* updated a_rwnd of sender */
+ uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
+ uint16_t num_nr_gap_ack_blks; /* number of NR Gap Ack blocks */
+ uint16_t num_dup_tsns; /* number of duplicate TSNs */
+	uint16_t reserved;	/* not currently used */
+ /* struct sctp_gap_ack_block's follow */
+ /* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_nr_sack_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_nr_sack nr_sack;
+} SCTP_PACKED;
+
+
+/* Heartbeat Request (HEARTBEAT) */
+struct sctp_heartbeat {
+ struct sctp_heartbeat_info_param hb_info;
+} SCTP_PACKED;
+
+struct sctp_heartbeat_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_heartbeat heartbeat;
+} SCTP_PACKED;
+
+/* ... used for Heartbeat Ack (HEARTBEAT ACK) */
+#define sctp_heartbeat_ack sctp_heartbeat
+#define sctp_heartbeat_ack_chunk sctp_heartbeat_chunk
+
+
+/* Abort Association (ABORT) */
+struct sctp_abort_chunk {
+ struct sctp_chunkhdr ch;
+ /* optional error cause may follow */
+} SCTP_PACKED;
+
+struct sctp_abort_msg {
+ struct sctphdr sh;
+ struct sctp_abort_chunk msg;
+} SCTP_PACKED;
+
+
+/* Shutdown Association (SHUTDOWN) */
+struct sctp_shutdown_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t cumulative_tsn_ack;
+} SCTP_PACKED;
+
+
+/* Shutdown Acknowledgment (SHUTDOWN ACK) */
+struct sctp_shutdown_ack_chunk {
+ struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+
+/* Operation Error (ERROR) */
+struct sctp_error_chunk {
+ struct sctp_chunkhdr ch;
+ /* optional error causes follow */
+} SCTP_PACKED;
+
+
+/* Cookie Echo (COOKIE ECHO) */
+struct sctp_cookie_echo_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_state_cookie cookie;
+} SCTP_PACKED;
+
+/* Cookie Acknowledgment (COOKIE ACK) */
+struct sctp_cookie_ack_chunk {
+ struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+/* Explicit Congestion Notification Echo (ECNE) */
+struct old_sctp_ecne_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+} SCTP_PACKED;
+
+struct sctp_ecne_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+ uint32_t num_pkts_since_cwr;
+} SCTP_PACKED;
+
+/* Congestion Window Reduced (CWR) */
+struct sctp_cwr_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+} SCTP_PACKED;
+
+/* Shutdown Complete (SHUTDOWN COMPLETE) */
+struct sctp_shutdown_complete_chunk {
+ struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+struct sctp_adaptation_layer_indication {
+ struct sctp_paramhdr ph;
+ uint32_t indication;
+} SCTP_PACKED;
+
+/*
+ * draft-ietf-tsvwg-addip-sctp
+ */
+/* Address/Stream Configuration Change (ASCONF) */
+struct sctp_asconf_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t serial_number;
+ /* lookup address parameter (mandatory) */
+ /* asconf parameters follow */
+} SCTP_PACKED;
+
+/* Address/Stream Configuration Acknowledge (ASCONF ACK) */
+struct sctp_asconf_ack_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t serial_number;
+ /* asconf parameters follow */
+} SCTP_PACKED;
+
+/* draft-ietf-tsvwg-prsctp */
+/* Forward Cumulative TSN (FORWARD TSN) */
+struct sctp_forward_tsn_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t new_cumulative_tsn;
+ /* stream/sequence pairs (sctp_strseq) follow */
+} SCTP_PACKED;
+
+struct sctp_strseq {
+ uint16_t sid;
+ uint16_t ssn;
+} SCTP_PACKED;
+
+struct sctp_strseq_mid {
+ uint16_t sid;
+ uint16_t flags;
+ uint32_t mid;
+};
+
+struct sctp_forward_tsn_msg {
+ struct sctphdr sh;
+ struct sctp_forward_tsn_chunk msg;
+} SCTP_PACKED;
+
+/* should be one less than a multiple of 4, i.e. 3/7/11/.../31 */
+
+#define SCTP_NUM_DB_TO_VERIFY 31
+
+struct sctp_chunk_desc {
+ uint8_t chunk_type;
+ uint8_t data_bytes[SCTP_NUM_DB_TO_VERIFY];
+ uint32_t tsn_ifany;
+} SCTP_PACKED;
+
+
+struct sctp_pktdrop_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t bottle_bw;
+ uint32_t current_onq;
+ uint16_t trunc_len;
+ uint16_t reserved;
+ uint8_t data[];
+} SCTP_PACKED;
+
+/**********STREAM RESET STUFF ******************/
+
+struct sctp_stream_reset_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_out_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq; /* monotonically increasing seq no */
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t send_reset_at_tsn; /* last TSN I assigned outbound */
+	uint16_t list_of_streams[]; /* list of streams (if not all of them) */
+} SCTP_PACKED;
+
+struct sctp_stream_reset_in_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+	uint16_t list_of_streams[]; /* list of streams (if not all of them) */
+} SCTP_PACKED;
+
+struct sctp_stream_reset_tsn_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_response {
+ struct sctp_paramhdr ph;
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t result;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_response_tsn {
+ struct sctp_paramhdr ph;
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t result;
+ uint32_t senders_next_tsn;
+ uint32_t receivers_next_tsn;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_add_strm {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+ uint16_t number_of_streams;
+ uint16_t reserved;
+} SCTP_PACKED;
+
+#define SCTP_STREAM_RESET_RESULT_NOTHING_TO_DO 0x00000000 /* XXX: unused */
+#define SCTP_STREAM_RESET_RESULT_PERFORMED 0x00000001
+#define SCTP_STREAM_RESET_RESULT_DENIED 0x00000002
+#define SCTP_STREAM_RESET_RESULT_ERR__WRONG_SSN 0x00000003 /* XXX: unused */
+#define SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS 0x00000004
+#define SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO 0x00000005
+#define SCTP_STREAM_RESET_RESULT_IN_PROGRESS 0x00000006 /* XXX: unused */
+
+/*
+ * Convenience structures; note that if you are making a request for
+ * specific streams then the request will need to be an overlay structure.
+ */
+
+struct sctp_stream_reset_tsn_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_tsn_request sr_req;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_resp {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_response sr_resp;
+} SCTP_PACKED;
+
+/* response only valid with a TSN request */
+struct sctp_stream_reset_resp_tsn {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_response_tsn sr_resp;
+} SCTP_PACKED;
+
+/****************************************************/
+
+/*
+ * Authenticated chunks support draft-ietf-tsvwg-sctp-auth
+ */
+
+/* Should we make the max be 32? */
+#define SCTP_RANDOM_MAX_SIZE 256
+struct sctp_auth_random {
+ struct sctp_paramhdr ph;/* type = 0x8002 */
+ uint8_t random_data[];
+} SCTP_PACKED;
+
+struct sctp_auth_chunk_list {
+ struct sctp_paramhdr ph;/* type = 0x8003 */
+ uint8_t chunk_types[];
+} SCTP_PACKED;
+
+struct sctp_auth_hmac_algo {
+ struct sctp_paramhdr ph;/* type = 0x8004 */
+ uint16_t hmac_ids[];
+} SCTP_PACKED;
+
+struct sctp_auth_chunk {
+ struct sctp_chunkhdr ch;
+ uint16_t shared_key_id;
+ uint16_t hmac_id;
+ uint8_t hmac[];
+} SCTP_PACKED;
+
+/*
+ * We pre-reserve enough room for an ECNE or CWR AND a SACK with no missing
+ * pieces. If the ECNE is missing we could have a couple of blocks. This way
+ * we optimize so we MOST likely can bundle a SACK/ECN with the smallest-size
+ * data chunk I will split into. We could increase throughput slightly by
+ * taking these two out, but the 24-sack/8-CWR, i.e. 32 bytes, I pre-reserve
+ * feels worth it for now.
+ */
+#ifndef SCTP_MAX_OVERHEAD
+#ifdef INET6
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct sctp_ecne_chunk) + \
+ sizeof(struct sctp_sack_chunk) + \
+ sizeof(struct ip6_hdr))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip6_hdr))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip6_hdr) + \
+ sizeof(struct sctphdr))
+
+#else
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct sctp_ecne_chunk) + \
+ sizeof(struct sctp_sack_chunk) + \
+ sizeof(struct ip))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip) + \
+ sizeof(struct sctphdr))
+
+#endif /* INET6 */
+#endif /* !SCTP_MAX_OVERHEAD */
+
+#define SCTP_MED_V4_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip))
+
+#define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \
+ sizeof(struct sctphdr))
+
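+/*
+ * For orientation (illustrative arithmetic, assuming option-less IP
+ * headers of 20 bytes for v4 and 40 for v6, and the 12-byte common
+ * header struct sctphdr): with the packed layouts above a DATA chunk
+ * header is 16 bytes (4 + 12), a SACK chunk header 16 (4 + 12) and an
+ * ECNE chunk 12 (4 + 8), so SCTP_MAX_OVERHEAD works out to 76 over
+ * IPv4 and 96 over IPv6, and SCTP_MIN_V4_OVERHEAD to 32.
+ */
+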
+#if defined(_WIN32) && !defined(__Userspace__)
+#include <packoff.h>
+#endif
+#if defined(_WIN32) && defined(__Userspace__)
+#pragma pack(pop)
+#endif
+#undef SCTP_PACKED
+#endif /* _NETINET_SCTP_HEADER_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.c
new file mode 100644
index 000000000..e8e42e661
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.c
@@ -0,0 +1,5840 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 363440 2020-07-23 01:35:24Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_crc32.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/sctp_lock_bsd.h>
+#endif
+/*
+ * NOTES: On the outbound side of things I need to check the sack timer to
+ * see if I should generate a SACK into the chunk queue (if I have data to
+ * send, that is, and will be sending it) for bundling.
+ *
+ * The callback in sctp_usrreq.c will get called when the socket is read from.
+ * This will cause sctp_service_queues() to get called on the top entry in
+ * the list.
+ */
+static uint32_t
+sctp_add_chk_to_control(struct sctp_queued_to_read *control,
+ struct sctp_stream_in *strm,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_tmit_chunk *chk, int hold_rlock);
+
+
+void
+sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
+}
+
+/* Calculate what the rwnd would be */
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ uint32_t calc = 0;
+
+ /*
+	 * This is really set wrong with respect to a 1-to-many socket,
+	 * since sb_cc is the count that everyone has put up. When we
+	 * re-write sctp_soreceive we will fix this so that ONLY this
+	 * association's data is taken into account.
+ */
+ if (stcb->sctp_socket == NULL) {
+ return (calc);
+ }
+
+ KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
+ ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
+ KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
+ ("size_on_all_streams is %u", asoc->size_on_all_streams));
+ if (stcb->asoc.sb_cc == 0 &&
+ asoc->cnt_on_reasm_queue == 0 &&
+ asoc->cnt_on_all_streams == 0) {
+ /* Full rwnd granted */
+ calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
+ return (calc);
+ }
+ /* get actual space */
+ calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
+ /*
+	 * take out what has NOT been put on the socket queue but that we
+	 * still hold for putting up.
+ */
+ calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
+ asoc->cnt_on_reasm_queue * MSIZE));
+ calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
+ asoc->cnt_on_all_streams * MSIZE));
+ if (calc == 0) {
+ /* out of space */
+ return (calc);
+ }
+
+ /* what is the overhead of all these rwnd's */
+ calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
+	/* If the window gets too small due to control overhead, reduce it
+	 * to 1, even if it would otherwise be 0. SWS avoidance engaged.
+ */
+ if (calc < stcb->asoc.my_rwnd_control_len) {
+ calc = 1;
+ }
+ return (calc);
+}
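+
+/*
+ * For orientation (illustrative arithmetic with made-up numbers,
+ * assuming MSIZE == 256): given 65536 bytes of socket-buffer space,
+ * 4000 bytes in 4 chunks on the reassembly queue and 2000 bytes in
+ * 2 chunks on the stream queues, the rwnd above would be
+ * 65536 - (4000 + 4 * 256) - (2000 + 2 * 256) = 58000, less
+ * my_rwnd_control_len, clamped to 1 if it falls below that overhead.
+ */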
+
+
+
+/*
+ * Build out our readq entry based on the incoming packet.
+ */
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint32_t tsn, uint32_t ppid,
+ uint32_t context, uint16_t sid,
+ uint32_t mid, uint8_t flags,
+ struct mbuf *dm)
+{
+ struct sctp_queued_to_read *read_queue_e = NULL;
+
+ sctp_alloc_a_readq(stcb, read_queue_e);
+ if (read_queue_e == NULL) {
+ goto failed_build;
+ }
+ memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
+ read_queue_e->sinfo_stream = sid;
+ read_queue_e->sinfo_flags = (flags << 8);
+ read_queue_e->sinfo_ppid = ppid;
+ read_queue_e->sinfo_context = context;
+ read_queue_e->sinfo_tsn = tsn;
+ read_queue_e->sinfo_cumtsn = tsn;
+ read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+ read_queue_e->mid = mid;
+ read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
+ TAILQ_INIT(&read_queue_e->reasm);
+ read_queue_e->whoFrom = net;
+ atomic_add_int(&net->ref_count, 1);
+ read_queue_e->data = dm;
+ read_queue_e->stcb = stcb;
+ read_queue_e->port_from = stcb->rport;
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ read_queue_e->do_not_ref_stcb = 1;
+ }
+failed_build:
+ return (read_queue_e);
+}
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
+{
+ struct sctp_extrcvinfo *seinfo;
+ struct sctp_sndrcvinfo *outinfo;
+ struct sctp_rcvinfo *rcvinfo;
+ struct sctp_nxtinfo *nxtinfo;
+#if defined(_WIN32)
+ WSACMSGHDR *cmh;
+#else
+ struct cmsghdr *cmh;
+#endif
+ struct mbuf *ret;
+ int len;
+ int use_extended;
+ int provide_nxt;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
+ /* user does not want any ancillary data */
+ return (NULL);
+ }
+
+ len = 0;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
+ len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+ }
+ seinfo = (struct sctp_extrcvinfo *)sinfo;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
+ (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
+ provide_nxt = 1;
+ len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
+ } else {
+ provide_nxt = 0;
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+ use_extended = 1;
+ len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
+ } else {
+ use_extended = 0;
+ len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
+ }
+ } else {
+ use_extended = 0;
+ }
+
+ ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+ if (ret == NULL) {
+ /* No space */
+ return (ret);
+ }
+ SCTP_BUF_LEN(ret) = 0;
+
+ /* We need a CMSG header followed by the struct */
+#if defined(_WIN32)
+ cmh = mtod(ret, WSACMSGHDR *);
+#else
+ cmh = mtod(ret, struct cmsghdr *);
+#endif
+ /*
+ * Make sure that there is no un-initialized padding between
+ * the cmsg header and cmsg data and after the cmsg data.
+ */
+ memset(cmh, 0, len);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
+ cmh->cmsg_level = IPPROTO_SCTP;
+ cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
+ cmh->cmsg_type = SCTP_RCVINFO;
+ rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
+ rcvinfo->rcv_sid = sinfo->sinfo_stream;
+ rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
+ rcvinfo->rcv_flags = sinfo->sinfo_flags;
+ rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
+ rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
+ rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
+ rcvinfo->rcv_context = sinfo->sinfo_context;
+ rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
+#if defined(_WIN32)
+ cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
+#else
+ cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
+#endif
+ SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+ }
+ if (provide_nxt) {
+ cmh->cmsg_level = IPPROTO_SCTP;
+ cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
+ cmh->cmsg_type = SCTP_NXTINFO;
+ nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
+ nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
+ nxtinfo->nxt_flags = 0;
+ if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
+ nxtinfo->nxt_flags |= SCTP_UNORDERED;
+ }
+ if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
+ nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
+ }
+ if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
+ nxtinfo->nxt_flags |= SCTP_COMPLETE;
+ }
+ nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
+ nxtinfo->nxt_length = seinfo->serinfo_next_length;
+ nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
+#if defined(_WIN32)
+ cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
+#else
+ cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
+#endif
+ SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+ cmh->cmsg_level = IPPROTO_SCTP;
+ outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+ if (use_extended) {
+ cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
+ cmh->cmsg_type = SCTP_EXTRCV;
+ memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
+ SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
+ } else {
+ cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ cmh->cmsg_type = SCTP_SNDRCV;
+ *outinfo = *sinfo;
+ SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
+ }
+ }
+ return (ret);
+}
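+
+/*
+ * Illustrative, never-compiled userland sketch of how an application
+ * consumes the SCTP_RCVINFO ancillary data assembled above, via the
+ * RFC 6458 socket API. It assumes SCTP_RECVRCVINFO has been enabled
+ * on the socket and would need the usual <sys/socket.h> and
+ * <netinet/sctp.h> includes; the function name is hypothetical.
+ */
+#if 0
+static void
+example_read_rcvinfo(int sd)
+{
+	char data[2048];
+	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
+	struct iovec iov = { data, sizeof(data) };
+	struct msghdr msg;
+	struct cmsghdr *cmsg;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = cbuf;
+	msg.msg_controllen = sizeof(cbuf);
+	if (recvmsg(sd, &msg, 0) < 0)
+		return;
+	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
+	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+		if (cmsg->cmsg_level == IPPROTO_SCTP &&
+		    cmsg->cmsg_type == SCTP_RCVINFO) {
+			struct sctp_rcvinfo rcv;
+
+			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
+			/* rcv.rcv_sid, rcv.rcv_ppid, rcv.rcv_tsn etc.
+			 * now describe the message just read. */
+		}
+	}
+}
+#endif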
+
+
+static void
+sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
+{
+ uint32_t gap, i, cumackp1;
+ int fnd = 0;
+ int in_r=0, in_nr=0;
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ return;
+ }
+ cumackp1 = asoc->cumulative_tsn + 1;
+ if (SCTP_TSN_GT(cumackp1, tsn)) {
+ /* this tsn is behind the cum ack and thus we don't
+ * need to worry about it being moved from one to the other.
+ */
+ return;
+ }
+ SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+ in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
+ in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if ((in_r == 0) && (in_nr == 0)) {
+#ifdef INVARIANTS
+ panic("Things are really messed up now");
+#else
+ SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
+ sctp_print_mapping_array(asoc);
+#endif
+ }
+ if (in_nr == 0)
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (in_r)
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ if (tsn == asoc->highest_tsn_inside_map) {
+ /* We must back down to see what the new highest is */
+ for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
+ SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ asoc->highest_tsn_inside_map = i;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+ }
+ }
+}
+
+static int
+sctp_place_control_in_stream(struct sctp_stream_in *strm,
+ struct sctp_association *asoc,
+ struct sctp_queued_to_read *control)
+{
+ struct sctp_queued_to_read *at;
+ struct sctp_readhead *q;
+ uint8_t flags, unordered;
+
+ flags = (control->sinfo_flags >> 8);
+ unordered = flags & SCTP_DATA_UNORDERED;
+ if (unordered) {
+ q = &strm->uno_inqueue;
+ if (asoc->idata_supported == 0) {
+ if (!TAILQ_EMPTY(q)) {
+ /* Only one stream can be here in old style -- abort */
+ return (-1);
+ }
+ TAILQ_INSERT_TAIL(q, control, next_instrm);
+ control->on_strm_q = SCTP_ON_UNORDERED;
+ return (0);
+ }
+ } else {
+ q = &strm->inqueue;
+ }
+ if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+ control->end_added = 1;
+ control->first_frag_seen = 1;
+ control->last_frag_seen = 1;
+ }
+ if (TAILQ_EMPTY(q)) {
+ /* Empty queue */
+ TAILQ_INSERT_HEAD(q, control, next_instrm);
+ if (unordered) {
+ control->on_strm_q = SCTP_ON_UNORDERED;
+ } else {
+ control->on_strm_q = SCTP_ON_ORDERED;
+ }
+ return (0);
+ } else {
+ TAILQ_FOREACH(at, q, next_instrm) {
+ if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
+ /*
+ * one in queue is bigger than the
+ * new one, insert before this one
+ */
+ TAILQ_INSERT_BEFORE(at, control, next_instrm);
+ if (unordered) {
+ control->on_strm_q = SCTP_ON_UNORDERED;
+ } else {
+					control->on_strm_q = SCTP_ON_ORDERED;
+ }
+ break;
+ } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
+ /*
+				 * Gak, the peer sent me a duplicate
+				 * msg id number; return -1 to abort.
+ */
+ return (-1);
+ } else {
+ if (TAILQ_NEXT(at, next_instrm) == NULL) {
+ /*
+ * We are at the end, insert
+ * it after this one
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, at,
+ SCTP_STR_LOG_FROM_INSERT_TL);
+ }
+ TAILQ_INSERT_AFTER(q, at, control, next_instrm);
+ if (unordered) {
+						control->on_strm_q = SCTP_ON_UNORDERED;
+					} else {
+						control->on_strm_q = SCTP_ON_ORDERED;
+ }
+ break;
+ }
+ }
+ }
+ }
+ return (0);
+}
+
+static void
+sctp_abort_in_reasm(struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sctp_tmit_chunk *chk,
+ int *abort_flag, int opspot)
+{
+ char msg[SCTP_DIAG_INFO_LEN];
+ struct mbuf *oper;
+
+ if (stcb->asoc.idata_supported) {
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
+ opspot,
+ control->fsn_included,
+ chk->rec.data.tsn,
+ chk->rec.data.sid,
+ chk->rec.data.fsn, chk->rec.data.mid);
+ } else {
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
+ opspot,
+ control->fsn_included,
+ chk->rec.data.tsn,
+ chk->rec.data.sid,
+ chk->rec.data.fsn,
+ (uint16_t)chk->rec.data.mid);
+ }
+ oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+}
+
+static void
+sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
+{
+ /*
+ * The control could not be placed and must be cleaned.
+ */
+ struct sctp_tmit_chunk *chk, *nchk;
+ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ if (chk->data)
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ sctp_free_remote_addr(control->whoFrom);
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+}
+
+/*
+ * Queue the chunk either right into the socket buffer if it is the next one
+ * to go, OR put it in the correct place in the delivery queue. If we do
+ * append to the so_buf, keep doing so until we run out of order, as
+ * long as the controls entered are non-fragmented.
+ */
+static void
+sctp_queue_data_to_stream(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
+{
+ /*
+	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
+	 * all the data in one stream this could happen quite rapidly. One
+	 * could use the TSN to keep track of things, but this scheme breaks
+	 * down in the other type of stream usage that could occur. Send a
+	 * single msg to stream 0, send 4 billion messages to stream 1, now
+	 * send a message to stream 0. You have a situation where the TSN
+	 * has wrapped but not in the stream. Is this worth worrying about,
+	 * or should we just change our queue sort at the bottom to be by
+	 * TSN?
+	 *
+	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
+	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
+	 * assignment this could happen... and I don't see how this would be
+	 * a violation. So for now I am undecided and will leave the sort by
+	 * SSN alone. Maybe a hybrid approach is the answer.
+ *
+ */
+ struct sctp_queued_to_read *at;
+ int queue_needed;
+ uint32_t nxt_todel;
+ struct mbuf *op_err;
+ struct sctp_stream_in *strm;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ strm = &asoc->strmin[control->sinfo_stream];
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
+ }
+ if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
+ /* The incoming sseq is behind where we last delivered? */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
+ strm->last_mid_delivered, control->mid);
+ /*
+ * throw it in the stream so it gets cleaned up in
+ * association destruction
+ */
+ TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
+ if (asoc->idata_supported) {
+ SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+ strm->last_mid_delivered, control->sinfo_tsn,
+ control->sinfo_stream, control->mid);
+ } else {
+ SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+ (uint16_t)strm->last_mid_delivered,
+ control->sinfo_tsn,
+ control->sinfo_stream,
+ (uint16_t)control->mid);
+ }
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+		return;
+	}
+ queue_needed = 1;
+ asoc->size_on_all_streams += control->length;
+ sctp_ucount_incr(asoc->cnt_on_all_streams);
+ nxt_todel = strm->last_mid_delivered + 1;
+ if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ /* can be delivered right away? */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
+ }
+		/* It won't be queued, since it can be delivered directly. */
+ queue_needed = 0;
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_mid_delivered++;
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
+ TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
+ /* all delivered */
+ nxt_todel = strm->last_mid_delivered + 1;
+ if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
+ (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
+ if (control->on_strm_q == SCTP_ON_ORDERED) {
+ TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+#ifdef INVARIANTS
+ } else {
+ panic("Huh control: %p is on_strm_q: %d",
+ control, control->on_strm_q);
+#endif
+ }
+ control->on_strm_q = 0;
+ strm->last_mid_delivered++;
+ /*
+ * We ignore the return of deliver_data here
+ * since we always can hold the chunk on the
+ * d-queue. And we have a finite number that
+ * can be delivered from the strq.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL,
+ SCTP_STR_LOG_FROM_IMMED_DEL);
+ }
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD,
+ SCTP_SO_LOCKED);
+ continue;
+ } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
+ *need_reasm = 1;
+ }
+ break;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ if (queue_needed) {
+ /*
+ * Ok, we did not deliver this guy, find the correct place
+ * to put it on the queue.
+ */
+ if (sctp_place_control_in_stream(strm, asoc, control)) {
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "Queue to str MID: %u duplicate", control->mid);
+ sctp_clean_up_control(stcb, control);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ }
+ }
+}
+
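+/*
+ * Recompute control->length and control->tail_mbuf by walking the whole
+ * control->data chain, pruning zero-length mbufs as we go. If the control
+ * is already on the read queue, each mbuf is also accounted to the socket
+ * receive buffer (the caller is expected to hold any needed SB locks).
+ */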
+static void
+sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
+{
+ struct mbuf *m, *prev = NULL;
+ struct sctp_tcb *stcb;
+
+ stcb = control->stcb;
+ control->held_length = 0;
+ control->length = 0;
+ m = control->data;
+ while (m) {
+ if (SCTP_BUF_LEN(m) == 0) {
+ /* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ control->data = sctp_m_free(m);
+ m = control->data;
+ } else {
+ SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+ m = SCTP_BUF_NEXT(prev);
+ }
+ if (m == NULL) {
+ control->tail_mbuf = prev;
+ }
+ continue;
+ }
+ prev = m;
+ atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+ if (control->on_read_q) {
+			/*
+			 * On the read queue, so we must do the socket-buffer
+			 * accounting; we assume the caller holds any SB locks.
+			 */
+ sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ if (prev) {
+ control->tail_mbuf = prev;
+ }
+}
+
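+/*
+ * Append the mbuf chain m at control->tail_mbuf, pruning zero-length mbufs,
+ * updating control->length and the tail pointer, and returning the number
+ * of bytes appended via *added. If the control has no tail yet, the chain
+ * replaces control->data and sctp_setup_tail_pointer() does the work.
+ */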
+static void
+sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
+{
+	struct mbuf *prev = NULL;
+ struct sctp_tcb *stcb;
+
+ stcb = control->stcb;
+ if (stcb == NULL) {
+#ifdef INVARIANTS
+ panic("Control broken");
+#else
+ return;
+#endif
+ }
+ if (control->tail_mbuf == NULL) {
+ /* TSNH */
+ sctp_m_freem(control->data);
+ control->data = m;
+ sctp_setup_tail_pointer(control);
+ return;
+ }
+ control->tail_mbuf->m_next = m;
+ while (m) {
+ if (SCTP_BUF_LEN(m) == 0) {
+ /* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ control->tail_mbuf->m_next = sctp_m_free(m);
+ m = control->tail_mbuf->m_next;
+ } else {
+ SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+ m = SCTP_BUF_NEXT(prev);
+ }
+ if (m == NULL) {
+ control->tail_mbuf = prev;
+ }
+ continue;
+ }
+ prev = m;
+ if (control->on_read_q) {
+			/*
+			 * On the read queue, so we must do the socket-buffer
+			 * accounting; we assume the caller holds any SB locks.
+			 */
+ sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
+ }
+ *added += SCTP_BUF_LEN(m);
+ atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+ m = SCTP_BUF_NEXT(m);
+ }
+ if (prev) {
+ control->tail_mbuf = prev;
+ }
+}
+
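+/*
+ * Initialize nc as a read-queue entry cloned from control: copy the sinfo
+ * metadata, MID/top_fsn and peer info, and take a reference on whoFrom.
+ * fsn_included is primed to 0xffffffff so that the next expected FSN is 0.
+ */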
+static void
+sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
+{
+ memset(nc, 0, sizeof(struct sctp_queued_to_read));
+ nc->sinfo_stream = control->sinfo_stream;
+ nc->mid = control->mid;
+ TAILQ_INIT(&nc->reasm);
+ nc->top_fsn = control->top_fsn;
+ nc->sinfo_flags = control->sinfo_flags;
+ nc->sinfo_ppid = control->sinfo_ppid;
+ nc->sinfo_context = control->sinfo_context;
+ nc->fsn_included = 0xffffffff;
+ nc->sinfo_tsn = control->sinfo_tsn;
+ nc->sinfo_cumtsn = control->sinfo_cumtsn;
+ nc->sinfo_assoc_id = control->sinfo_assoc_id;
+ nc->whoFrom = control->whoFrom;
+ atomic_add_int(&nc->whoFrom->ref_count, 1);
+ nc->stcb = control->stcb;
+ nc->port_from = control->port_from;
+ nc->do_not_ref_stcb = control->do_not_ref_stcb;
+}
+
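+/*
+ * Reset a control's fsn_included to the given TSN and, if it had already
+ * been queued to the inp's read queue, pull it back off.
+ */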
+static void
+sctp_reset_a_control(struct sctp_queued_to_read *control,
+ struct sctp_inpcb *inp, uint32_t tsn)
+{
+ control->fsn_included = tsn;
+ if (control->on_read_q) {
+ /*
+ * We have to purge it from there,
+ * hopefully this will work :-)
+ */
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ control->on_read_q = 0;
+ }
+}
+
+static int
+sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_in *strm,
+ struct sctp_queued_to_read *control,
+ uint32_t pd_point,
+ int inp_read_lock_held)
+{
+	/* Special handling for the old un-ordered data chunk.
+	 * All the chunks/TSN's go to mid 0. So
+	 * we have to do the old style watching to see
+	 * if we have it all. If we return 1, no other
+	 * control entries on the un-ordered queue will
+	 * be looked at. In theory there should be no
+	 * other entries, unless the peer is sending both
+	 * unordered NDATA and unordered DATA...
+	 */
+ struct sctp_tmit_chunk *chk, *lchk, *tchk;
+ uint32_t fsn;
+ struct sctp_queued_to_read *nc;
+ int cnt_added;
+
+ if (control->first_frag_seen == 0) {
+ /* Nothing we can do, we have not seen the first piece yet */
+ return (1);
+ }
+ /* Collapse any we can */
+ cnt_added = 0;
+restart:
+ fsn = control->fsn_included + 1;
+ /* Now what can we add? */
+ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
+ if (chk->rec.data.fsn == fsn) {
+ /* Ok lets add it */
+ sctp_alloc_a_readq(stcb, nc);
+ if (nc == NULL) {
+ break;
+ }
+ memset(nc, 0, sizeof(struct sctp_queued_to_read));
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
+ fsn++;
+ cnt_added++;
+ chk = NULL;
+ if (control->end_added) {
+ /* We are done */
+ if (!TAILQ_EMPTY(&control->reasm)) {
+ /*
+ * Ok we have to move anything left on
+ * the control queue to a new control.
+ */
+ sctp_build_readq_entry_from_ctl(nc, control);
+ tchk = TAILQ_FIRST(&control->reasm);
+ if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+ if (asoc->size_on_reasm_queue >= tchk->send_size) {
+ asoc->size_on_reasm_queue -= tchk->send_size;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
+#else
+ asoc->size_on_reasm_queue = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ nc->first_frag_seen = 1;
+ nc->fsn_included = tchk->rec.data.fsn;
+ nc->data = tchk->data;
+ nc->sinfo_ppid = tchk->rec.data.ppid;
+ nc->sinfo_tsn = tchk->rec.data.tsn;
+ sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
+ tchk->data = NULL;
+ sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
+ sctp_setup_tail_pointer(nc);
+ tchk = TAILQ_FIRST(&control->reasm);
+ }
+ /* Spin the rest onto the queue */
+ while (tchk) {
+ TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+ TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
+ tchk = TAILQ_FIRST(&control->reasm);
+ }
+ /* Now lets add it to the queue after removing control */
+ TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
+ nc->on_strm_q = SCTP_ON_UNORDERED;
+ if (control->on_strm_q) {
+ TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+ control->on_strm_q = 0;
+ }
+ }
+ if (control->pdapi_started) {
+ strm->pd_api_started = 0;
+ control->pdapi_started = 0;
+ }
+ if (control->on_strm_q) {
+ TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+ control->on_strm_q = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+ }
+ if (control->on_read_q == 0) {
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+#if defined(__Userspace__)
+ } else {
+ sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
+#endif
+ }
+ sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
+ /* Switch to the new guy and continue */
+ control = nc;
+ goto restart;
+ } else {
+ if (nc->on_strm_q == 0) {
+ sctp_free_a_readq(stcb, nc);
+ }
+ }
+ return (1);
+ } else {
+ sctp_free_a_readq(stcb, nc);
+ }
+ } else {
+ /* Can't add more */
+ break;
+ }
+ }
+ if (cnt_added && strm->pd_api_started) {
+#if defined(__Userspace__)
+ sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
+#endif
+ sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ }
+ if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
+ strm->pd_api_started = 1;
+ control->pdapi_started = 1;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+ sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (0);
+ } else {
+ return (1);
+ }
+}
+
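+/*
+ * Insert a fragment for old-style (pre I-DATA) unordered data, where the
+ * FSN is the TSN itself. A FIRST fragment may claim the control's head slot
+ * (possibly swapping with a previously seen FIRST from a later message);
+ * anything else is placed into control->reasm in ascending FSN order.
+ */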
+static void
+sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_queued_to_read *control,
+ struct sctp_tmit_chunk *chk,
+ int *abort_flag)
+{
+ struct sctp_tmit_chunk *at;
+ int inserted;
+ /*
+ * Here we need to place the chunk into the control structure
+ * sorted in the correct order.
+ */
+ if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		/* It's the very first one. */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "chunk is a first fsn: %u becomes fsn_included\n",
+ chk->rec.data.fsn);
+ at = TAILQ_FIRST(&control->reasm);
+ if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
+ /*
+ * The first chunk in the reassembly is
+ * a smaller TSN than this one, even though
+ * this has a first, it must be from a subsequent
+ * msg.
+ */
+ goto place_chunk;
+ }
+ if (control->first_frag_seen) {
+			/*
+			 * In old un-ordered we can reassemble multiple
+			 * messages on one control, as long as the next
+			 * FIRST is greater than the old first (TSN, i.e.
+			 * FSN, wise).
+			 */
+ struct mbuf *tdata;
+ uint32_t tmp;
+
+ if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
+				/* Easy case: the start of a new message beyond the lowest */
+ goto place_chunk;
+ }
+ if ((chk->rec.data.fsn == control->fsn_included) ||
+ (control->pdapi_started)) {
+				/*
+				 * Ok, this should not happen. If it does, we
+				 * started the pd-api on the higher TSN (since
+				 * the equals part is a TSN failure it must be that).
+				 *
+				 * We are completely hosed in that case, since I have
+				 * no way to recover. This really will only happen
+				 * if we can get more TSN's higher before the pd-api-point.
+				 */
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
+
+ return;
+ }
+			/*
+			 * Ok, we have two firsts and the one we just got
+			 * is smaller than the one we previously placed...
+			 * We must swap them out.
+			 */
+ /* swap the mbufs */
+ tdata = control->data;
+ control->data = chk->data;
+ chk->data = tdata;
+ /* Save the lengths */
+ chk->send_size = control->length;
+ /* Recompute length of control and tail pointer */
+ sctp_setup_tail_pointer(control);
+ /* Fix the FSN included */
+ tmp = control->fsn_included;
+ control->fsn_included = chk->rec.data.fsn;
+ chk->rec.data.fsn = tmp;
+ /* Fix the TSN included */
+ tmp = control->sinfo_tsn;
+ control->sinfo_tsn = chk->rec.data.tsn;
+ chk->rec.data.tsn = tmp;
+ /* Fix the PPID included */
+ tmp = control->sinfo_ppid;
+ control->sinfo_ppid = chk->rec.data.ppid;
+ chk->rec.data.ppid = tmp;
+ /* Fix tail pointer */
+ goto place_chunk;
+ }
+ control->first_frag_seen = 1;
+ control->fsn_included = chk->rec.data.fsn;
+ control->top_fsn = chk->rec.data.fsn;
+ control->sinfo_tsn = chk->rec.data.tsn;
+ control->sinfo_ppid = chk->rec.data.ppid;
+ control->data = chk->data;
+ sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ sctp_setup_tail_pointer(control);
+ return;
+ }
+place_chunk:
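+	/*
+	 * Not (or no longer) the head fragment: insert the chunk into
+	 * control->reasm in ascending FSN order; a duplicate FSN aborts
+	 * the association.
+	 */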
+ inserted = 0;
+ TAILQ_FOREACH(at, &control->reasm, sctp_next) {
+ if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
+ /*
+ * This one in queue is bigger than the new one, insert
+ * the new one before at.
+ */
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ inserted = 1;
+ TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+ break;
+ } else if (at->rec.data.fsn == chk->rec.data.fsn) {
+ /*
+ * They sent a duplicate fsn number. This
+ * really should not happen since the FSN is
+ * a TSN and it should have been dropped earlier.
+ */
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
+ return;
+ }
+ }
+ if (inserted == 0) {
+		/* It's at the end */
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ control->top_fsn = chk->rec.data.fsn;
+ TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
+ }
+}
+
+static int
+sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_in *strm, int inp_read_lock_held)
+{
+	/*
+	 * Given a stream, strm, see if any of the SSN's on it
+	 * that are fragmented are ready to deliver. If so, go
+	 * ahead and place them on the read queue. In so placing,
+	 * if we have hit the end, then we need to remove them
+	 * from the stream's queue.
+	 */
+ struct sctp_queued_to_read *control, *nctl = NULL;
+ uint32_t next_to_del;
+ uint32_t pd_point;
+ int ret = 0;
+
+ if (stcb->sctp_socket) {
+ pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
+ stcb->sctp_ep->partial_delivery_point);
+ } else {
+ pd_point = stcb->sctp_ep->partial_delivery_point;
+ }
+ control = TAILQ_FIRST(&strm->uno_inqueue);
+
+ if ((control != NULL) &&
+ (asoc->idata_supported == 0)) {
+ /* Special handling needed for "old" data format */
+ if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
+ goto done_un;
+ }
+ }
+ if (strm->pd_api_started) {
+ /* Can't add more */
+ return (0);
+ }
+ while (control) {
+ SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
+ control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
+ nctl = TAILQ_NEXT(control, next_instrm);
+ if (control->end_added) {
+ /* We just put the last bit on */
+ if (control->on_strm_q) {
+#ifdef INVARIANTS
+ if (control->on_strm_q != SCTP_ON_UNORDERED ) {
+ panic("Huh control: %p on_q: %d -- not unordered?",
+ control, control->on_strm_q);
+ }
+#endif
+ SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+ TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ control->on_strm_q = 0;
+ }
+ if (control->on_read_q == 0) {
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ /* Can we do a PD-API for this un-ordered guy? */
+ if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
+ strm->pd_api_started = 1;
+ control->pdapi_started = 1;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+ break;
+ }
+ }
+ control = nctl;
+ }
+done_un:
+ control = TAILQ_FIRST(&strm->inqueue);
+ if (strm->pd_api_started) {
+ /* Can't add more */
+ return (0);
+ }
+ if (control == NULL) {
+ return (ret);
+ }
+ if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
+		/* Ok, the guy at the top was being partially delivered
+		 * and has completed, so we remove it. Note the
+		 * pd_api flag was taken off when the chunk was merged
+		 * in, in sctp_queue_data_for_reasm below.
+		 */
+ nctl = TAILQ_NEXT(control, next_instrm);
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
+ control, control->end_added, control->mid,
+ control->top_fsn, control->fsn_included,
+ strm->last_mid_delivered);
+ if (control->end_added) {
+ if (control->on_strm_q) {
+#ifdef INVARIANTS
+ if (control->on_strm_q != SCTP_ON_ORDERED ) {
+ panic("Huh control: %p on_q: %d -- not ordered?",
+ control, control->on_strm_q);
+ }
+#endif
+ SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+ TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ control->on_strm_q = 0;
+ }
+ if (strm->pd_api_started && control->pdapi_started) {
+ control->pdapi_started = 0;
+ strm->pd_api_started = 0;
+ }
+ if (control->on_read_q == 0) {
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+ }
+ control = nctl;
+ }
+ }
+ if (strm->pd_api_started) {
+		/* Can't add more; we must have hit an un-ordered entry above that is being partially delivered. */
+ return (0);
+ }
+deliver_more:
+ next_to_del = strm->last_mid_delivered + 1;
+ if (control) {
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
+ control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
+ next_to_del);
+ nctl = TAILQ_NEXT(control, next_instrm);
+ if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
+ (control->first_frag_seen)) {
+ int done;
+
+ /* Ok we can deliver it onto the stream. */
+ if (control->end_added) {
+ /* We are done with it afterwards */
+ if (control->on_strm_q) {
+#ifdef INVARIANTS
+ if (control->on_strm_q != SCTP_ON_ORDERED ) {
+ panic("Huh control: %p on_q: %d -- not ordered?",
+ control, control->on_strm_q);
+ }
+#endif
+ SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+ TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ control->on_strm_q = 0;
+ }
+ ret++;
+ }
+ if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+ /* A singleton now slipping through - mark it non-revokable too */
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ } else if (control->end_added == 0) {
+				/* Check if we can defer adding until it's all there */
+ if ((control->length < pd_point) || (strm->pd_api_started)) {
+ /* Don't need it or cannot add more (one being delivered that way) */
+ goto out;
+ }
+ }
+ done = (control->end_added) && (control->last_frag_seen);
+ if (control->on_read_q == 0) {
+ if (!done) {
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ strm->pd_api_started = 1;
+ control->pdapi_started = 1;
+ }
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+ }
+ strm->last_mid_delivered = next_to_del;
+ if (done) {
+ control = nctl;
+ goto deliver_more;
+ }
+ }
+ }
+out:
+ return (ret);
+}
+
+uint32_t
+sctp_add_chk_to_control(struct sctp_queued_to_read *control,
+ struct sctp_stream_in *strm,
+ struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_tmit_chunk *chk, int hold_rlock)
+{
+ /*
+ * Given a control and a chunk, merge the
+ * data from the chk onto the control and free
+ * up the chunk resources.
+ */
+	uint32_t added = 0;
+ int i_locked = 0;
+
+ if (control->on_read_q && (hold_rlock == 0)) {
+		/*
+		 * It's being pd-api'd, so we must
+		 * take the inp read lock.
+		 */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ i_locked = 1;
+ }
+ if (control->data == NULL) {
+ control->data = chk->data;
+ sctp_setup_tail_pointer(control);
+ } else {
+ sctp_add_to_tail_pointer(control, chk->data, &added);
+ }
+ control->fsn_included = chk->rec.data.fsn;
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+ chk->data = NULL;
+ if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ control->first_frag_seen = 1;
+ control->sinfo_tsn = chk->rec.data.tsn;
+ control->sinfo_ppid = chk->rec.data.ppid;
+ }
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+		/* It's complete */
+ if ((control->on_strm_q) && (control->on_read_q)) {
+ if (control->pdapi_started) {
+ control->pdapi_started = 0;
+ strm->pd_api_started = 0;
+ }
+ if (control->on_strm_q == SCTP_ON_UNORDERED) {
+ /* Unordered */
+ TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+ control->on_strm_q = 0;
+ } else if (control->on_strm_q == SCTP_ON_ORDERED) {
+ /* Ordered */
+ TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+ /*
+ * Don't need to decrement size_on_all_streams,
+ * since control is on the read queue.
+ */
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ control->on_strm_q = 0;
+#ifdef INVARIANTS
+ } else if (control->on_strm_q) {
+ panic("Unknown state on ctrl: %p on_strm_q: %d", control,
+ control->on_strm_q);
+#endif
+ }
+ }
+ control->end_added = 1;
+ control->last_frag_seen = 1;
+ }
+ if (i_locked) {
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return (added);
+}
+
+/*
+ * Dump onto the re-assembly queue, in its proper place. After dumping on the
+ * queue, see if anything can be delivered. If so, pull it off (or as much as
+ * we can). If we run out of space then we must dump what we can and set the
+ * appropriate flag to say we queued what we could.
+ */
+static void
+sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_queued_to_read *control,
+ struct sctp_tmit_chunk *chk,
+ int created_control,
+ int *abort_flag, uint32_t tsn)
+{
+ uint32_t next_fsn;
+ struct sctp_tmit_chunk *at, *nat;
+ struct sctp_stream_in *strm;
+ int do_wakeup, unordered;
+ uint32_t lenadded;
+
+ strm = &asoc->strmin[control->sinfo_stream];
+ /*
+ * For old un-ordered data chunks.
+ */
+ if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
+ unordered = 1;
+ } else {
+ unordered = 0;
+ }
+ /* Must be added to the stream-in queue */
+ if (created_control) {
+ if ((unordered == 0) || (asoc->idata_supported)) {
+ sctp_ucount_incr(asoc->cnt_on_all_streams);
+ }
+ if (sctp_place_control_in_stream(strm, asoc, control)) {
+ /* Duplicate SSN? */
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
+ sctp_clean_up_control(stcb, control);
+ return;
+ }
+		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
+			/* Ok, we created this control; now let's validate
+			 * that it is legal, i.e. there is a B bit set. If
+			 * not, and we have received up to the cum-ack, it
+			 * is invalid.
+			 */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
+ return;
+ }
+ }
+ }
+ if ((asoc->idata_supported == 0) && (unordered == 1)) {
+ sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
+ return;
+ }
+	/*
+	 * Ok, we must queue the chunk into the reassembly portion:
+	 * o if it is the first, it goes to the control mbuf.
+	 * o if it is not first but the next in sequence, it goes to the
+	 *   control, and each succeeding one in order also goes.
+	 * o if it is not in order, we place it on the list in its place.
+	 */
+ if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		/* It's the very first one. */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "chunk is a first fsn: %u becomes fsn_included\n",
+ chk->rec.data.fsn);
+ if (control->first_frag_seen) {
+ /*
+ * Error on senders part, they either
+ * sent us two data chunks with FIRST,
+ * or they sent two un-ordered chunks that
+ * were fragmented at the same time in the same stream.
+ */
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
+ return;
+ }
+ control->first_frag_seen = 1;
+ control->sinfo_ppid = chk->rec.data.ppid;
+ control->sinfo_tsn = chk->rec.data.tsn;
+ control->fsn_included = chk->rec.data.fsn;
+ control->data = chk->data;
+ sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ sctp_setup_tail_pointer(control);
+ asoc->size_on_all_streams += control->length;
+ } else {
+ /* Place the chunk in our list */
+		int inserted = 0;
+
+ if (control->last_frag_seen == 0) {
+ /* Still willing to raise highest FSN seen */
+ if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "We have a new top_fsn: %u\n",
+ chk->rec.data.fsn);
+ control->top_fsn = chk->rec.data.fsn;
+ }
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "The last fsn is now in place fsn: %u\n",
+ chk->rec.data.fsn);
+ control->last_frag_seen = 1;
+ if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "New fsn: %u is not at top_fsn: %u -- abort\n",
+ chk->rec.data.fsn,
+ control->top_fsn);
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
+ return;
+ }
+ }
+ if (asoc->idata_supported || control->first_frag_seen) {
+ /*
+ * For IDATA we always check since we know that
+ * the first fragment is 0. For old DATA we have
+ * to receive the first before we know the first FSN
+ * (which is the TSN).
+ */
+ if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+					/* We have already delivered up to this, so it's a dup */
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
+ return;
+ }
+ }
+ } else {
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ /* Second last? huh? */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Duplicate last fsn: %u (top: %u) -- abort\n",
+ chk->rec.data.fsn, control->top_fsn);
+ sctp_abort_in_reasm(stcb, control,
+ chk, abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
+ return;
+ }
+ if (asoc->idata_supported || control->first_frag_seen) {
+ /*
+ * For IDATA we always check since we know that
+ * the first fragment is 0. For old DATA we have
+ * to receive the first before we know the first FSN
+ * (which is the TSN).
+ */
+
+ if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+					/* We have already delivered up to this, so it's a dup */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "New fsn: %u is already seen in included_fsn: %u -- abort\n",
+ chk->rec.data.fsn, control->fsn_included);
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
+ return;
+ }
+ }
+ /* validate not beyond top FSN if we have seen last one */
+ if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
+ chk->rec.data.fsn,
+ control->top_fsn);
+ sctp_abort_in_reasm(stcb, control, chk,
+ abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
+ return;
+ }
+ }
+ /*
+ * If we reach here, we need to place the
+ * new chunk in the reassembly for this
+ * control.
+ */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "chunk is a not first fsn: %u needs to be inserted\n",
+ chk->rec.data.fsn);
+ TAILQ_FOREACH(at, &control->reasm, sctp_next) {
+ if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ /* Last not at the end? huh? */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Last fragment not last in list: -- abort\n");
+ sctp_abort_in_reasm(stcb, control,
+ chk, abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
+ return;
+ }
+ /*
+ * This one in queue is bigger than the new one, insert
+ * the new one before at.
+ */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Insert it before fsn: %u\n",
+ at->rec.data.fsn);
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+ inserted = 1;
+ break;
+ } else if (at->rec.data.fsn == chk->rec.data.fsn) {
+				/* Gak, he sent me a duplicate stream seq number */
+ /*
+ * foo bar, I guess I will just free this new guy,
+ * should we abort too? FIX ME MAYBE? Or it COULD be
+ * that the SSN's have wrapped. Maybe I should
+ * compare to TSN somehow... sigh for now just blow
+ * away the chunk!
+ */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Duplicate to fsn: %u -- abort\n",
+ at->rec.data.fsn);
+ sctp_abort_in_reasm(stcb, control,
+ chk, abort_flag,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
+ return;
+ }
+ }
+ if (inserted == 0) {
+ /* Goes on the end */
+ SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
+ chk->rec.data.fsn);
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
+ }
+ }
+	/*
+	 * Ok, let's see if we can pull any in-sequence fragments
+	 * up into the control structure, if it makes sense.
+	 */
+ do_wakeup = 0;
+ /*
+ * If the first fragment has not been
+ * seen there is no sense in looking.
+ */
+ if (control->first_frag_seen) {
+ next_fsn = control->fsn_included + 1;
+ TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
+ if (at->rec.data.fsn == next_fsn) {
+ /* We can add this one now to the control */
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
+ control, at,
+ at->rec.data.fsn,
+ next_fsn, control->fsn_included);
+ TAILQ_REMOVE(&control->reasm, at, sctp_next);
+ lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
+ if (control->on_read_q) {
+ do_wakeup = 1;
+ } else {
+					/*
+					 * We only add to the size-on-all-streams
+					 * if it's not on the read q. The read q
+					 * flag will cause a sballoc, so it's accounted
+					 * for there.
+					 */
+ asoc->size_on_all_streams += lenadded;
+ }
+ next_fsn++;
+ if (control->end_added && control->pdapi_started) {
+ if (strm->pd_api_started) {
+ strm->pd_api_started = 0;
+ control->pdapi_started = 0;
+ }
+ if (control->on_read_q == 0) {
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, control->end_added,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ if (do_wakeup) {
+#if defined(__Userspace__)
+ sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
+#endif
+ /* Need to wakeup the reader */
+ sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ }
+}
+
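+/*
+ * Look up the in-progress reassembly for a given MID: ordered messages are
+ * searched by MID on the stream's inqueue; unordered I-DATA by MID on the
+ * uno_inqueue; old unordered DATA always uses the first uno_inqueue entry.
+ */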
+static struct sctp_queued_to_read *
+sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
+{
+ struct sctp_queued_to_read *control;
+
+ if (ordered) {
+ TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
+ if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+ break;
+ }
+ }
+ } else {
+ if (idata_supported) {
+ TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
+ if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+ break;
+ }
+ }
+ } else {
+ control = TAILQ_FIRST(&strm->uno_inqueue);
+ }
+ }
+ return (control);
+}
+
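+/*
+ * Process one DATA or I-DATA chunk: parse the header, weed out duplicates
+ * and out-of-window TSN's, then either express-deliver a complete in-order
+ * message straight to the read queue or hand it to the stream/reassembly
+ * machinery. Returns 1 if the chunk was accepted, 0 if it was dropped or
+ * the association was aborted (*abort_flag set).
+ */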
+static int
+sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct mbuf **m, int offset, int chk_length,
+ struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
+ int *break_flag, int last_chunk, uint8_t chk_type)
+{
+ struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
+ struct sctp_stream_in *strm;
+ uint32_t tsn, fsn, gap, mid;
+ struct mbuf *dmbuf;
+ int the_len;
+ int need_reasm_check = 0;
+ uint16_t sid;
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+ struct sctp_queued_to_read *control, *ncontrol;
+ uint32_t ppid;
+ uint8_t chk_flags;
+ struct sctp_stream_reset_list *liste;
+ int ordered;
+ size_t clen;
+ int created_control = 0;
+
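+	/*
+	 * Parse the chunk header. Note that for I-DATA the FIRST fragment
+	 * carries the PPID in the spot where later fragments carry the FSN,
+	 * so a FIRST fragment implicitly has FSN 0; for old DATA the FSN is
+	 * simply the TSN.
+	 */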
+ if (chk_type == SCTP_IDATA) {
+ struct sctp_idata_chunk *chunk, chunk_buf;
+
+ chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
+ sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
+ chk_flags = chunk->ch.chunk_flags;
+ clen = sizeof(struct sctp_idata_chunk);
+ tsn = ntohl(chunk->dp.tsn);
+ sid = ntohs(chunk->dp.sid);
+ mid = ntohl(chunk->dp.mid);
+ if (chk_flags & SCTP_DATA_FIRST_FRAG) {
+ fsn = 0;
+ ppid = chunk->dp.ppid_fsn.ppid;
+ } else {
+ fsn = ntohl(chunk->dp.ppid_fsn.fsn);
+ ppid = 0xffffffff; /* Use as an invalid value. */
+ }
+ } else {
+ struct sctp_data_chunk *chunk, chunk_buf;
+
+ chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
+ sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
+ chk_flags = chunk->ch.chunk_flags;
+ clen = sizeof(struct sctp_data_chunk);
+ tsn = ntohl(chunk->dp.tsn);
+ sid = ntohs(chunk->dp.sid);
+ mid = (uint32_t)(ntohs(chunk->dp.ssn));
+ fsn = tsn;
+ ppid = chunk->dp.ppid;
+ }
+ if ((size_t)chk_length == clen) {
+		/*
+		 * Need to send an abort since we had an
+		 * empty data chunk.
+		 */
+ op_err = sctp_generate_no_user_data_cause(tsn);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return (0);
+ }
+ if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
+ asoc->send_sack = 1;
+ }
+ ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
+ }
+ if (stcb == NULL) {
+ return (0);
+ }
+ SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
+ if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
+ /* It is a duplicate */
+ SCTP_STAT_INCR(sctps_recvdupdata);
+ if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+ /* Record a dup for the next outbound sack */
+ asoc->dup_tsns[asoc->numduptsns] = tsn;
+ asoc->numduptsns++;
+ }
+ asoc->send_sack = 1;
+ return (0);
+ }
+ /* Calculate the number of TSN's between the base and this TSN */
+ SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+ if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
+ /* Can't hold the bit in the mapping at max array, toss it */
+ return (0);
+ }
+ if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (sctp_expand_mapping_array(asoc, gap)) {
+ /* Can't expand, drop it */
+ return (0);
+ }
+ }
+ if (SCTP_TSN_GT(tsn, *high_tsn)) {
+ *high_tsn = tsn;
+ }
+ /* See if we have received this one already */
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
+ SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
+ SCTP_STAT_INCR(sctps_recvdupdata);
+ if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+ /* Record a dup for the next outbound sack */
+ asoc->dup_tsns[asoc->numduptsns] = tsn;
+ asoc->numduptsns++;
+ }
+ asoc->send_sack = 1;
+ return (0);
+ }
+ /*
+ * Check to see about the GONE flag, duplicates would cause a sack
+ * to be sent up above
+ */
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
+ /*
+ * wait a minute, this guy is gone, there is no longer a
+ * receiver. Send peer an ABORT!
+ */
+ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return (0);
+ }
+ /*
+ * Now before going further we see if there is room. If NOT then we
+ * MAY let one through only IF this TSN is the one we are waiting
+ * for on a partial delivery API.
+ */
+
+ /* Is the stream valid? */
+ if (sid >= asoc->streamincnt) {
+ struct sctp_error_invalid_stream *cause;
+
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
+ 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err != NULL) {
+ /* add some space up front so prepend will work well */
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ cause = mtod(op_err, struct sctp_error_invalid_stream *);
+			/*
+			 * Error causes are just params, and this one has
+			 * two back-to-back phdrs: one with the error type
+			 * and size, the other with the stream id and a rsvd.
+			 */
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
+ cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
+ cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
+ cause->stream_id = htons(sid);
+ cause->reserved = htons(0);
+ sctp_queue_op_err(stcb, op_err);
+ }
+ SCTP_STAT_INCR(sctps_badsid);
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ if (tsn == (asoc->cumulative_tsn + 1)) {
+ /* Update cum-ack */
+ asoc->cumulative_tsn = tsn;
+ }
+ return (0);
+ }
+	/*
+	 * If it's a fragmented message, let's see if we can
+	 * find the control on the reassembly queues.
+	 */
+ if ((chk_type == SCTP_IDATA) &&
+ ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
+ (fsn == 0)) {
+		/*
+		 * The first *must* be fsn 0, and other
+		 * (middle/end) pieces can *not* be fsn 0.
+		 * XXX: This can happen in case of a wrap around.
+		 * Ignore it for now.
+		 */
+ SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
+ goto err_out;
+ }
+ control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
+ SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
+ chk_flags, control);
+ if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+ /* See if we can find the re-assembly entity */
+ if (control != NULL) {
+ /* We found something, does it belong? */
+ if (ordered && (mid != control->mid)) {
+ SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
+ err_out:
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return (0);
+ }
+ if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
+ /* We can't have a switched order with an unordered chunk */
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
+ tsn);
+ goto err_out;
+ }
+ if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
+				/* We can't have a switched unordered with an ordered chunk */
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
+ tsn);
+ goto err_out;
+ }
+ }
+ } else {
+		/* It's a complete segment. Let's validate we
+		 * don't have a re-assembly going on with
+		 * the same Stream/Seq (for ordered) or in
+		 * the same Stream for unordered.
+		 */
+ if (control != NULL) {
+ if (ordered || asoc->idata_supported) {
+ SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
+ chk_flags, mid);
+ SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
+ goto err_out;
+ } else {
+ if ((tsn == control->fsn_included + 1) &&
+ (control->end_added == 0)) {
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "Illegal message sequence, missing end for MID: %8.8x",
+ control->fsn_included);
+ goto err_out;
+ } else {
+ control = NULL;
+ }
+ }
+ }
+ }
+ /* now do the tests */
+ if (((asoc->cnt_on_all_streams +
+ asoc->cnt_on_reasm_queue +
+ asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
+ (((int)asoc->my_rwnd) <= 0)) {
+ /*
+ * When we have NO room in the rwnd we check to make sure
+ * the reader is doing its job...
+ */
+ if (stcb->sctp_socket->so_rcv.sb_cc) {
+ /* some to read, wake-up */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (0);
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* now is it in the mapping array of what we have accepted? */
+ if (chk_type == SCTP_DATA) {
+ if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
+ SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+ /* Nope not in the valid range dump it */
+ dump_packet:
+ sctp_set_rwnd(stcb, asoc);
+ if ((asoc->cnt_on_all_streams +
+ asoc->cnt_on_reasm_queue +
+ asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
+ SCTP_STAT_INCR(sctps_datadropchklmt);
+ } else {
+ SCTP_STAT_INCR(sctps_datadroprwnd);
+ }
+ *break_flag = 1;
+ return (0);
+ }
+ } else {
+ if (control == NULL) {
+ goto dump_packet;
+ }
+ if (SCTP_TSN_GT(fsn, control->top_fsn)) {
+ goto dump_packet;
+ }
+ }
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
+ asoc->tsn_in_at = 0;
+ asoc->tsn_in_wrapped = 1;
+ }
+ asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
+ asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
+ asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
+ asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
+	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
+ asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
+ asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
+ asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
+ asoc->tsn_in_at++;
+#endif
+	/*
+	 * Before we continue, let's validate that we are not being fooled by
+	 * an evil attacker. We can only have Nk chunks based on our TSN
+	 * spread allowed by the mapping array, N * 8 bits, so there is no
+	 * way our stream sequence numbers could have wrapped. We of course
+	 * only validate the FIRST fragment, so the bit must be set.
+	 */
+ if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
+ (TAILQ_EMPTY(&asoc->resetHead)) &&
+ (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
+ SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
+ /* The incoming sseq is behind where we last delivered? */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
+ mid, asoc->strmin[sid].last_mid_delivered);
+
+ if (asoc->idata_supported) {
+ SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+ asoc->strmin[sid].last_mid_delivered,
+ tsn,
+ sid,
+ mid);
+ } else {
+ SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+ (uint16_t)asoc->strmin[sid].last_mid_delivered,
+ tsn,
+ sid,
+ (uint16_t)mid);
+ }
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return (0);
+ }
+ if (chk_type == SCTP_IDATA) {
+ the_len = (chk_length - sizeof(struct sctp_idata_chunk));
+ } else {
+ the_len = (chk_length - sizeof(struct sctp_data_chunk));
+ }
+ if (last_chunk == 0) {
+ if (chk_type == SCTP_IDATA) {
+ dmbuf = SCTP_M_COPYM(*m,
+ (offset + sizeof(struct sctp_idata_chunk)),
+ the_len, M_NOWAIT);
+ } else {
+ dmbuf = SCTP_M_COPYM(*m,
+ (offset + sizeof(struct sctp_data_chunk)),
+ the_len, M_NOWAIT);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
+ }
+#endif
+ } else {
+ /* We can steal the last chunk */
+		int l_len;
+
+		dmbuf = *m;
+ /* lop off the top part */
+ if (chk_type == SCTP_IDATA) {
+ m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
+ } else {
+ m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
+ }
+ if (SCTP_BUF_NEXT(dmbuf) == NULL) {
+ l_len = SCTP_BUF_LEN(dmbuf);
+ } else {
+			/* need to count up the size; hopefully
+			 * we do not hit this too often :-0
+			 */
+ struct mbuf *lat;
+
+ l_len = 0;
+ for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
+ l_len += SCTP_BUF_LEN(lat);
+ }
+ }
+ if (l_len > the_len) {
+ /* Trim the end round bytes off too */
+ m_adj(dmbuf, -(l_len - the_len));
+ }
+ }
+ if (dmbuf == NULL) {
+ SCTP_STAT_INCR(sctps_nomem);
+ return (0);
+ }
+	/*
+	 * Now no matter what, we need a control; get one
+	 * if we don't have one (we may have gotten it
+	 * above when we found the message was fragmented).
+	 */
+ if (control == NULL) {
+ sctp_alloc_a_readq(stcb, control);
+ sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+ ppid,
+ sid,
+ chk_flags,
+ NULL, fsn, mid);
+ if (control == NULL) {
+ SCTP_STAT_INCR(sctps_nomem);
+ return (0);
+ }
+ if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+ struct mbuf *mm;
+
+ control->data = dmbuf;
+ control->tail_mbuf = NULL;
+ for (mm = control->data; mm; mm = mm->m_next) {
+ control->length += SCTP_BUF_LEN(mm);
+ if (SCTP_BUF_NEXT(mm) == NULL) {
+ control->tail_mbuf = mm;
+ }
+ }
+ control->end_added = 1;
+ control->last_frag_seen = 1;
+ control->first_frag_seen = 1;
+ control->fsn_included = fsn;
+ control->top_fsn = fsn;
+ }
+ created_control = 1;
+ }
+ SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
+ chk_flags, ordered, mid, control);
+ if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
+ TAILQ_EMPTY(&asoc->resetHead) &&
+ ((ordered == 0) ||
+ (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
+ TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
+ /* Candidate for express delivery */
+		/*
+		 * It's not fragmented, no PD-API is up, nothing in the
+		 * delivery queue, it's un-ordered OR ordered and the next to
+		 * deliver AND nothing else is stuck on the stream queue,
+		 * and there is room for it in the socket buffer. Let's just
+		 * stuff it up the buffer....
+		 */
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
+ control, mid);
+
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control, &stcb->sctp_socket->so_rcv,
+ 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+
+ if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
+ /* for ordered, bump what we delivered */
+ asoc->strmin[sid].last_mid_delivered++;
+ }
+ SCTP_STAT_INCR(sctps_recvexpress);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del_alt(stcb, tsn, mid, sid,
+ SCTP_STR_LOG_FROM_EXPRS_DEL);
+ }
+ control = NULL;
+ goto finish_express_del;
+ }
+
+ /* Now will we need a chunk too? */
+ if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* No memory so we drop the chunk */
+ SCTP_STAT_INCR(sctps_nomem);
+ if (last_chunk == 0) {
+ /* we copied it, free the copy */
+ sctp_m_freem(dmbuf);
+ }
+ return (0);
+ }
+ chk->rec.data.tsn = tsn;
+ chk->no_fr_allowed = 0;
+ chk->rec.data.fsn = fsn;
+ chk->rec.data.mid = mid;
+ chk->rec.data.sid = sid;
+ chk->rec.data.ppid = ppid;
+ chk->rec.data.context = stcb->asoc.context;
+ chk->rec.data.doing_fast_retransmit = 0;
+ chk->rec.data.rcv_flags = chk_flags;
+ chk->asoc = asoc;
+ chk->send_size = the_len;
+ chk->whoTo = net;
+ SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
+ chk,
+ control, mid);
+ atomic_add_int(&net->ref_count, 1);
+ chk->data = dmbuf;
+ }
+ /* Set the appropriate TSN mark */
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ } else {
+ SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
+ asoc->highest_tsn_inside_map = tsn;
+ }
+ }
+ /* Now is it complete (i.e. not fragmented)? */
+ if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+		/*
+		 * Special check for when streams are resetting. We
+		 * could be smarter about this and check the
+		 * actual stream to see if it is not being reset...
+		 * that way we would not create a HOLB (head-of-line
+		 * blocking) amongst streams being reset and those
+		 * not being reset.
+		 */
+ if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+ SCTP_TSN_GT(tsn, liste->tsn)) {
+			/*
+			 * Yep, it's past where we need to reset... go
+			 * ahead and queue it.
+			 */
+ if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
+ /* first one on */
+ TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+ } else {
+ struct sctp_queued_to_read *lcontrol, *nlcontrol;
+				unsigned char inserted = 0;
+
+				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
+					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
+						continue;
+					} else {
+ /* found it */
+ TAILQ_INSERT_BEFORE(lcontrol, control, next);
+ inserted = 1;
+ break;
+ }
+ }
+ if (inserted == 0) {
+					/*
+					 * Must be put at the end.
+					 */
+ TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+ }
+ }
+ goto finish_express_del;
+ }
+ if (chk_flags & SCTP_DATA_UNORDERED) {
+ /* queue directly into socket buffer */
+ SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
+ control, mid);
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+
+ } else {
+ SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
+ mid);
+ sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
+ if (*abort_flag) {
+ if (last_chunk) {
+ *m = NULL;
+ }
+ return (0);
+ }
+ }
+ goto finish_express_del;
+ }
+	/* If we reach here, it's a reassembly */
+ need_reasm_check = 1;
+ SCTPDBG(SCTP_DEBUG_XXX,
+ "Queue data to stream for reasm control: %p MID: %u\n",
+ control, mid);
+ sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
+ if (*abort_flag) {
+ /*
+ * the assoc is now gone and chk was put onto the
+ * reasm queue, which has all been freed.
+ */
+ if (last_chunk) {
+ *m = NULL;
+ }
+ return (0);
+ }
+finish_express_del:
+ /* Here we tidy up things */
+ if (tsn == (asoc->cumulative_tsn + 1)) {
+ /* Update cum-ack */
+ asoc->cumulative_tsn = tsn;
+ }
+ if (last_chunk) {
+ *m = NULL;
+ }
+ if (ordered) {
+ SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
+ } else {
+ SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
+ }
+ SCTP_STAT_INCR(sctps_recvdata);
+ /* Set it present please */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
+ }
+ if (need_reasm_check) {
+ (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
+ need_reasm_check = 0;
+ }
+ /* check the special flag for stream resets */
+ if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+ SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
+		/*
+		 * We have finished working through the backlogged TSN's, now
+		 * time to reset streams. 1: call reset function. 2: free
+		 * pending_reply space. 3: distribute any chunks in
+		 * pending_reply_queue.
+		 */
+ sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
+ TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+ sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
+ SCTP_FREE(liste, SCTP_M_STRESET);
+ /*sa_ignore FREED_MEMORY*/
+ liste = TAILQ_FIRST(&asoc->resetHead);
+ if (TAILQ_EMPTY(&asoc->resetHead)) {
+ /* All can be removed */
+ TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
+ TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
+ strm = &asoc->strmin[control->sinfo_stream];
+ sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
+ if (*abort_flag) {
+ return (0);
+ }
+ if (need_reasm_check) {
+ (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
+ need_reasm_check = 0;
+ }
+ }
+ } else {
+ TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
+ if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
+ break;
+ }
+				/*
+				 * If control->sinfo_tsn is <= liste->tsn we can
+				 * process it, which is the NOT of
+				 * control->sinfo_tsn > liste->tsn.
+				 */
+ TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
+ strm = &asoc->strmin[control->sinfo_stream];
+ sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
+ if (*abort_flag) {
+ return (0);
+ }
+ if (need_reasm_check) {
+ (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
+ need_reasm_check = 0;
+ }
+ }
+ }
+ }
+ return (1);
+}
+
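+/*
+ * For each byte value this table gives the number of consecutive 1-bits
+ * starting at the least significant bit, e.g. 0x17 (binary 00010111) -> 3.
+ * The slide code below uses it to find the first missing TSN inside the
+ * first mapping-array byte that is not completely filled (0xff).
+ */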
+static const int8_t sctp_map_lookup_tab[256] = {
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 6,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 7,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 6,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 8
+};
+
+void
+sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
+{
+ /*
+ * Now we also need to check the mapping array in a couple of ways.
+ * 1) Did we move the cum-ack point?
+ * 2) Can we slide the mapping arrays down?
+ *
+ * When you first glance at this you might think
+ * that all entries that make up the position
+ * of the cum-ack would be in the nr-mapping array
+ * only... i.e. things up to the cum-ack are always
+ * deliverable. That's true with one exception: when
+ * it's a fragmented message we may not deliver the data
+ * until some threshold (or all of it) is in place. So
+ * we must OR the nr_mapping_array and mapping_array to
+ * get a true picture of the cum-ack.
+ */
+ struct sctp_association *asoc;
+ int at;
+ uint8_t val;
+ int slide_from, slide_end, lgap, distance;
+ uint32_t old_cumack, old_base, old_highest, highest_tsn;
+
+ asoc = &stcb->asoc;
+
+ old_cumack = asoc->cumulative_tsn;
+ old_base = asoc->mapping_array_base_tsn;
+ old_highest = asoc->highest_tsn_inside_map;
+ /*
+ * We could probably improve this a small bit by calculating the
+ * offset of the current cum-ack as the starting point.
+ */
+ at = 0;
+ for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
+ val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
+ if (val == 0xff) {
+ at += 8;
+ } else {
+ /* there is a 0 bit */
+ at += sctp_map_lookup_tab[val];
+ break;
+ }
+ }
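+ /*
+ * 'at' now counts the TSNs received in sequence starting at
+ * mapping_array_base_tsn, so base + (at - 1) is the new cumulative
+ * TSN ack point.
+ */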
+ asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
+
+ if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
+ SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
+#ifdef INVARIANTS
+ panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
+#else
+ SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
+ sctp_print_mapping_array(asoc);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+#endif
+ }
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ }
+ if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
+ /* The complete array was completed by a single FR */
+ /* highest becomes the cum-ack */
+ int clr;
+#ifdef INVARIANTS
+ unsigned int i;
+#endif
+
+ /* clear the array */
+ clr = ((at+7) >> 3);
+ if (clr > asoc->mapping_array_size) {
+ clr = asoc->mapping_array_size;
+ }
+ memset(asoc->mapping_array, 0, clr);
+ memset(asoc->nr_mapping_array, 0, clr);
+#ifdef INVARIANTS
+ for (i = 0; i < asoc->mapping_array_size; i++) {
+ if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
+ SCTP_PRINTF("Error Mapping array's not clean at clear\n");
+ sctp_print_mapping_array(asoc);
+ }
+ }
+#endif
+ asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ } else if (at >= 8) {
+ /* we can slide the mapping array down */
+ /* slide_from holds where we hit the first NON 0xff byte */
+
+ /*
+ * now calculate the ceiling of the move using our highest
+ * TSN value
+ */
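+ /*
+ * lgap is the bit offset of highest_tsn from the map base, so
+ * lgap >> 3 is the index of the byte holding that TSN.
+ */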
+ SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
+ slide_end = (lgap >> 3);
+ if (slide_end < slide_from) {
+ sctp_print_mapping_array(asoc);
+#ifdef INVARIANTS
+ panic("impossible slide");
+#else
+ SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
+ lgap, slide_end, slide_from, at);
+ return;
+#endif
+ }
+ if (slide_end > asoc->mapping_array_size) {
+#ifdef INVARIANTS
+ panic("would overrun buffer");
+#else
+ SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
+ asoc->mapping_array_size, slide_end);
+ slide_end = asoc->mapping_array_size;
+#endif
+ }
+ distance = (slide_end - slide_from) + 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(old_base, old_cumack, old_highest,
+ SCTP_MAP_PREPARE_SLIDE);
+ sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
+ (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
+ }
+ if (distance + slide_from > asoc->mapping_array_size ||
+ distance < 0) {
+ /*
+ * Here we do NOT slide forward the array so that
+ * hopefully when more data comes in to fill it up
+ * we will be able to slide it forward. Really I
+ * don't think this should happen :-0
+ */
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
+ (uint32_t) asoc->mapping_array_size,
+ SCTP_MAP_SLIDE_NONE);
+ }
+ } else {
+ int ii;
+
+ for (ii = 0; ii < distance; ii++) {
+ asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
+ asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
+
+ }
+ for (ii = distance; ii < asoc->mapping_array_size; ii++) {
+ asoc->mapping_array[ii] = 0;
+ asoc->nr_mapping_array[ii] = 0;
+ }
+ if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
+ asoc->highest_tsn_inside_map += (slide_from << 3);
+ }
+ if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
+ asoc->highest_tsn_inside_nr_map += (slide_from << 3);
+ }
+ asoc->mapping_array_base_tsn += (slide_from << 3);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(asoc->mapping_array_base_tsn,
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
+ SCTP_MAP_SLIDE_RESULT);
+ }
+ }
+ }
+}
+
+void
+sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
+{
+ struct sctp_association *asoc;
+ uint32_t highest_tsn;
+ int is_a_gap;
+
+ sctp_slide_mapping_arrays(stcb);
+ asoc = &stcb->asoc;
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ }
+ /* Is there a gap now? */
+ is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
+
+ /*
+ * Now we need to see if we need to queue a sack or just start the
+ * timer (if allowed).
+ */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
+ /*
+ * Ok, special case: in the SHUTDOWN-SENT state we
+ * make sure the SACK timer is off and instead send a
+ * SHUTDOWN and a SACK.
+ */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
+ }
+ sctp_send_shutdown(stcb,
+ ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
+ if (is_a_gap) {
+ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ /*
+ * CMT DAC algorithm: increase number of packets
+ * received since last ack
+ */
+ stcb->asoc.cmt_dac_pkts_rcvd++;
+
+ if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */
+ ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
+ * longer is one */
+ (stcb->asoc.numduptsns) || /* we have dup's */
+ (is_a_gap) || /* is still a gap */
+ (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
+ (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
+ ) {
+
+ if ((stcb->asoc.sctp_cmt_on_off > 0) &&
+ (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
+ (stcb->asoc.send_sack == 0) &&
+ (stcb->asoc.numduptsns == 0) &&
+ (stcb->asoc.delayed_ack) &&
+ (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
+
+ /*
+ * CMT DAC algorithm: With CMT,
+ * delay acks even in the face of
+ * reordering. Therefore, acks
+ * that do not have to be sent
+ * because of the above reasons
+ * will be delayed. That is, acks
+ * that would have been sent due to
+ * gap reports will be delayed with
+ * DAC. Start the delayed ack timer.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ /*
+ * Ok, we must build a SACK since either
+ * the timer is pending, we got our
+ * first packet, OR there are gaps or
+ * duplicates.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
+ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ }
+ }
+}
+
+int
+sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint32_t *high_tsn)
+{
+ struct sctp_chunkhdr *ch, chunk_buf;
+ struct sctp_association *asoc;
+ int num_chunks = 0; /* number of control chunks processed */
+ int stop_proc = 0;
+ int break_flag, last_chunk;
+ int abort_flag = 0, was_a_gap;
+ struct mbuf *m;
+ uint32_t highest_tsn;
+ uint16_t chk_length;
+
+ /* set the rwnd */
+ sctp_set_rwnd(stcb, &stcb->asoc);
+
+ m = *mm;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ }
+ was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
+ /*
+ * setup where we got the last DATA packet from for any SACK that
+ * may need to go out. Don't bump the net. This is done ONLY when a
+ * chunk is assigned.
+ */
+ asoc->last_data_chunk_from = net;
+
+ /*-
+ * Now before we proceed we must figure out if this is a wasted
+ * cluster... i.e. it is a small packet sent in and yet the driver
+ * underneath allocated a full cluster for it. If so we must copy it
+ * to a smaller mbuf and free up the cluster mbuf. This will help
+ * with cluster starvation.
+ */
+ if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
+ /* we only handle mbufs that are singletons.. not chains */
+ m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
+ if (m) {
+ /* ok, let's see if we can copy the data up */
+ caddr_t *from, *to;
+ /* get the pointers and copy */
+ to = mtod(m, caddr_t *);
+ from = mtod((*mm), caddr_t *);
+ memcpy(to, from, SCTP_BUF_LEN((*mm)));
+ /* copy the length and free up the old */
+ SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
+ sctp_m_freem(*mm);
+ /* success, back copy */
+ *mm = m;
+ } else {
+ /* We are in trouble in the mbuf world .. yikes */
+ m = *mm;
+ }
+ }
+ /* get pointer to the first chunk header */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr),
+ (uint8_t *)&chunk_buf);
+ if (ch == NULL) {
+ return (1);
+ }
+ /*
+ * process all DATA chunks...
+ */
+ *high_tsn = asoc->cumulative_tsn;
+ break_flag = 0;
+ asoc->data_pkts_seen++;
+ while (stop_proc == 0) {
+ /* validate chunk length */
+ chk_length = ntohs(ch->chunk_length);
+ if (length - *offset < chk_length) {
+ /* all done, mutilated chunk */
+ stop_proc = 1;
+ continue;
+ }
+ if ((asoc->idata_supported == 1) &&
+ (ch->chunk_type == SCTP_DATA)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return (2);
+ }
+ if ((asoc->idata_supported == 0) &&
+ (ch->chunk_type == SCTP_IDATA)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return (2);
+ }
+ if ((ch->chunk_type == SCTP_DATA) ||
+ (ch->chunk_type == SCTP_IDATA)) {
+ uint16_t clen;
+
+ if (ch->chunk_type == SCTP_DATA) {
+ clen = sizeof(struct sctp_data_chunk);
+ } else {
+ clen = sizeof(struct sctp_idata_chunk);
+ }
+ if (chk_length < clen) {
+ /*
+ * Need to send an abort since we had an
+ * invalid data chunk.
+ */
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
+ ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
+ chk_length);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return (2);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB1, 0);
+#endif
+ if (SCTP_SIZE32(chk_length) == (length - *offset)) {
+ last_chunk = 1;
+ } else {
+ last_chunk = 0;
+ }
+ if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
+ chk_length, net, high_tsn, &abort_flag, &break_flag,
+ last_chunk, ch->chunk_type)) {
+ num_chunks++;
+ }
+ if (abort_flag)
+ return (2);
+
+ if (break_flag) {
+ /*
+ * Set because we ran out of rwnd space and
+ * have no drop report space left.
+ */
+ stop_proc = 1;
+ continue;
+ }
+ } else {
+ /* not a data chunk in the data region */
+ switch (ch->chunk_type) {
+ case SCTP_INITIATION:
+ case SCTP_INITIATION_ACK:
+ case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK:
+ case SCTP_HEARTBEAT_REQUEST:
+ case SCTP_HEARTBEAT_ACK:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_SHUTDOWN:
+ case SCTP_SHUTDOWN_ACK:
+ case SCTP_OPERATION_ERROR:
+ case SCTP_COOKIE_ECHO:
+ case SCTP_COOKIE_ACK:
+ case SCTP_ECN_ECHO:
+ case SCTP_ECN_CWR:
+ case SCTP_SHUTDOWN_COMPLETE:
+ case SCTP_AUTHENTICATION:
+ case SCTP_ASCONF_ACK:
+ case SCTP_PACKET_DROPPED:
+ case SCTP_STREAM_RESET:
+ case SCTP_FORWARD_CUM_TSN:
+ case SCTP_ASCONF:
+ {
+ /*
+ * Now, what do we do with KNOWN chunks that
+ * are NOT in the right place?
+ *
+ * We treat them as a protocol violation and
+ * abort the association below. We may later
+ * want to add sysctl stuff to switch out and
+ * possibly process them instead.
+ */
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
+ ch->chunk_type);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return (2);
+ }
+ default:
+ /*
+ * Unknown chunk type: use bit rules after
+ * checking length
+ */
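+ /*
+ * The two high-order bits of an unknown chunk
+ * type encode the required action (RFC 4960,
+ * section 3.2): 0x40 set means report the chunk
+ * in an ERROR, 0x80 set means skip it and keep
+ * processing; with 0x80 clear we stop processing
+ * the rest of the packet.
+ */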
+ if (chk_length < sizeof(struct sctp_chunkhdr)) {
+ /*
+ * Need to send an abort since we had an
+ * invalid chunk.
+ */
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return (2);
+ }
+ if (ch->chunk_type & 0x40) {
+ /* Add an error report to the queue */
+ struct mbuf *op_err;
+ struct sctp_gen_error_cause *cause;
+
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
+ 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err != NULL) {
+ cause = mtod(op_err, struct sctp_gen_error_cause *);
+ cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
+ cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
+ SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
+ if (SCTP_BUF_NEXT(op_err) != NULL) {
+ sctp_queue_op_err(stcb, op_err);
+ } else {
+ sctp_m_freem(op_err);
+ }
+ }
+ }
+ if ((ch->chunk_type & 0x80) == 0) {
+ /* discard the rest of this packet */
+ stop_proc = 1;
+ } /* else skip this bad chunk and
+ * continue... */
+ break;
+ } /* switch of chunk type */
+ }
+ *offset += SCTP_SIZE32(chk_length);
+ if ((*offset >= length) || stop_proc) {
+ /* no more data left in the mbuf chain */
+ stop_proc = 1;
+ continue;
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr),
+ (uint8_t *)&chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ stop_proc = 1;
+ continue;
+ }
+ }
+ if (break_flag) {
+ /*
+ * we need to report rwnd overrun drops.
+ */
+ sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
+ }
+ if (num_chunks) {
+ /*
+ * Did we get data? If so, update the time for auto-close and
+ * give the peer credit for being alive.
+ */
+ SCTP_STAT_INCR(sctps_recvpktwithdata);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
+ }
+ /* now service all of the reassm queue if needed */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
+ /* Assure that we ack right away */
+ stcb->asoc.send_sack = 1;
+ }
+ /* Start a sack timer or QUEUE a SACK for sending */
+ sctp_sack_check(stcb, was_a_gap);
+ return (0);
+}
+
+static int
+sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
+ uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
+ int *num_frs,
+ uint32_t *biggest_newly_acked_tsn,
+ uint32_t *this_sack_lowest_newack,
+ int *rto_ok)
+{
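+ /*
+ * Walk the TSNs covered by one Gap Ack Block: frag_strt and frag_end
+ * are offsets relative to the SACK's cumulative TSN (last_tsn), so
+ * the block covers last_tsn + frag_strt through last_tsn + frag_end.
+ */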
+ struct sctp_tmit_chunk *tp1;
+ unsigned int theTSN;
+ int j, wake_him = 0, circled = 0;
+
+ /* Recover the tp1 we last saw */
+ tp1 = *p_tp1;
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ }
+ for (j = frag_strt; j <= frag_end; j++) {
+ theTSN = j + last_tsn;
+ while (tp1) {
+ if (tp1->rec.data.doing_fast_retransmit)
+ (*num_frs) += 1;
+
+ /*-
+ * CMT: CUCv2 algorithm. For each TSN being
+ * processed from the sent queue, track the
+ * next expected pseudo-cumack, or
+ * rtx_pseudo_cumack, if required. Separate
+ * cumack trackers for first transmissions,
+ * and retransmissions.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->whoTo->find_pseudo_cumack == 1) &&
+ (tp1->snd_count == 1)) {
+ tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
+ tp1->whoTo->find_pseudo_cumack = 0;
+ }
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
+ (tp1->snd_count > 1)) {
+ tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
+ tp1->whoTo->find_rtx_pseudo_cumack = 0;
+ }
+ if (tp1->rec.data.tsn == theTSN) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*-
+ * must be held until
+ * cum-ack passes
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ /*-
+ * If it is less than RESEND, it is
+ * now no longer in flight.
+ * Higher values may already be set
+ * via previous Gap Ack Blocks...
+ * i.e. ACKED or RESEND.
+ */
+ if (SCTP_TSN_GT(tp1->rec.data.tsn,
+ *biggest_newly_acked_tsn)) {
+ *biggest_newly_acked_tsn = tp1->rec.data.tsn;
+ }
+ /*-
+ * CMT: SFR algo (and HTNA) - set
+ * saw_newack to 1 for dest being
+ * newly acked. update
+ * this_sack_highest_newack if
+ * appropriate.
+ */
+ if (tp1->rec.data.chunk_was_revoked == 0)
+ tp1->whoTo->saw_newack = 1;
+
+ if (SCTP_TSN_GT(tp1->rec.data.tsn,
+ tp1->whoTo->this_sack_highest_newack)) {
+ tp1->whoTo->this_sack_highest_newack =
+ tp1->rec.data.tsn;
+ }
+ /*-
+ * CMT DAC algo: also update
+ * this_sack_lowest_newack
+ */
+ if (*this_sack_lowest_newack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*this_sack_lowest_newack,
+ last_tsn,
+ tp1->rec.data.tsn,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ *this_sack_lowest_newack = tp1->rec.data.tsn;
+ }
+ /*-
+ * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
+ * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
+ * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
+ * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
+ * Separate pseudo_cumack trackers for first transmissions and
+ * retransmissions.
+ */
+ if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
+ }
+ if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*biggest_newly_acked_tsn,
+ last_tsn,
+ tp1->rec.data.tsn,
+ frag_strt,
+ frag_end,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+ sctp_flight_size_decrease(tp1);
+ if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+ tp1);
+ }
+ sctp_total_flight_decrease(stcb, tp1);
+
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+ /*-
+ * True non-retransmitted chunk
+ */
+ tp1->whoTo->net_ack2 += tp1->send_size;
+
+ /*-
+ * update RTO too ?
+ */
+ if (tp1->do_rtt) {
+ if (*rto_ok &&
+ sctp_calculate_rto(stcb,
+ &stcb->asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time,
+ SCTP_RTT_FROM_DATA)) {
+ *rto_ok = 0;
+ }
+ if (tp1->whoTo->rto_needed == 0) {
+ tp1->whoTo->rto_needed = 1;
+ }
+ tp1->do_rtt = 0;
+ }
+ }
+
+ }
+ if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn,
+ stcb->asoc.this_sack_highest_gap)) {
+ stcb->asoc.this_sack_highest_gap =
+ tp1->rec.data.tsn;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB2,
+ (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ }
+ /*-
+ * All chunks NOT UNSENT fall through here and are marked
+ * (leave PR-SCTP ones that are to skip alone though)
+ */
+ if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
+ (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
+ tp1->sent = SCTP_DATAGRAM_MARKED;
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ /* NR Sack code here */
+ if (nr_sacking &&
+ (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
+ if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+ stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
+#endif
+ }
+ if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+ (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
+ stcb->asoc.trigger_reset = 1;
+ }
+ tp1->sent = SCTP_DATAGRAM_NR_ACKED;
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+ wake_him++;
+ }
+ }
+ break;
+ } /* if (tp1->rec.data.tsn == theTSN) */
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ if ((tp1 == NULL) && (circled == 0)) {
+ circled++;
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ }
+ } /* end while (tp1) */
+ if (tp1 == NULL) {
+ circled = 0;
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ }
+ /* In case the fragments were not in order we must reset */
+ } /* end for (j = frag_strt) */
+ *p_tp1 = tp1;
+ return (wake_him); /* Return value only used for nr-sack */
+}
+
+static int
+sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
+ uint32_t last_tsn, uint32_t *biggest_tsn_acked,
+ uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
+ int num_seg, int num_nr_seg, int *rto_ok)
+{
+ struct sctp_gap_ack_block *frag, block;
+ struct sctp_tmit_chunk *tp1;
+ int i;
+ int num_frs = 0;
+ int chunk_freed;
+ int non_revocable;
+ uint16_t frag_strt, frag_end, prev_frag_end;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ prev_frag_end = 0;
+ chunk_freed = 0;
+
+ for (i = 0; i < (num_seg + num_nr_seg); i++) {
+ if (i == num_seg) {
+ prev_frag_end = 0;
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ return (chunk_freed);
+ }
+ frag_strt = ntohs(frag->start);
+ frag_end = ntohs(frag->end);
+
+ if (frag_strt > frag_end) {
+ /* This gap report is malformed, skip it. */
+ continue;
+ }
+ if (frag_strt <= prev_frag_end) {
+ /* This gap report is not in order, so restart. */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
+ *biggest_tsn_acked = last_tsn + frag_end;
+ }
+ if (i < num_seg) {
+ non_revocable = 0;
+ } else {
+ non_revocable = 1;
+ }
+ if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
+ non_revocable, &num_frs, biggest_newly_acked_tsn,
+ this_sack_lowest_newack, rto_ok)) {
+ chunk_freed = 1;
+ }
+ prev_frag_end = frag_end;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ if (num_frs)
+ sctp_log_fr(*biggest_tsn_acked,
+ *biggest_newly_acked_tsn,
+ last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
+ }
+ return (chunk_freed);
+}
+
+static void
+sctp_check_for_revoked(struct sctp_tcb *stcb,
+ struct sctp_association *asoc, uint32_t cumack,
+ uint32_t biggest_tsn_acked)
+{
+ struct sctp_tmit_chunk *tp1;
+
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
+ /*
+ * ok this guy is either ACKED or MARKED. If it is
+ * ACKED it has been previously acked but not this
+ * time, i.e. revoked. If it is MARKED it was ACK'ed
+ * again.
+ */
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
+ break;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+ /* it has been revoked */
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ tp1->rec.data.chunk_was_revoked = 1;
+ /* We must add this stuff back in to
+ * assure timers and such get started.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ /* We inflate the cwnd to compensate for our
+ * artificial inflation of the flight_size.
+ */
+ tp1->whoTo->cwnd += tp1->book_size;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.tsn,
+ 0,
+ 0,
+ SCTP_LOG_TSN_REVOKED);
+ }
+ } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
+ /* it has been re-acked in this SACK */
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+ break;
+ }
+}
+
+static void
+sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
+{
+ struct sctp_tmit_chunk *tp1;
+ int strike_flag = 0;
+ struct timeval now;
+ int tot_retrans = 0;
+ uint32_t sending_seq;
+ struct sctp_nets *net;
+ int num_dests_sacked = 0;
+
+ /*
+ * select the sending_seq, this is either the next thing ready to be
+ * sent but not transmitted, OR, the next seq we assign.
+ */
+ tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (tp1 == NULL) {
+ sending_seq = asoc->sending_seq;
+ } else {
+ sending_seq = tp1->rec.data.tsn;
+ }
+
+ /* CMT DAC algo: finding out if SACK is a mixed SACK */
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->saw_newack)
+ num_dests_sacked++;
+ }
+ }
+ if (stcb->asoc.prsctp_supported) {
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ }
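+ /*
+ * Each pass below may "strike" a chunk by bumping tp1->sent one step
+ * closer to SCTP_DATAGRAM_RESEND; once a chunk accumulates enough
+ * strikes to reach that value it is marked for fast retransmission
+ * further down in the loop.
+ */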
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ strike_flag = 0;
+ if (tp1->no_fr_allowed) {
+ /* this one had a timeout or something */
+ continue;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND)
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_CHECK_STRIKE);
+ }
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
+ tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* done */
+ break;
+ }
+ if (stcb->asoc.prsctp_supported) {
+ if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /* Is it expired? */
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
+#else
+ if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+#endif
+ /* Yes so drop it */
+ if (tp1->data != NULL) {
+ (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
+ SCTP_SO_NOT_LOCKED);
+ }
+ continue;
+ }
+ }
+
+ }
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
+ !(accum_moved && asoc->fast_retran_loss_recovery)) {
+ /* we are beyond the tsn in the sack */
+ break;
+ }
+ if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
+ /* either a RESEND, ACKED, or MARKED */
+ /* skip */
+ if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+ /* Continue striking FWD-TSN chunks */
+ tp1->rec.data.fwd_tsn_cnt++;
+ }
+ continue;
+ }
+ /*
+ * CMT : SFR algo (covers part of DAC and HTNA as well)
+ */
+ if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
+ /*
+ * No new acks were received for data sent to this
+ * dest. Therefore, according to the SFR algo for
+ * CMT, no data sent to this dest can be marked for
+ * FR using this SACK.
+ */
+ continue;
+ } else if (tp1->whoTo &&
+ SCTP_TSN_GT(tp1->rec.data.tsn,
+ tp1->whoTo->this_sack_highest_newack) &&
+ !(accum_moved && asoc->fast_retran_loss_recovery)) {
+ /*
+ * CMT: New acks were received for data sent to
+ * this dest. But no new acks were seen for data
+ * sent after tp1. Therefore, according to the SFR
+ * algo for CMT, tp1 cannot be marked for FR using
+ * this SACK. This step covers part of the DAC algo
+ * and the HTNA algo as well.
+ */
+ continue;
+ }
+ /*
+ * Here we check to see if we have already done a FR
+ * and if so we see if the biggest TSN we saw in the sack is
+ * smaller than the recovery point. If so we don't strike
+ * the tsn... otherwise we CAN strike the TSN.
+ */
+ /*
+ * @@@ JRI: Check for CMT
+ * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
+ */
+ if (accum_moved && asoc->fast_retran_loss_recovery) {
+ /*
+ * Strike the TSN if in fast-recovery and cum-ack
+ * moved.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*
+ * CMT DAC algorithm: If the SACK flag is set to
+ * 0, then the lowest_newack test will not pass
+ * because it would have been set to the cumack
+ * earlier. If the chunk is not already to be
+ * rtx'd, this is not a mixed sack, and tp1 is
+ * not between two sacked TSNs, then mark it by
+ * one more.
+ * NOTE that we are marking by one additional
+ * time since the SACK DAC flag indicates that
+ * two packets have been received after this
+ * missing TSN.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(16 + num_dests_sacked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ tp1->sent++;
+ }
+ }
+ } else if ((tp1->rec.data.doing_fast_retransmit) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * For those that have done a FR we must take
+ * special consideration if we strike. I.e the
+ * biggest_newly_acked must be higher than the
+ * sending_seq at the time we did the FR.
+ */
+ if (
+#ifdef SCTP_FR_TO_ALTERNATE
+ /*
+ * If FR's go to new networks, then we must only do
+ * this for singly homed asoc's. However if the FR's
+ * go to the same network (Armando's work) then its
+ * ok to FR multiple times.
+ */
+ (asoc->numnets < 2)
+#else
+ (1)
+#endif
+ ) {
+
+ if (SCTP_TSN_GE(biggest_tsn_newly_acked,
+ tp1->rec.data.fast_retran_tsn)) {
+ /*
+ * Strike the TSN, since this ack is
+ * beyond where things were when we
+ * did a FR.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ strike_flag = 1;
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*
+ * CMT DAC algorithm: If the
+ * SACK flag is set to 0,
+ * then the lowest_newack
+ * test will not pass
+ * because it would have
+ * been set to the cumack
+ * earlier. If the chunk is
+ * not already to be rtx'd,
+ * this is not a mixed sack,
+ * and tp1 is not between
+ * two sacked TSNs, then
+ * mark it by one more.
+ * NOTE that we are marking
+ * by one additional time
+ * since the SACK DAC flag
+ * indicates that two
+ * packets have been
+ * received after this
+ * missing TSN.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (num_dests_sacked == 1) &&
+ SCTP_TSN_GT(this_sack_lowest_newack,
+ tp1->rec.data.tsn)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(32 + num_dests_sacked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ }
+ }
+ }
+ }
+ /*
+ * JRI: TODO: remove code for HTNA algo. CMT's
+ * SFR algo covers HTNA.
+ */
+ } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
+ biggest_tsn_newly_acked)) {
+ /*
+ * We don't strike these: this is the HTNA
+ * algorithm, i.e. we don't strike if our TSN is
+ * larger than the Highest TSN Newly Acked.
+ */
+ ;
+ } else {
+ /* Strike the TSN */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*
+ * CMT DAC algorithm: If the SACK flag is set to
+ * 0, then the lowest_newack test will not pass
+ * because it would have been set to the cumack
+ * earlier. If the chunk is not already to be
+ * rtx'd, this is not a mixed sack, and tp1 is
+ * not between two sacked TSNs, then mark it by
+ * one more.
+ * NOTE that we are marking by one additional
+ * time since the SACK DAC flag indicates that
+ * two packets have been received after this
+ * missing TSN.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(48 + num_dests_sacked,
+ tp1->rec.data.tsn,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ tp1->sent++;
+ }
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ struct sctp_nets *alt;
+
+ /* fix counts and things */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
+ (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+ if (tp1->whoTo) {
+ tp1->whoTo->net_ack++;
+ sctp_flight_size_decrease(tp1);
+ if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+ tp1);
+ }
+ }
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
+ asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+ }
+ /* add back to the rwnd */
+ asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+
+ /* remove from the total flight */
+ sctp_total_flight_decrease(stcb, tp1);
+
+ if ((stcb->asoc.prsctp_supported) &&
+ (PR_SCTP_RTX_ENABLED(tp1->flags))) {
+ /* Has it been retransmitted tv_sec times? - we store the retran count there. */
+ if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
+ /* Yes, so drop it */
+ if (tp1->data != NULL) {
+ (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
+ SCTP_SO_NOT_LOCKED);
+ }
+ /* Make sure to flag we had a FR */
+ if (tp1->whoTo != NULL) {
+ tp1->whoTo->net_ack++;
+ }
+ continue;
+ }
+ }
+ /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
+ 0, SCTP_FR_MARKED);
+ }
+ if (strike_flag) {
+ /* This is a subsequent FR */
+ SCTP_STAT_INCR(sctps_sendmultfastretrans);
+ }
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ if (asoc->sctp_cmt_on_off > 0) {
+ /*
+ * CMT: Using RTX_SSTHRESH policy for CMT.
+ * If CMT is being used, then pick dest with
+ * largest ssthresh for any retransmission.
+ */
+ tp1->no_fr_allowed = 1;
+ alt = tp1->whoTo;
+ /*sa_ignore NO_NULL_CHK*/
+ if (asoc->sctp_cmt_pf > 0) {
+ /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
+ alt = sctp_find_alternate_net(stcb, alt, 2);
+ } else {
+ /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
+ /*sa_ignore NO_NULL_CHK*/
+ alt = sctp_find_alternate_net(stcb, alt, 1);
+ }
+ if (alt == NULL) {
+ alt = tp1->whoTo;
+ }
+ /*
+ * CUCv2: If a different dest is picked for
+ * the retransmission, then new
+ * (rtx-)pseudo_cumack needs to be tracked
+ * for orig dest. Let CUCv2 track new (rtx-)
+ * pseudo-cumack always.
+ */
+ if (tp1->whoTo) {
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+
+ } else {/* CMT is OFF */
+
+#ifdef SCTP_FR_TO_ALTERNATE
+ /* Can we find an alternate? */
+ alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
+#else
+ /*
+ * default behavior is to NOT retransmit
+ * FR's to an alternate. Armando Caro's
+ * paper details why.
+ */
+ alt = tp1->whoTo;
+#endif
+ }
+
+ tp1->rec.data.doing_fast_retransmit = 1;
+ tot_retrans++;
+ /* mark the sending seq for possible subsequent FR's */
+ /*
+ * SCTP_PRINTF("Marking TSN for FR new value %x\n",
+ * (uint32_t)tpi->rec.data.tsn);
+ */
+ if (TAILQ_EMPTY(&asoc->send_queue)) {
+ /*
+ * If the send queue is empty then this is
+ * the next sequence number that will be
+ * assigned, so we record it to mark where
+ * things were when we did the FR.
+ */
+ tp1->rec.data.fast_retran_tsn = sending_seq;
+ } else {
+ /*
+ * If there are chunks on the send queue
+ * (unsent data that has made it from the
+ * stream queues but not out the door), we
+ * take the first one (which will have the
+ * lowest TSN) and record it to mark where
+ * things were when we did the FR.
+ */
+ struct sctp_tmit_chunk *ttt;
+
+ ttt = TAILQ_FIRST(&asoc->send_queue);
+ tp1->rec.data.fast_retran_tsn =
+ ttt->rec.data.tsn;
+ }
+
+ if (tp1->do_rtt) {
+ /*
+ * this guy had a RTO calculation pending on
+ * it, cancel it
+ */
+ if ((tp1->whoTo != NULL) &&
+ (tp1->whoTo->rto_needed == 0)) {
+ tp1->whoTo->rto_needed = 1;
+ }
+ tp1->do_rtt = 0;
+ }
+ if (alt != tp1->whoTo) {
+ /* yes, there is an alternate. */
+ sctp_free_remote_addr(tp1->whoTo);
+ /*sa_ignore FREED_MEMORY*/
+ tp1->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ }
+}
+
+struct sctp_tmit_chunk *
+sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
+ struct timeval now;
+ int now_filled = 0;
+
+ if (asoc->prsctp_supported == 0) {
+ return (NULL);
+ }
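+ /*
+ * Walk the sent queue from the front: the advanced peer ack point
+ * can only move across chunks that are abandoned (FORWARD_TSN_SKIP)
+ * or NR-acked; a chunk still marked for resend stops the scan unless
+ * its PR-SCTP lifetime has expired (RFC 3758 behaviour).
+ */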
+ TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
+ tp1->sent != SCTP_DATAGRAM_RESEND &&
+ tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+ /* no chance to advance, out of here */
+ break;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ asoc->advanced_peer_ack_point,
+ tp1->rec.data.tsn, 0, 0);
+ }
+ }
+ if (!PR_SCTP_ENABLED(tp1->flags)) {
+ /*
+ * We can't fwd-tsn past any that are reliable, i.e.
+ * ones that are retransmitted until the asoc fails.
+ */
+ break;
+ }
+ if (!now_filled) {
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ now_filled = 1;
+ }
+ /*
+ * Now we have a chunk which is marked for another
+ * retransmission to a PR-stream but which has maybe run out
+ * of its chances already OR has been marked to skip now. Can
+ * we skip it if it's a resend?
+ */
+ if (tp1->sent == SCTP_DATAGRAM_RESEND &&
+ (PR_SCTP_TTL_ENABLED(tp1->flags))) {
+ /*
+ * Now is this one marked for resend and its time is
+ * now up?
+ */
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
+#else
+ if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+#endif
+ /* Yes so drop it */
+ if (tp1->data) {
+ (void)sctp_release_pr_sctp_chunk(stcb, tp1,
+ 1, SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ /*
+ * No, we are done when we hit one marked for
+ * resend whose time has not expired.
+ */
+ break;
+ }
+ }
+ /*
+ * Ok, now if this chunk is marked to be dropped we can clean
+ * it up, advance our peer ack point, and check the next
+ * chunk.
+ */
+ if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
+ /* advance PeerAckPoint goes forward */
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
+ asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
+ a_adv = tp1;
+ } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
+ /* No update but we do save the chk */
+ a_adv = tp1;
+ }
+ } else {
+ /*
+ * If it is still in RESEND we can advance no
+ * further
+ */
+ break;
+ }
+ }
+ return (a_adv);
+}
+
+static int
+sctp_fs_audit(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
+ int ret;
+#ifndef INVARIANTS
+ int entry_flight, entry_cnt;
+#endif
+
+ ret = 0;
+#ifndef INVARIANTS
+ entry_flight = asoc->total_flight;
+ entry_cnt = asoc->total_flight_count;
+#endif
+ if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
+ return (0);
+
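+ /*
+ * Classify every chunk on the sent queue by its sent state: anything
+ * below RESEND still counts as in flight and anything strictly
+ * between RESEND and ACKED is inconsistent; a non-zero count in
+ * either bucket fails the audit.
+ */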
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
+ chk->rec.data.tsn,
+ chk->send_size,
+ chk->snd_count);
+ inflight++;
+ } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ resend++;
+ } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
+ inbetween++;
+ } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
+ above++;
+ } else {
+ acked++;
+ }
+ }
+
+ if ((inflight > 0) || (inbetween > 0)) {
+#ifdef INVARIANTS
+ panic("Flight size-express incorrect? \n");
+#else
+ SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
+ entry_flight, entry_cnt);
+
+ SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
+ inflight, inbetween, resend, above, acked);
+ ret = 1;
+#endif
+ }
+ return (ret);
+}
+
+static void
+sctp_window_probe_recovery(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_tmit_chunk *tp1)
+{
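+ /*
+ * The peer's window has re-opened: unless the probe chunk has
+ * already been acked (or its data freed), take it out of the
+ * in-flight accounting and mark it for resend so it goes out
+ * again under the new window.
+ */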
+ tp1->window_probe = 0;
+ if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
+ /* TSN's skipped we do NOT move back. */
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
+ tp1->whoTo ? tp1->whoTo->flight_size : 0,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ return;
+ }
+ /* First setup this by shrinking flight */
+ if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+ tp1);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ /* Now mark for resend */
+ tp1->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+}
+
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int *abort_now, int ecne_seen)
+{
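+ /*
+ * Fast path used for SACKs that carry no gap reports: only the
+ * cumulative ack point moves, so everything up to cumack can be
+ * freed in a single pass.
+ */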
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ int j, done_once = 0;
+ int rto_ok = 1;
+ uint32_t send_s;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ asoc = &stcb->asoc;
+ old_rwnd = asoc->peers_rwnd;
+ if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
+ /* old ack */
+ return;
+ } else if (asoc->last_acked_seq == cumack) {
+ /* Window update sack */
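+ /*
+ * Recompute the peer's usable window: the advertised rwnd minus
+ * what we still have in flight (plus the configured per-chunk
+ * overhead).
+ */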
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ goto again;
+ }
+ return;
+ }
+
+ /* First setup for CC stuff */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
+ /* Drag along the window_tsn for cwr's */
+ net->cwr_window_tsn = cumack;
+ }
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
+ }
+ }
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.tsn + 1;
+ } else {
+ send_s = asoc->sending_seq;
+ }
+ if (SCTP_TSN_GE(cumack, send_s)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ *abort_now = 1;
+ /* XXX */
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "Cum ack %8.8x greater or equal than TSN %8.8x",
+ cumack, send_s);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ asoc->this_sack_highest_gap = cumack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
+ /* process the new consecutive TSN first */
+ TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
+ if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ SCTP_PRINTF("Warning, an unsent is now acked?\n");
+ }
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no longer in flight. Higher
+ * values may occur during marking
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+ sctp_flight_size_decrease(tp1);
+ if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+ tp1);
+ }
+ /* sa_ignore NO_NULL_CHK */
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+ /*
+ * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ if (rto_ok &&
+ sctp_calculate_rto(stcb,
+ &stcb->asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time,
+ SCTP_RTT_FROM_DATA)) {
+ rto_ok = 0;
+ }
+ if (tp1->whoTo->rto_needed == 0) {
+ tp1->whoTo->rto_needed = 1;
+ }
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+ if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
+#endif
+ }
+ }
+ if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+ (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
+ asoc->trigger_reset = 1;
+ }
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.tsn,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
+ } else {
+ break;
+ }
+ }
+
+ }
+#if defined(__Userspace__)
+ if (stcb->sctp_ep->recv_callback) {
+ if (stcb->sctp_socket) {
+ uint32_t inqueue_bytes, sb_free_now;
+ struct sctp_inpcb *inp;
+
+ inp = stcb->sctp_ep;
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
+
+ /* check if the amount free in the send socket buffer crossed the threshold */
+ if (inp->send_callback &&
+ (((inp->send_sb_threshold > 0) &&
+ (sb_free_now >= inp->send_sb_threshold) &&
+ (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
+ (inp->send_sb_threshold == 0))) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ inp->send_callback(stcb->sctp_socket, sb_free_now);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+ }
+ } else if (stcb->sctp_socket) {
+#else
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->sctp_socket) {
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ /* JRS - Use the congestion control given in the CC module */
+ if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->net_ack2 > 0) {
+ /*
+ * Karn's rule applies to clearing error count, this
+ * is optional.
+ */
+ net->error_count = 0;
+ if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+ /* addr came good */
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ 0, (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ if (net == stcb->asoc.primary_destination) {
+ if (stcb->asoc.alternate) {
+ /* release the alternate, primary is good */
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_PF) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
+ /* Done with this net */
+ net->net_ack = 0;
+ }
+ /* restore any doubled timers */
+ net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+ if (net->RTO < stcb->asoc.minrto) {
+ net->RTO = stcb->asoc.minrto;
+ }
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ }
+ }
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
+ }
+ asoc->last_acked_seq = cumack;
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+
+ /* RWND update */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /* Now assure a timer is running wherever data is queued */
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ win_probe_recovered = 1;
+ /*
+ * Find the first chunk that was used as a window
+ * probe and move it back for retransmission.
+ */
+ /* sa_ignore FREED_MEMORY */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ /* move back to data send queue */
+ sctp_window_probe_recovery(stcb, asoc, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ j++;
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
+ if (net->window_probe) {
+ net->window_probe = 0;
+ }
+ } else {
+ if (net->window_probe) {
+ /* In window probes we must assure a timer is still running there */
+ net->window_probe = 0;
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
+ }
+ } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /* huh, this should not happen unless all packets
+ * are PR-SCTP and marked to skip of course.
+ */
+ if (sctp_fs_audit(asoc)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->stream_queue_cnt == 1) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ *abort_now = 1;
+ /* XXX */
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ struct sctp_nets *netp;
+
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (asoc->alternate) {
+ netp = asoc->alternate;
+ } else {
+ netp = asoc->primary_destination;
+ }
+ sctp_send_shutdown(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ struct sctp_nets *netp;
+
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (asoc->alternate) {
+ netp = asoc->alternate;
+ } else {
+ netp = asoc->primary_destination;
+ }
+ sctp_send_shutdown_ack(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, netp);
+ }
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
+ asoc->advanced_peer_ack_point = cumack;
+ }
+ /* PR-SCTP issues need to be addressed too */
+ if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing.
+ */
+ if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
+ send_forward_tsn(stcb, asoc);
+ } else if (lchk) {
+ /* try to FR fwd-tsn's that get lost too */
+ if (lchk->rec.data.fwd_tsn_cnt >= 3) {
+ send_forward_tsn(stcb, asoc);
+ }
+ }
+ }
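+ /*
+ * Skip ahead to the first remaining chunk that still has a
+ * destination, so a T3 timer can be started for it below.
+ */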
+ for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
+ if (lchk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (lchk != NULL) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
+
+void
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+ struct sctp_tcb *stcb,
+ uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+ int *abort_now, uint8_t flags,
+ uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
+ uint16_t wake_him = 0;
+ uint32_t send_s = 0;
+ long j;
+ int accum_moved = 0;
+ int will_exit_fast_recovery = 0;
+ uint32_t a_rwnd, old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ struct sctp_nets *net = NULL;
+ int done_once;
+ int rto_ok = 1;
+ uint8_t reneged_all = 0;
+ uint8_t cmt_dac_flag;
+ /*
+ * we take any chance we can to service our queues since we cannot
+ * get awoken when the socket is read from :<
+ */
+ /*
+ * Now perform the actual SACK handling:
+ * 1) Verify that it is not an old SACK; if so, discard.
+ * 2) If there is nothing left in the send queue (cum-ack is equal
+ *    to last acked) then you have a duplicate too; update any rwnd
+ *    change, verify no timers are running, then return.
+ * 3) Process any new consecutive data, i.e. cum-ack moved; process
+ *    these first and note that it moved.
+ * 4) Process any sack blocks.
+ * 5) Drop any acked chunks from the queue.
+ * 6) Check for any revoked blocks and mark them.
+ * 7) Update the cwnd.
+ * 8) If nothing is left, sync up flight sizes and things, stop all
+ *    timers and also check for shutdown_pending state. If so then
+ *    go ahead and send off the shutdown. If in shutdown recv, send
+ *    off the shutdown-ack and start that timer. Return.
+ * 9) Strike any non-acked things and do the FR procedure if needed,
+ *    being sure to set the FR flag.
+ * 10) Do PR-SCTP procedures.
+ * 11) Apply any FR penalties.
+ * 12) Assure we will SACK if in shutdown_recv state.
+ */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* CMT DAC algo */
+ this_sack_lowest_newack = 0;
+ SCTP_STAT_INCR(sctps_slowpath_sack);
+ last_tsn = cum_ack;
+ cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ a_rwnd = rwnd;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+
+ old_rwnd = stcb->asoc.peers_rwnd;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ asoc = &stcb->asoc;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ 0,
+ num_seg,
+ num_dup,
+ SCTP_LOG_NEW_SACK);
+ }
+ if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
+ uint16_t i;
+ uint32_t *dupdata, dblock;
+
+ for (i = 0; i < num_dup; i++) {
+ dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
+ sizeof(uint32_t), (uint8_t *)&dblock);
+ if (dupdata == NULL) {
+ break;
+ }
+ sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
+ }
+ }
+ /* reality check */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.tsn + 1;
+ } else {
+ tp1 = NULL;
+ send_s = asoc->sending_seq;
+ }
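+ /*
+ * send_s is one past the highest TSN ever handed to the peer;
+ * a cum-ack at or beyond it acknowledges data we never sent.
+ */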
+ if (SCTP_TSN_GE(cum_ack, send_s)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ /*
+ * no way, we have not even sent this TSN out yet.
+ * Peer is hopelessly messed up with us.
+ */
+ SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
+ cum_ack, send_s);
+ if (tp1) {
+ SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
+ tp1->rec.data.tsn, (void *)tp1);
+ }
+ hopeless_peer:
+ *abort_now = 1;
+ /* XXX */
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "Cum ack %8.8x greater or equal than TSN %8.8x",
+ cum_ack, send_s);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ /**********************/
+ /* 1) check the range */
+ /**********************/
+ if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
+ /* acking something behind */
+ return;
+ }
+
+ /* update the Rwnd of the peer */
+ if (TAILQ_EMPTY(&asoc->sent_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ /* nothing left on send/sent and strmq */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->sent_queue_retran_cnt) {
+ asoc->sent_queue_retran_cnt = 0;
+ }
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* stop any timers */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
+ net->partial_bytes_acked = 0;
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ return;
+ }
+ /*
+ * We init net_ack and net_ack2 to 0. These are used to track two
+ * things: the total byte count acked is tracked in net_ack, AND
+ * net_ack2 tracks the total bytes acked that are unambiguous and
+ * were never retransmitted. We track these on a per-destination
+ * address basis.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
+ /* Drag along the window_tsn for cwr's */
+ net->cwr_window_tsn = cum_ack;
+ }
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
+ }
+
+ /*
+ * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
+ * to be greater than the cumack. Also reset saw_newack to 0
+ * for all dests.
+ */
+ net->saw_newack = 0;
+ net->this_sack_highest_newack = last_tsn;
+ }
+ /* process the new consecutive TSN first */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ accum_moved = 1;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no longer in flight. Higher
+ * values may occur during marking.
+ */
+ if ((tp1->whoTo->dest_state &
+ SCTP_ADDR_UNCONFIRMED) &&
+ (tp1->snd_count < 2)) {
+ /*
+ * If there was no retransmission
+ * and the address is
+ * unconfirmed and we sent
+ * there and are now
+ * SACKed... it's confirmed,
+ * so mark it.
+ */
+ tp1->whoTo->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+ tp1);
+ }
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+
+ /* CMT SFR and DAC algos */
+ this_sack_lowest_newack = tp1->rec.data.tsn;
+ tp1->whoTo->saw_newack = 1;
+
+ if (tp1->snd_count < 2) {
+ /*
+ * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
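+ /*
+ * Only one RTT sample is taken per SACK: rto_ok is
+ * cleared after the first successful measurement.
+ */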
+ if (tp1->do_rtt) {
+ if (rto_ok &&
+ sctp_calculate_rto(stcb,
+ &stcb->asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time,
+ SCTP_RTT_FROM_DATA)) {
+ rto_ok = 0;
+ }
+ if (tp1->whoTo->rto_needed == 0) {
+ tp1->whoTo->rto_needed = 1;
+ }
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresponding destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger a search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.tsn,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB3,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
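+ /*
+ * Undo the artificial cwnd inflation applied when this
+ * chunk was marked revoked (see the revocation handling
+ * further down).
+ */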
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
+ /* always set this up to cum-ack */
+ asoc->this_sack_highest_gap = last_tsn;
+
+ if ((num_seg > 0) || (num_nr_seg > 0)) {
+
+ /*
+ * this_sack_highest_gap will increase while handling NEW
+ * segments. this_sack_highest_newack will increase while
+ * handling NEWLY ACKED chunks. this_sack_lowest_newack is
+ * used for the CMT DAC algo. saw_newack will also change.
+ */
+ if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
+ &biggest_tsn_newly_acked, &this_sack_lowest_newack,
+ num_seg, num_nr_seg, &rto_ok)) {
+ wake_him++;
+ }
+ /*
+ * validate the biggest_tsn_acked in the gap acks if
+ * strict adherence is wanted.
+ */
+ if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
+ /*
+ * peer is either confused or we are under
+ * attack. We must abort.
+ */
+ SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
+ biggest_tsn_acked, send_s);
+ goto hopeless_peer;
+ }
+ }
+ /*******************************************/
+ /* cancel ALL T3-send timer if accum moved */
+ /*******************************************/
+ if (asoc->sctp_cmt_on_off > 0) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->new_pseudo_cumack)
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
+
+ }
+ } else {
+ if (accum_moved) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
+ }
+ }
+ }
+ /********************************************/
+ /* drop the acked chunks from the sentqueue */
+ /********************************************/
+ asoc->last_acked_seq = cum_ack;
+
+ TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
+ break;
+ }
+ if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+ if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
+#endif
+ }
+ }
+ if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+ (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
+ asoc->trigger_reset = 1;
+ }
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (PR_SCTP_ENABLED(tp1->flags)) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ asoc->sent_queue_cnt--;
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.tsn,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
+ wake_him++;
+ }
+ if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
+#ifdef INVARIANTS
+ panic("Warning flight size is positive and should be 0");
+#else
+ SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
+ asoc->total_flight);
+#endif
+ asoc->total_flight = 0;
+ }
+
+#if defined(__Userspace__)
+ if (stcb->sctp_ep->recv_callback) {
+ if (stcb->sctp_socket) {
+ uint32_t inqueue_bytes, sb_free_now;
+ struct sctp_inpcb *inp;
+
+ inp = stcb->sctp_ep;
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
+
+ /* check if the amount free in the send socket buffer crossed the threshold */
+ if (inp->send_callback &&
+ (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
+ (inp->send_sb_threshold == 0))) {
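+ /*
+ * Hold a reference and drop the TCB lock across the
+ * user callback so the association cannot be freed
+ * while unlocked.
+ */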
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ inp->send_callback(stcb->sctp_socket, sb_free_now);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+ }
+ } else if ((wake_him) && (stcb->sctp_socket)) {
+#else
+ /* sa_ignore NO_NULL_CHK */
+ if ((wake_him) && (stcb->sctp_socket)) {
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ if (asoc->fast_retran_loss_recovery && accum_moved) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
+ /* Setup so we will exit RFC2582 fast recovery */
+ will_exit_fast_recovery = 1;
+ }
+ }
+ /*
+ * Check for revoked fragments:
+ *
+ * If the previous SACK had no frags, then we can't have any revoked.
+ * If the previous SACK had frags, then: if we now have frags (i.e.
+ * num_seg > 0), call sctp_check_for_revoked() to tell if the peer
+ * revoked some of them; else the peer revoked all ACKED fragments,
+ * since we had some before and now we have NONE.
+ */
+
+ if (num_seg) {
+ sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+ asoc->saw_sack_with_frags = 1;
+ } else if (asoc->saw_sack_with_frags) {
+ int cnt_revoked = 0;
+
+ /* Peer revoked all dg's marked or acked */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)tp1->whoTo,
+ tp1->rec.data.tsn);
+ }
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ tp1->rec.data.chunk_was_revoked = 1;
+ /*
+ * To ensure that this increase in
+ * flightsize, which is artificial,
+ * does not throttle the sender, we
+ * also increase the cwnd
+ * artificially.
+ */
+ tp1->whoTo->cwnd += tp1->book_size;
+ cnt_revoked++;
+ }
+ }
+ if (cnt_revoked) {
+ reneged_all = 1;
+ }
+ asoc->saw_sack_with_frags = 0;
+ }
+ if (num_nr_seg > 0)
+ asoc->saw_sack_with_nr_frags = 1;
+ else
+ asoc->saw_sack_with_nr_frags = 0;
+
+ /* JRS - Use the congestion control given in the CC module */
+ if (ecne_seen == 0) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->net_ack2 > 0) {
+ /*
+ * Karn's rule applies to clearing the error count;
+ * this is optional.
+ */
+ net->error_count = 0;
+ if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+ /* addr came good */
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ 0, (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+
+ if (net == stcb->asoc.primary_destination) {
+ if (stcb->asoc.alternate) {
+ /* release the alternate, primary is good */
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ }
+
+ if (net->dest_state & SCTP_ADDR_PF) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
+ /* Done with this net */
+ net->net_ack = 0;
+ }
+ /* restore any doubled timers */
+ net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+ if (net->RTO < stcb->asoc.minrto) {
+ net->RTO = stcb->asoc.minrto;
+ }
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ }
+ }
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
+ }
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /* stop all timers */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->stream_queue_cnt == 1) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ *abort_now = 1;
+ /* XXX */
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ struct sctp_nets *netp;
+
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (asoc->alternate) {
+ netp = asoc->alternate;
+ } else {
+ netp = asoc->primary_destination;
+ }
+ sctp_send_shutdown(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ return;
+ } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ struct sctp_nets *netp;
+
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (asoc->alternate) {
+ netp = asoc->alternate;
+ } else {
+ netp = asoc->primary_destination;
+ }
+ sctp_send_shutdown_ack(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, netp);
+ return;
+ }
+ }
+ /*
+ * Now here we are going to recycle net_ack for a different use...
+ * HEADS UP.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->net_ack = 0;
+ }
+
+ /*
+ * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
+ * to be done. Setting this_sack_lowest_newack to the cum_ack will
+ * automatically ensure that.
+ */
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
+ (cmt_dac_flag == 0)) {
+ this_sack_lowest_newack = cum_ack;
+ }
+ if ((num_seg > 0) || (num_nr_seg > 0)) {
+ sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
+ biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
+ }
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
+
+ /* Now are we exiting loss recovery ? */
+ if (will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ asoc->fast_retran_loss_recovery = 0;
+ }
+ if ((asoc->sat_t3_loss_recovery) &&
+ SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
+ /* end satellite t3 loss recovery */
+ asoc->sat_t3_loss_recovery = 0;
+ }
+ /*
+ * CMT Fast recovery
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ net->fast_retran_loss_recovery = 0;
+ }
+ }
+
+ /* Adjust and set the new rwnd value */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+
+ /*
+ * Now we must setup so we have a timer up for anyone with
+ * outstanding data.
+ */
+ done_once = 0;
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ win_probe_recovered = 1;
+ /*-
+ * Find the first chunk that was used with a
+ * window probe and clear the event. Put
+ * it back into the send queue as if it had
+ * not been sent.
+ */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ sctp_window_probe_recovery(stcb, asoc, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ j++;
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ if (net->window_probe) {
+ net->window_probe = 0;
+ }
+ } else {
+ if (net->window_probe) {
+ /* In window probes we must assure a timer is still running there */
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+
+ }
+ } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /* Huh, this should not happen unless all packets
+ * are PR-SCTP and marked to be skipped, of course.
+ */
+ if (sctp_fs_audit(asoc)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
+ asoc->advanced_peer_ack_point = cum_ack;
+ }
+ /* C2. try to further move advancedPeerAckPoint ahead */
+ if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xee, cum_ack, asoc->advanced_peer_ack_point,
+ old_adv_peer_ack_point);
+ }
+ if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
+ send_forward_tsn(stcb, asoc);
+ } else if (lchk) {
+ /* try to FR fwd-tsn's that get lost too */
+ if (lchk->rec.data.fwd_tsn_cnt >= 3) {
+ send_forward_tsn(stcb, asoc);
+ }
+ }
+ }
+ for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
+ if (lchk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (lchk != NULL) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ a_rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
+
+void
+sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
+{
+ /* Copy cum-ack */
+ uint32_t cum_ack, a_rwnd;
+
+ cum_ack = ntohl(cp->cumulative_tsn_ack);
+ /* Arrange so a_rwnd does NOT change */
+ a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
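+ /*
+ * sctp_express_handle_sack() subtracts total_flight again when
+ * recomputing peers_rwnd, so pre-adding it here keeps the window
+ * essentially where it was.
+ */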
+
+ /* Now call the express sack handling */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
+}
+
+static void
+sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
+ struct sctp_stream_in *strmin)
+{
+ struct sctp_queued_to_read *control, *ncontrol;
+ struct sctp_association *asoc;
+ uint32_t mid;
+ int need_reasm_check = 0;
+
+ asoc = &stcb->asoc;
+ mid = strmin->last_mid_delivered;
+ /*
+ * First deliver anything prior to and including the stream no that
+ * came in.
+ */
+ TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
+ if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
+ /* this is deliverable now */
+ if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+ if (control->on_strm_q) {
+ if (control->on_strm_q == SCTP_ON_ORDERED) {
+ TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
+ } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
+ TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
+#ifdef INVARIANTS
+ } else {
+ panic("strmin: %p ctl: %p unknown %d",
+ strmin, control, control->on_strm_q);
+#endif
+ }
+ control->on_strm_q = 0;
+ }
+ /* subtract pending on streams */
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ /* deliver it to at least the delivery-q */
+ if (stcb->sctp_socket) {
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv,
+ 1, SCTP_READ_LOCK_HELD,
+ SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ /* It's a fragmented message */
+ if (control->first_frag_seen) {
+ /* Make it so this is next to deliver, we restore later */
+ strmin->last_mid_delivered = control->mid - 1;
+ need_reasm_check = 1;
+ break;
+ }
+ }
+ } else {
+ /* no more delivery now. */
+ break;
+ }
+ }
+ if (need_reasm_check) {
+ int ret;
+ ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
+ if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
+ /* Restore the next to deliver unless we are ahead */
+ strmin->last_mid_delivered = mid;
+ }
+ if (ret == 0) {
+ /* Left the front Partial one on */
+ return;
+ }
+ need_reasm_check = 0;
+ }
+ /*
+ * now we must deliver things in the queue the normal way, if any
+ * are now ready.
+ */
+ mid = strmin->last_mid_delivered + 1;
+ TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
+ if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
+ if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+ /* this is deliverable now */
+ if (control->on_strm_q) {
+ if (control->on_strm_q == SCTP_ON_ORDERED) {
+ TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
+ } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
+ TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
+#ifdef INVARIANTS
+ } else {
+ panic("strmin: %p ctl: %p unknown %d",
+ strmin, control, control->on_strm_q);
+#endif
+ }
+ control->on_strm_q = 0;
+ }
+ /* subtract pending on streams */
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ /* deliver it to at least the delivery-q */
+ strmin->last_mid_delivered = control->mid;
+ if (stcb->sctp_socket) {
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
+
+ }
+ mid = strmin->last_mid_delivered + 1;
+ } else {
+ /* It's a fragmented message */
+ if (control->first_frag_seen) {
+ /* Make it so this is next to deliver */
+ strmin->last_mid_delivered = control->mid - 1;
+ need_reasm_check = 1;
+ break;
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ if (need_reasm_check) {
+ (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
+ }
+}
+
+
+
+static void
+sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
+ struct sctp_association *asoc, struct sctp_stream_in *strm,
+ struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+ int cnt_removed = 0;
+
+ /*
+ * For now, large messages held on the stream reasm queue that
+ * are complete will be tossed too. We could in theory do more
+ * work to spin through and stop after dumping one msg, aka
+ * seeing the start of a new msg at the head, and call the
+ * delivery function... to see if it can be delivered... But
+ * for now we just dump everything on the queue.
+ */
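+ /*
+ * Without I-DATA support, unordered reassembly is keyed by TSN;
+ * if the lowest fragment already merged lies beyond the new
+ * cum-tsn, this FORWARD-TSN does not cover the message.
+ */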
+ if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
+ return;
+ }
+ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+ /* Purge hanging chunks */
+ if (!asoc->idata_supported && (ordered == 0)) {
+ if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
+ break;
+ }
+ }
+ cnt_removed++;
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ if (asoc->size_on_reasm_queue >= chk->send_size) {
+ asoc->size_on_reasm_queue -= chk->send_size;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
+#else
+ asoc->size_on_reasm_queue = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ if (!TAILQ_EMPTY(&control->reasm)) {
+ /* This has to be old data, unordered */
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
+ chk = TAILQ_FIRST(&control->reasm);
+ if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ sctp_add_chk_to_control(control, strm, stcb, asoc,
+ chk, SCTP_READ_LOCK_HELD);
+ }
+ sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
+ return;
+ }
+ if (control->on_strm_q == SCTP_ON_ORDERED) {
+ TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ control->on_strm_q = 0;
+ } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
+ TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+ control->on_strm_q = 0;
+#ifdef INVARIANTS
+ } else if (control->on_strm_q) {
+ panic("strm: %p ctl: %p unknown %d",
+ strm, control, control->on_strm_q);
+#endif
+ }
+ control->on_strm_q = 0;
+ if (control->on_read_q == 0) {
+ sctp_free_remote_addr(control->whoFrom);
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+ }
+}
+
+void
+sctp_handle_forward_tsn(struct sctp_tcb *stcb,
+ struct sctp_forward_tsn_chunk *fwd,
+ int *abort_flag, struct mbuf *m, int offset)
+{
+ /* The pr-sctp fwd tsn */
+ /*
+ * here we will perform all the data receiver side steps for
+ * processing FwdTSN, as required by the PR-SCTP draft:
+ *
+ * Assume we get FwdTSN(x):
+ *
+ * 1) update local cumTSN to x
+ * 2) try to further advance cumTSN to x + others we have
+ * 3) examine and update re-ordering queue on pr-in-streams
+ * 4) clean up re-assembly queue
+ * 5) Send a sack to report where we are.
+ */
+ struct sctp_association *asoc;
+ uint32_t new_cum_tsn, gap;
+ unsigned int i, fwd_sz, m_size;
+ uint32_t str_seq;
+ struct sctp_stream_in *strm;
+ struct sctp_queued_to_read *control, *sv;
+
+ asoc = &stcb->asoc;
+ if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1,
+ "Bad size too small/big fwd-tsn\n");
+ return;
+ }
+ m_size = (stcb->asoc.mapping_array_size << 3);
+ /*************************************************************/
+ /* 1. Here we update local cumTSN and shift the bitmap array */
+ /*************************************************************/
+ new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
+
+ if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
+ /* Already got there ... */
+ return;
+ }
+ /*
+ * now we know the new TSN is more advanced, let's find the actual
+ * gap
+ */
+ SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
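+ /* gap is the bit offset of new_cum_tsn from mapping_array_base_tsn */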
+ asoc->cumulative_tsn = new_cum_tsn;
+ if (gap >= m_size) {
+ if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ /*
+ * out of range (of single byte chunks in the rwnd I
+ * give out). This must be an attacker.
+ */
+ *abort_flag = 1;
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "New cum ack %8.8x too high, highest TSN %8.8x",
+ new_cum_tsn, asoc->highest_tsn_inside_map);
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ SCTP_STAT_INCR(sctps_fwdtsn_map_over);
+
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+ asoc->mapping_array_base_tsn = new_cum_tsn + 1;
+ asoc->highest_tsn_inside_map = new_cum_tsn;
+
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+ asoc->highest_tsn_inside_nr_map = new_cum_tsn;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
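+ /*
+ * Mark every TSN up to and including the new cum-ack as present
+ * in the non-renegable map, growing highest_tsn_inside_nr_map as
+ * needed.
+ */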
+ for (i = 0; i <= gap; i++) {
+ if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
+ !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
+ if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
+ asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
+ }
+ }
+ }
+ }
+ /*************************************************************/
+ /* 2. Clear up re-assembly queue */
+ /*************************************************************/
+
+ /* This is now done as part of clearing up the stream/seq */
+ if (asoc->idata_supported == 0) {
+ uint16_t sid;
+
+ /* Flush all the un-ordered data based on cum-tsn */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ for (sid = 0 ; sid < asoc->streamincnt; sid++) {
+ strm = &asoc->strmin[sid];
+ if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
+ sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
+ }
+ }
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ }
+ /*******************************************************/
+ /* 3. Update the PR-stream re-ordering queues and fix */
+ /* delivery issues as needed. */
+ /*******************************************************/
+ fwd_sz -= sizeof(*fwd);
+ if (m && fwd_sz) {
+ /* New method. */
+ unsigned int num_str;
+ uint32_t mid;
+ uint16_t sid;
+ uint16_t ordered, flags;
+ struct sctp_strseq *stseq, strseqbuf;
+ struct sctp_strseq_mid *stseq_m, strseqbuf_m;
+ offset += sizeof(*fwd);
+
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ if (asoc->idata_supported) {
+ num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
+ } else {
+ num_str = fwd_sz / sizeof(struct sctp_strseq);
+ }
+ for (i = 0; i < num_str; i++) {
+ if (asoc->idata_supported) {
+ stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_strseq_mid),
+ (uint8_t *)&strseqbuf_m);
+ offset += sizeof(struct sctp_strseq_mid);
+ if (stseq_m == NULL) {
+ break;
+ }
+ sid = ntohs(stseq_m->sid);
+ mid = ntohl(stseq_m->mid);
+ flags = ntohs(stseq_m->flags);
+ if (flags & PR_SCTP_UNORDERED_FLAG) {
+ ordered = 0;
+ } else {
+ ordered = 1;
+ }
+ } else {
+ stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_strseq),
+ (uint8_t *)&strseqbuf);
+ offset += sizeof(struct sctp_strseq);
+ if (stseq == NULL) {
+ break;
+ }
+ sid = ntohs(stseq->sid);
+ mid = (uint32_t)ntohs(stseq->ssn);
+ ordered = 1;
+ }
+ /* Convert */
+
+ /* now process */
+
+ /*
+ * Ok, we now look for the stream/seq on the read queue
+ * where it is not all delivered. If we find it, we transmute
+ * the read entry into a PDI_ABORTED.
+ */
+ if (sid >= asoc->streamincnt) {
+ /* screwed up streams, stop! */
+ break;
+ }
+ if ((asoc->str_of_pdapi == sid) &&
+ (asoc->ssn_of_pdapi == mid)) {
+ /* If this is the one we were partially delivering
+ * now, then we no longer are. Note this will change
+ * with the reassembly re-write.
+ */
+ asoc->fragmented_delivery_inprogress = 0;
+ }
+ strm = &asoc->strmin[sid];
+ if (ordered) {
+ TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
+ if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
+ sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
+ }
+ }
+ } else {
+ if (asoc->idata_supported) {
+ TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
+ if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
+ sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
+ }
+ }
+ } else {
+ if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
+ sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
+ }
+ }
+ }
+ TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
+ if ((control->sinfo_stream == sid) &&
+ (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
+ str_seq = (sid << 16) | (0x0000ffff & mid);
+ control->pdapi_aborted = 1;
+ sv = stcb->asoc.control_pdapi;
+ control->end_added = 1;
+ if (control->on_strm_q == SCTP_ON_ORDERED) {
+ TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
+ TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+#ifdef INVARIANTS
+ } else if (control->on_strm_q) {
+ panic("strm: %p ctl: %p unknown %d",
+ strm, control, control->on_strm_q);
+#endif
+ }
+ control->on_strm_q = 0;
+ stcb->asoc.control_pdapi = control;
+ sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+ stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ (void *)&str_seq,
+ SCTP_SO_NOT_LOCKED);
+ stcb->asoc.control_pdapi = sv;
+ break;
+ } else if ((control->sinfo_stream == sid) &&
+ SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
+ /* We are past our victim SSN */
+ break;
+ }
+ }
+ if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
+ /* Update the sequence number */
+ strm->last_mid_delivered = mid;
+ }
+ /* now kick the stream the new way */
+ /*sa_ignore NO_NULL_CHK*/
+ sctp_kick_prsctp_reorder_queue(stcb, strm);
+ }
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ }
+ /*
+ * Now slide thing forward.
+ */
+ sctp_slide_mapping_arrays(stcb);
+}
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.h
new file mode 100644
index 000000000..829a014f2
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_indata.h
@@ -0,0 +1,124 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.h 361116 2020-05-16 19:26:39Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_INDATA_H_
+#define _NETINET_SCTP_INDATA_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint32_t tsn, uint32_t ppid,
+ uint32_t context, uint16_t sid,
+ uint32_t mid, uint8_t flags,
+ struct mbuf *dm);
+
+
+#define sctp_build_readq_entry_mac(_ctl, in_it, context, net, tsn, ppid, sid, flags, dm, tfsn, mid) do { \
+ if (_ctl) { \
+ atomic_add_int(&((net)->ref_count), 1); \
+ memset(_ctl, 0, sizeof(struct sctp_queued_to_read)); \
+ (_ctl)->sinfo_stream = sid; \
+ TAILQ_INIT(&_ctl->reasm); \
+ (_ctl)->top_fsn = tfsn; \
+ (_ctl)->mid = mid; \
+ (_ctl)->sinfo_flags = (flags << 8); \
+ (_ctl)->sinfo_ppid = ppid; \
+ (_ctl)->sinfo_context = context; \
+ (_ctl)->fsn_included = 0xffffffff; \
+ (_ctl)->sinfo_tsn = tsn; \
+ (_ctl)->sinfo_cumtsn = tsn; \
+ (_ctl)->sinfo_assoc_id = sctp_get_associd((in_it)); \
+ (_ctl)->whoFrom = net; \
+ (_ctl)->data = dm; \
+ (_ctl)->stcb = (in_it); \
+ (_ctl)->port_from = (in_it)->rport; \
+ if ((in_it)->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { \
+ (_ctl)->do_not_ref_stcb = 1; \
+ }\
+ } \
+} while (0)
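+/*
+ * The macro above initializes a freshly allocated read-queue entry:
+ * it takes a reference on the source net, zeroes the entry, fills in
+ * the sinfo fields, and flags do_not_ref_stcb when the association is
+ * already about to be freed.
+ */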
+
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
+ struct sctp_sndrcvinfo *sinfo);
+
+void sctp_set_rwnd(struct sctp_tcb *, struct sctp_association *);
+
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc);
+
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int *abort_now, int ecne_seen);
+
+void
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+ struct sctp_tcb *stcb,
+ uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+ int *abort_now, uint8_t flags,
+ uint32_t cum_ack, uint32_t rwnd, int ecne_seen);
+
+/* draft-ietf-tsvwg-usctp */
+void
+sctp_handle_forward_tsn(struct sctp_tcb *,
+ struct sctp_forward_tsn_chunk *, int *, struct mbuf *, int);
+
+struct sctp_tmit_chunk *
+sctp_try_advance_peer_ack_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_service_queues(struct sctp_tcb *, struct sctp_association *);
+
+void
+sctp_update_acked(struct sctp_tcb *, struct sctp_shutdown_chunk *, int *);
+
+int
+sctp_process_data(struct mbuf **, int, int *, int,
+ struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, uint32_t *);
+
+void sctp_slide_mapping_arrays(struct sctp_tcb *stcb);
+
+void sctp_sack_check(struct sctp_tcb *, int);
+
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.c
new file mode 100644
index 000000000..7be22d32d
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.c
@@ -0,0 +1,6426 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 363194 2020-07-14 20:32:50Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_crc32.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/sctp_kdtrace.h>
+#endif
+#if defined(INET) || defined(INET6)
+#if !defined(_WIN32)
+#include <netinet/udp.h>
+#endif
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/smp.h>
+#endif
+
+static void
+sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+
+ /* This not only stops all cookie timers,
+ * it also stops any INIT timers as well. This
+ * will make sure that the timers are stopped in
+ * all collision cases.
+ */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
+ stcb->sctp_ep,
+ stcb,
+ net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
+ } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
+ stcb->sctp_ep,
+ stcb,
+ net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
+ }
+ }
+}
+
+/* INIT handler */
+static void
+sctp_handle_init(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
+ struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_init *init;
+ struct mbuf *op_err;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
+ (void *)stcb);
+ if (stcb == NULL) {
+ SCTP_INP_RLOCK(inp);
+ }
+ /* validate length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ /* validate parameters */
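+ /*
+ * A zero initiate tag, an a_rwnd below SCTP_MIN_RWND, or zero
+ * stream counts are protocol errors; each is answered with an
+ * ABORT carrying an Invalid Parameter cause.
+ */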
+ init = &cp->init;
+ if (init->initiate_tag == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
+ /* invalid parameter... send abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (init->num_inbound_streams == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (init->num_outbound_streams == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
+ offset + ntohs(cp->ch.chunk_length))) {
+ /* auth parameter(s) error... send abort */
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Problem with AUTH parameters");
+ sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ /* We are only accepting if we have a listening socket. */
+ if ((stcb == NULL) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (!SCTP_IS_LISTENING(inp)))) {
+ /*
+  * FIX ME ?? What about the TCP model when we have a
+  * match/restart case? Actually no fix is needed:
+  * the lookup will always find the existing assoc, so stcb
+  * would not be NULL. It may be questionable to do this
+  * since we COULD just send back the INIT-ACK and hope that
+  * the app did accept()'s by the time the COOKIE was sent. But
+  * there is a price to pay for COOKIE generation and I don't
+  * want to pay it on the chance that the app will actually do
+  * some accept()s. The app just loses and should NOT be in
+  * this state :-)
+  */
+ if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "No listener");
+ sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ }
+ goto outnow;
+ }
+ if ((stcb != NULL) &&
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
+ sctp_send_shutdown_ack(stcb, NULL);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+ } else {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
+ sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
+ src, dst, sh, cp,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ }
+ outnow:
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+}
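+
+/*
+ * Overview of the INIT handling above (informational): the mandatory
+ * parameter checks follow RFC 4960. An INIT with a zero initiate_tag,
+ * an a_rwnd below SCTP_MIN_RWND, or a zero stream count is answered
+ * with an ABORT carrying an invalid-parameter cause; an INIT that
+ * arrives without a listening socket gets no INIT-ACK at all (only an
+ * ABORT, and even that is suppressed when sctp_blackhole is set).
+ */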
+
+int
+sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
+{
+ int unsent_data;
+ unsigned int i;
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_association *asoc;
+
+ /* This function returns whether any stream has true unsent
+  * data on it. Note that as it looks through the streams it
+  * will also clean up any places that have stale data that
+  * has been sent but left at the top of the stream queue.
+  */
+ asoc = &stcb->asoc;
+ unsent_data = 0;
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
+ /* Check to see if some data queued */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ /*sa_ignore FREED_MEMORY*/
+ sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
+ if (sp == NULL) {
+ continue;
+ }
+ if ((sp->msg_is_complete) &&
+ (sp->length == 0) &&
+ (sp->sender_all_done)) {
+ /* We are doing deferred cleanup. Last
+  * time through, when we took all the data,
+  * sender_all_done was not set.
+  */
+ if (sp->put_last_out == 0) {
+ SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
+ SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
+ sp->sender_all_done,
+ sp->length,
+ sp->msg_is_complete,
+ sp->put_last_out);
+ }
+ atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
+ TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp, so_locked);
+ if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ unsent_data++;
+ }
+ } else {
+ unsent_data++;
+ }
+ if (unsent_data > 0) {
+ break;
+ }
+ }
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return (unsent_data);
+}
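+
+/*
+ * Illustrative caller pattern (a sketch only, assuming stcb/asoc as in
+ * the handlers below; it is not compiled): the SHUTDOWN handling
+ * further down combines this check with the send and sent queues to
+ * decide whether the association may answer with a SHUTDOWN-ACK.
+ */
+#if 0
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+     TAILQ_EMPTY(&asoc->sent_queue) &&
+     !sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
+ /* nothing pending: safe to move towards SHUTDOWN-ACK-SENT */
+ }
+#endif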
+
+/*
+ * process peer "INIT/INIT-ACK" chunk, returns value < 0 on error
+ */
+static int
+sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
+{
+ struct sctp_init *init;
+ struct sctp_association *asoc;
+ struct sctp_nets *lnet;
+ unsigned int i;
+
+ init = &cp->init;
+ asoc = &stcb->asoc;
+ /* save off parameters */
+ asoc->peer_vtag = ntohl(init->initiate_tag);
+ asoc->peers_rwnd = ntohl(init->a_rwnd);
+ /* init tsn's */
+ asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
+
+ if (!TAILQ_EMPTY(&asoc->nets)) {
+ /* update any ssthresh's that may have a default */
+ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+ lnet->ssthresh = asoc->peers_rwnd;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+ sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
+ }
+
+ }
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
+ unsigned int newcnt;
+ struct sctp_stream_out *outs;
+ struct sctp_stream_queue_pending *sp, *nsp;
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ /* abandon the upper streams */
+ newcnt = ntohs(init->num_inbound_streams);
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+ if (chk->rec.data.sid >= newcnt) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+ }
+ if (chk->data != NULL) {
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
+ 0, chk, SCTP_SO_NOT_LOCKED);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ /*sa_ignore FREED_MEMORY*/
+ }
+ }
+ if (asoc->strmout) {
+ for (i = newcnt; i < asoc->pre_open_streams; i++) {
+ outs = &asoc->strmout[i];
+ TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
+ atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
+ stcb, 0, sp, SCTP_SO_NOT_LOCKED);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ /* Free the chunk */
+ sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
+ /*sa_ignore FREED_MEMORY*/
+ }
+ outs->state = SCTP_STREAM_CLOSED;
+ }
+ }
+ /* cut back the count */
+ asoc->pre_open_streams = newcnt;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ asoc->streamoutcnt = asoc->pre_open_streams;
+ if (asoc->strmout) {
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ asoc->strmout[i].state = SCTP_STREAM_OPEN;
+ }
+ }
+ /* EY - nr_sack: initialize highest tsn in nr_mapping_array */
+ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ /* This is the next one we expect */
+ asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
+
+ asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
+ asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ /* open the requested streams */
+
+ if (asoc->strmin != NULL) {
+ /* Free the old ones */
+ for (i = 0; i < asoc->streamincnt; i++) {
+ sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
+ sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
+ }
+ SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
+ }
+ if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
+ asoc->streamincnt = ntohs(init->num_outbound_streams);
+ } else {
+ asoc->streamincnt = asoc->max_inbound_streams;
+ }
+ SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
+ sizeof(struct sctp_stream_in), SCTP_M_STRMI);
+ if (asoc->strmin == NULL) {
+ /* we didn't get memory for the streams! */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
+ return (-1);
+ }
+ for (i = 0; i < asoc->streamincnt; i++) {
+ asoc->strmin[i].sid = i;
+ asoc->strmin[i].last_mid_delivered = 0xffffffff;
+ TAILQ_INIT(&asoc->strmin[i].inqueue);
+ TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
+ asoc->strmin[i].pd_api_started = 0;
+ asoc->strmin[i].delivery_started = 0;
+ }
+ /*
+  * sctp_load_addresses_from_init will put the addresses into the
+  * association when the COOKIE is processed or the INIT-ACK is
+  * processed. Both types of COOKIEs, existing and new, call this
+  * routine. It will remove addresses that are no longer in the
+  * association (for the restarting case where addresses are
+  * removed). Up front, when the INIT arrives, we will discard it
+  * if it is a restart and new addresses have been added.
+  */
+ /* sa_ignore MEMLEAK */
+ return (0);
+}
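+
+/*
+ * Recap of sctp_process_init (informational): it stores the peer's
+ * vtag, rwnd and initial TSN, trims our outbound stream count down to
+ * the peer's advertised num_inbound_streams (aborting chunks queued on
+ * the dropped streams), and rebuilds the inbound stream array sized to
+ * the smaller of the peer's num_outbound_streams and our
+ * max_inbound_streams.
+ */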
+
+/*
+ * INIT-ACK message processing/consumption returns value < 0 on error
+ */
+static int
+sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
+ struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id)
+{
+ struct sctp_association *asoc;
+ struct mbuf *op_err;
+ int retval, abort_flag, cookie_found;
+ int initack_limit;
+ int nat_friendly = 0;
+
+ /* First verify that we have no illegal param's */
+ abort_flag = 0;
+ cookie_found = 0;
+
+ op_err = sctp_arethere_unrecognized_parameters(m,
+ (offset + sizeof(struct sctp_init_chunk)),
+ &abort_flag, (struct sctp_chunkhdr *)cp,
+ &nat_friendly, &cookie_found);
+ if (abort_flag) {
+ /* Send an abort and notify peer */
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (!cookie_found) {
+ uint16_t len;
+
+ /* Only report the missing cookie parameter */
+ if (op_err != NULL) {
+ sctp_m_freem(op_err);
+ }
+ len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
+ /* We abort with an error of missing mandatory param */
+ op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err != NULL) {
+ struct sctp_error_missing_param *cause;
+
+ SCTP_BUF_LEN(op_err) = len;
+ cause = mtod(op_err, struct sctp_error_missing_param *);
+ /* Subtract the reserved param */
+ cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
+ cause->cause.length = htons(len);
+ cause->num_missing_params = htonl(1);
+ cause->type[0] = htons(SCTP_STATE_COOKIE);
+ }
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-3);
+ }
+ asoc = &stcb->asoc;
+ asoc->peer_supports_nat = (uint8_t)nat_friendly;
+ /* process the peer's parameters in the INIT-ACK */
+ retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
+ if (retval < 0) {
+ if (op_err != NULL) {
+ sctp_m_freem(op_err);
+ }
+ return (retval);
+ }
+ initack_limit = offset + ntohs(cp->ch.chunk_length);
+ /* load all addresses */
+ if ((retval = sctp_load_addresses_from_init(stcb, m,
+ (offset + sizeof(struct sctp_init_chunk)), initack_limit,
+ src, dst, NULL, stcb->asoc.port))) {
+ if (op_err != NULL) {
+ sctp_m_freem(op_err);
+ }
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Problem with address parameters");
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "Load addresses from INIT causes an abort %d\n",
+ retval);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ /* if the peer doesn't support asconf, flush the asconf queue */
+ if (asoc->asconf_supported == 0) {
+ struct sctp_asconf_addr *param, *nparam;
+
+ TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
+ TAILQ_REMOVE(&asoc->asconf_queue, param, next);
+ SCTP_FREE(param, SCTP_M_ASC_ADDR);
+ }
+ }
+
+ stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+ stcb->asoc.local_hmacs);
+ if (op_err) {
+ sctp_queue_op_err(stcb, op_err);
+ /* queuing will steal away the mbuf chain to the out queue */
+ op_err = NULL;
+ }
+ /* extract the cookie and queue it to "echo" it back... */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ net->error_count = 0;
+
+ /*
+  * Cancel the INIT timer. We do this first, before queueing the
+  * cookie. We always cancel at the primary to assure that we are
+  * canceling the timer started by the INIT, which always goes to the
+  * primary.
+  */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
+ asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
+
+ /* calculate the RTO */
+ sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
+ SCTP_RTT_FROM_NON_DATA);
+#if defined(__Userspace__)
+ if (stcb->sctp_ep->recv_callback) {
+ if (stcb->sctp_socket) {
+ uint32_t inqueue_bytes, sb_free_now;
+ struct sctp_inpcb *inp;
+
+ inp = stcb->sctp_ep;
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
+
+ /* check if the amount free in the send socket buffer crossed the threshold */
+ if (inp->send_callback &&
+ (((inp->send_sb_threshold > 0) &&
+ (sb_free_now >= inp->send_sb_threshold) &&
+ (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
+ (inp->send_sb_threshold == 0))) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ inp->send_callback(stcb->sctp_socket, sb_free_now);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+ }
+ }
+#endif
+ retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
+ return (retval);
+}
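+
+/*
+ * Happy path of sctp_process_init_ack (informational): verify the
+ * parameters and the mandatory STATE-COOKIE, absorb the peer's INIT
+ * parameters, load its addresses, negotiate the HMAC for AUTH, stop
+ * the INIT timer at the primary, take an RTO sample and finally queue
+ * the COOKIE-ECHO via sctp_send_cookie_echo().
+ */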
+
+static void
+sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ union sctp_sockstore store;
+ struct sctp_nets *r_net, *f_net;
+ struct timeval tv;
+ int req_prim = 0;
+ uint16_t old_error_counter;
+
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
+ /* Invalid length */
+ return;
+ }
+
+ memset(&store, 0, sizeof(store));
+ switch (cp->heartbeat.hb_info.addr_family) {
+#ifdef INET
+ case AF_INET:
+ if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
+ store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
+#ifdef HAVE_SIN_LEN
+ store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
+#endif
+ store.sin.sin_port = stcb->rport;
+ memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
+ sizeof(store.sin.sin_addr));
+ } else {
+ return;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
+ store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
+#ifdef HAVE_SIN6_LEN
+ store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
+#endif
+ store.sin6.sin6_port = stcb->rport;
+ memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
+ } else {
+ return;
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
+ store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
+#ifdef HAVE_SCONN_LEN
+ store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
+#endif
+ store.sconn.sconn_port = stcb->rport;
+ memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
+ } else {
+ return;
+ }
+ break;
+#endif
+ default:
+ return;
+ }
+ r_net = sctp_findnet(stcb, &store.sa);
+ if (r_net == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
+ return;
+ }
+ if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+ (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
+ (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
+ /*
+  * If it's a HB and its random value is correct, we can
+  * confirm the destination.
+  */
+ r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
+ stcb->asoc.primary_destination = r_net;
+ r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
+ f_net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (f_net != r_net) {
+ /* The first one on the list is NOT the primary.
+  * sctp_cmpaddr() is much more efficient if the
+  * primary is the first on the list, so make it
+  * so.
+  */
+ TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
+ }
+ req_prim = 1;
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
+ r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ old_error_counter = r_net->error_count;
+ r_net->error_count = 0;
+ r_net->hb_responded = 1;
+ tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
+ tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
+ /* Now lets do a RTO with this */
+ sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
+ SCTP_RTT_FROM_NON_DATA);
+ if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
+ r_net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
+ }
+ if (r_net->dest_state & SCTP_ADDR_PF) {
+ r_net->dest_state &= ~SCTP_ADDR_PF;
+ stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
+ }
+ if (old_error_counter > 0) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
+ }
+ if (r_net == stcb->asoc.primary_destination) {
+ if (stcb->asoc.alternate) {
+ /* release the alternate, primary is good */
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ }
+ /* Mobility adaptation */
+ if (req_prim) {
+ if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_PRIM_DELETED)) {
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
+ stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_assoc_immediate_retrans(stcb,
+ stcb->asoc.primary_destination);
+ }
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE)) {
+ sctp_move_chunks_from_net(stcb,
+ stcb->asoc.deleted_primary);
+ }
+ sctp_delete_prim_timer(stcb->sctp_ep, stcb);
+ }
+ }
+}
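+
+/*
+ * Note (informational): the HEARTBEAT-ACK echoes back the timestamp we
+ * put into hb_info when the HEARTBEAT was sent, so the tv rebuilt above
+ * gives sctp_calculate_rto() an RTT sample without extra state. A
+ * matching random value doubles as proof that the path works, which is
+ * why it may confirm an unconfirmed address.
+ */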
+
+static int
+sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
+{
+ /*
+  * Returning 0 means we want you to proceed with the abort;
+  * non-zero means no abort processing.
+  */
+ uint32_t new_vtag;
+ struct sctpasochead *head;
+
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ } else {
+ return (0);
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
+ /* generate a new vtag and send init */
+ LIST_REMOVE(stcb, sctp_asocs);
+ stcb->asoc.my_vtag = new_vtag;
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /* put it in the bucket in the vtag hash of assoc's for the system */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ SCTP_INP_INFO_WUNLOCK();
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (1);
+ } else {
+ /* Treat it like a case where the cookie expired, i.e.:
+  * - dump the current cookie,
+  * - generate a new vtag,
+  * - resend the INIT.
+  */
+ LIST_REMOVE(stcb, sctp_asocs);
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, &stcb->asoc);
+ stcb->asoc.my_vtag = new_vtag;
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /* put it in the bucket in the vtag hash of assoc's for the system */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ SCTP_INP_INFO_WUNLOCK();
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ return (0);
+}
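+
+/*
+ * Sketch of the recovery above (informational): associations are
+ * hashed by my_vtag, so changing the tag means removing the stcb from
+ * the vtag hash, updating asoc.my_vtag and re-inserting it into the
+ * new bucket, all under the INP info write lock, before the INIT is
+ * resent with the fresh tag.
+ */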
+
+static int
+sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /* Returning 0 means we want you to proceed with the abort;
+  * non-zero means no abort processing.
+  */
+ if (stcb->asoc.auth_supported == 0) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
+ return (0);
+ }
+ sctp_asconf_send_nat_state_update(stcb, net);
+ return (1);
+}
+
+/* Returns 1 if the stcb was aborted, 0 otherwise */
+static int
+sctp_handle_abort(struct sctp_abort_chunk *abort,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+ uint16_t len;
+ uint16_t error;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
+ if (stcb == NULL)
+ return (0);
+
+ len = ntohs(abort->ch.chunk_length);
+ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
+ /* Need to check the cause codes for our
+  * two magic NAT aborts, which don't
+  * necessarily kill the assoc.
+  */
+ struct sctp_error_cause *cause;
+
+ cause = (struct sctp_error_cause *)(abort + 1);
+ error = ntohs(cause->code);
+ if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ABORT flags:%x\n",
+ abort->ch.chunk_flags);
+ if (sctp_handle_nat_colliding_state(stcb)) {
+ return (0);
+ }
+ } else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ABORT flags:%x\n",
+ abort->ch.chunk_flags);
+ if (sctp_handle_nat_missing_state(stcb, net)) {
+ return (0);
+ }
+ }
+ } else {
+ error = 0;
+ }
+ /* stop any receive timers */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
+ /* notify user of the abort and clean up... */
+ sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
+ /* free the tcb */
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ sctp_print_out_track_log(stcb);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
+ return (1);
+}
+
+static void
+sctp_start_net_timers(struct sctp_tcb *stcb)
+{
+ uint32_t cnt_hb_sent;
+ struct sctp_nets *net;
+
+ cnt_hb_sent = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* For each network start:
+  * 1) A PMTU timer.
+  * 2) A HB timer.
+  * 3) If the dest is unconfirmed, send
+  *    a HB as well, provided fewer than
+  *    max_hb_burst have been sent.
+  */
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
+ sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+ cnt_hb_sent++;
+ }
+ }
+ if (cnt_hb_sent) {
+ sctp_chunk_output(stcb->sctp_ep, stcb,
+ SCTP_OUTPUT_FROM_COOKIE_ACK,
+ SCTP_SO_NOT_LOCKED);
+ }
+}
+
+static void
+sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
+{
+ struct sctp_association *asoc;
+ int some_on_streamwheel;
+ int old_state;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown: handling SHUTDOWN\n");
+ if (stcb == NULL)
+ return;
+ asoc = &stcb->asoc;
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ return;
+ }
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
+ /* Shutdown NOT the expected size */
+ return;
+ }
+ old_state = SCTP_GET_STATE(stcb);
+ sctp_update_acked(stcb, cp, abort_flag);
+ if (*abort_flag) {
+ return;
+ }
+ if (asoc->control_pdapi) {
+ /* With a normal shutdown
+  * we assume the end of the last record.
+  */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ if (asoc->control_pdapi->on_strm_q) {
+ struct sctp_stream_in *strm;
+
+ strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
+ if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
+ /* Unordered */
+ TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
+ asoc->control_pdapi->on_strm_q = 0;
+ } else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
+ /* Ordered */
+ TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
+ asoc->control_pdapi->on_strm_q = 0;
+#ifdef INVARIANTS
+ } else {
+ panic("Unknown state on ctrl:%p on_strm_q:%d",
+ asoc->control_pdapi,
+ asoc->control_pdapi->on_strm_q);
+#endif
+ }
+ }
+ asoc->control_pdapi->end_added = 1;
+ asoc->control_pdapi->pdapi_aborted = 1;
+ asoc->control_pdapi = NULL;
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ if (stcb->sctp_socket) {
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* goto SHUTDOWN_RECEIVED state to block new requests */
+ if (stcb->sctp_socket) {
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
+ /* notify upper layer that peer has initiated a shutdown */
+ sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+
+ /* reset time */
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+ }
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
+ /*
+ * stop the shutdown timer, since we WILL move to
+ * SHUTDOWN-ACK-SENT.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
+ }
+ /* Now is there unsent data on a stream somewhere? */
+ some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
+
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ some_on_streamwheel) {
+ /* By returning we will push more data out */
+ return;
+ } else {
+ /* no outstanding data to send, so move on... */
+ /* send SHUTDOWN-ACK */
+ /* move to SHUTDOWN-ACK-SENT state */
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown_ack(stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, net);
+ } else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ sctp_send_shutdown_ack(stcb, net);
+ }
+ }
+}
+
+static void
+sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+#endif
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+ /* process according to association state */
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* unexpected SHUTDOWN-ACK... do OOTB handling... */
+ sctp_send_shutdown_complete(stcb, net, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* unexpected SHUTDOWN-ACK... so ignore... */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if (asoc->control_pdapi) {
+ /* With a normal shutdown
+  * we assume the end of the last record.
+  */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ asoc->control_pdapi->end_added = 1;
+ asoc->control_pdapi->pdapi_aborted = 1;
+ asoc->control_pdapi = NULL;
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+#ifdef INVARIANTS
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
+ panic("Queues are not empty when handling SHUTDOWN-ACK");
+ }
+#endif
+ /* stop the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
+ /* send SHUTDOWN-COMPLETE */
+ sctp_send_shutdown_complete(stcb, net, 0);
+ /* notify upper layer protocol */
+ if (stcb->sctp_socket) {
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ stcb->sctp_socket->so_snd.sb_cc = 0;
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+ /* free the TCB but first save off the ep */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+}
+
+static void
+sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
+{
+ switch (chunk_type) {
+ case SCTP_ASCONF_ACK:
+ case SCTP_ASCONF:
+ sctp_asconf_cleanup(stcb);
+ break;
+ case SCTP_IFORWARD_CUM_TSN:
+ case SCTP_FORWARD_CUM_TSN:
+ stcb->asoc.prsctp_supported = 0;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "Peer does not support chunk type %d (0x%x).\n",
+ chunk_type, chunk_type);
+ break;
+ }
+}
+
+/*
+ * Skip past the param header and then we will find the param that caused
+ * the problem. There are a number of params in an ASCONF, or the PR-SCTP
+ * param; these will turn off specific features.
+ * XXX: Is this the right thing to do?
+ */
+static void
+sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
+{
+ switch (parameter_type) {
+ /* pr-sctp draft */
+ case SCTP_PRSCTP_SUPPORTED:
+ stcb->asoc.prsctp_supported = 0;
+ break;
+ case SCTP_SUPPORTED_CHUNK_EXT:
+ break;
+ /* draft-ietf-tsvwg-addip-sctp */
+ case SCTP_HAS_NAT_SUPPORT:
+ stcb->asoc.peer_supports_nat = 0;
+ break;
+ case SCTP_ADD_IP_ADDRESS:
+ case SCTP_DEL_IP_ADDRESS:
+ case SCTP_SET_PRIM_ADDR:
+ stcb->asoc.asconf_supported = 0;
+ break;
+ case SCTP_SUCCESS_REPORT:
+ case SCTP_ERROR_CAUSE_IND:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "Turning off ASCONF to this strange peer\n");
+ stcb->asoc.asconf_supported = 0;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "Peer does not support param type %d (0x%x)??\n",
+ parameter_type, parameter_type);
+ break;
+ }
+}
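+
+/*
+ * Note (informational): the two handlers above run when the peer
+ * reports a chunk or parameter of ours as unrecognized, so the
+ * response is to stop using the corresponding optional feature
+ * (PR-SCTP, ASCONF, NAT support) for the rest of the association
+ * rather than to abort it.
+ */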
+
+static int
+sctp_handle_error(struct sctp_chunkhdr *ch,
+ struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
+{
+ struct sctp_error_cause *cause;
+ struct sctp_association *asoc;
+ uint32_t remaining_length, adjust;
+ uint16_t code, cause_code, cause_length;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ /* parse through all of the errors and process */
+ asoc = &stcb->asoc;
+ cause = (struct sctp_error_cause *)((caddr_t)ch +
+ sizeof(struct sctp_chunkhdr));
+ remaining_length = ntohs(ch->chunk_length);
+ if (remaining_length > limit) {
+ remaining_length = limit;
+ }
+ if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
+ remaining_length -= sizeof(struct sctp_chunkhdr);
+ } else {
+ remaining_length = 0;
+ }
+ code = 0;
+ while (remaining_length >= sizeof(struct sctp_error_cause)) {
+ /* Process an Error Cause */
+ cause_code = ntohs(cause->code);
+ cause_length = ntohs(cause->length);
+ if ((cause_length > remaining_length) || (cause_length == 0)) {
+ /* Invalid cause length, possibly due to truncation. */
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
+ remaining_length, cause_length);
+ return (0);
+ }
+ if (code == 0) {
+ /* report the first error cause */
+ code = cause_code;
+ }
+ switch (cause_code) {
+ case SCTP_CAUSE_INVALID_STREAM:
+ case SCTP_CAUSE_MISSING_PARAM:
+ case SCTP_CAUSE_INVALID_PARAM:
+ case SCTP_CAUSE_NO_USER_DATA:
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
+ cause_code);
+ break;
+ case SCTP_CAUSE_NAT_COLLIDING_STATE:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n",
+ ch->chunk_flags);
+ if (sctp_handle_nat_colliding_state(stcb)) {
+ return (0);
+ }
+ break;
+ case SCTP_CAUSE_NAT_MISSING_STATE:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n",
+ ch->chunk_flags);
+ if (sctp_handle_nat_missing_state(stcb, net)) {
+ return (0);
+ }
+ break;
+ case SCTP_CAUSE_STALE_COOKIE:
+ /*
+ * We only act if we have echoed a cookie and are
+ * waiting.
+ */
+ if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ struct sctp_error_stale_cookie *stale_cookie;
+
+ stale_cookie = (struct sctp_error_stale_cookie *)cause;
+ asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time);
+ /* Double it to be more robust on RTX */
+ if (asoc->cookie_preserve_req <= UINT32_MAX / 2) {
+ asoc->cookie_preserve_req *= 2;
+ } else {
+ asoc->cookie_preserve_req = UINT32_MAX;
+ }
+ asoc->stale_cookie_count++;
+ if (asoc->stale_cookie_count >
+ asoc->max_init_times) {
+ sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
+ /* now free the asoc */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (-1);
+ }
+ /* blast back to INIT state */
+ sctp_toss_old_cookies(stcb, &stcb->asoc);
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ case SCTP_CAUSE_UNRESOLVABLE_ADDR:
+ /*
+ * Nothing we can do here, we don't do hostname
+ * addresses so if the peer does not like my IPv6
+ * (or IPv4 for that matter) it does not matter. If
+ * they don't support that type of address, they can
+ * NOT possibly get that packet type... i.e. with no
+ * IPv6 you can't receive a IPv6 packet. so we can
+ * safely ignore this one. If we ever added support
+ * for HOSTNAME Addresses, then we would need to do
+ * something here.
+ */
+ break;
+ case SCTP_CAUSE_UNRECOG_CHUNK:
+ if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
+ struct sctp_error_unrecognized_chunk *unrec_chunk;
+
+ unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
+ sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
+ }
+ break;
+ case SCTP_CAUSE_UNRECOG_PARAM:
+ /* XXX: We only consider the first parameter */
+ if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
+ struct sctp_paramhdr *unrec_parameter;
+
+ unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
+ sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
+ }
+ break;
+ case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
+ /*
+  * We ignore this since the timer will drive out a
+  * new cookie anyway and their timer will drive us
+  * to send a SHUTDOWN_COMPLETE. We can't send one
+  * here since we don't have their tag.
+  */
+ break;
+ case SCTP_CAUSE_DELETING_LAST_ADDR:
+ case SCTP_CAUSE_RESOURCE_SHORTAGE:
+ case SCTP_CAUSE_DELETING_SRC_ADDR:
+ /*
+  * We should NOT get these here, but in an
+  * ASCONF-ACK.
+  */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
+ cause_code);
+ break;
+ case SCTP_CAUSE_OUT_OF_RESC:
+ /*
+  * And what, pray tell, do we do with the fact that
+  * the peer is out of resources? Not really sure we
+  * could do anything but abort. I suspect this
+  * should have come WITH an abort instead of in an
+  * OP-ERROR.
+  */
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
+ cause_code);
+ break;
+ }
+ adjust = SCTP_SIZE32(cause_length);
+ if (remaining_length >= adjust) {
+ remaining_length -= adjust;
+ } else {
+ remaining_length = 0;
+ }
+ cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
+ return (0);
+}
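+
+/*
+ * Worked example for the cause walk above (a sketch): a cause with
+ * length 7 occupies SCTP_SIZE32(7) == 8 bytes on the wire, so the
+ * cursor advances by the padded size; stepping by the raw length would
+ * misalign every following cause.
+ */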
+
+static int
+sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
+ struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id)
+{
+ struct sctp_init_ack *init_ack;
+ struct mbuf *op_err;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_init_ack: handling INIT-ACK\n");
+
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_init_ack: TCB is null\n");
+ return (-1);
+ }
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
+ /* Invalid length */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ init_ack = &cp->init;
+ /* validate parameters */
+ if (init_ack->initiate_tag == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (init_ack->num_inbound_streams == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (init_ack->num_outbound_streams == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ /* process according to association state... */
+ switch (SCTP_GET_STATE(stcb)) {
+ case SCTP_STATE_COOKIE_WAIT:
+ /* this is the expected state for this chunk */
+ /* process the INIT-ACK parameters */
+ if (stcb->asoc.primary_destination->dest_state &
+ SCTP_ADDR_UNCONFIRMED) {
+ /*
+ * The primary is where we sent the INIT, we can
+ * always consider it confirmed when the INIT-ACK is
+ * returned. Do this before we load addresses
+ * though.
+ */
+ stcb->asoc.primary_destination->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
+ }
+ if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
+ net, abort_no_unlock,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id) < 0) {
+ /* error in parsing parameters */
+ return (-1);
+ }
+ /* update our state */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
+
+ /* reset the RTO calc */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ /*
+  * collapse the init timer back in case of an exponential
+  * backoff
+  */
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
+ stcb, net);
+ /*
+ * the send at the end of the inbound data processing will
+ * cause the cookie to be sent
+ */
+ break;
+ case SCTP_STATE_SHUTDOWN_SENT:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_COOKIE_ECHOED:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_OPEN:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_EMPTY:
+ case SCTP_STATE_INUSE:
+ default:
+ /* incorrect state... discard */
+ return (-1);
+ }
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
+ return (0);
+}
+
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port);
+
+/*
+ * Handle a state cookie for an existing association.
+ * m: input packet mbuf chain; assumes a pullup on the IP/SCTP/COOKIE-ECHO
+ *    chunk. Note: this is a "split" mbuf and the cookie signature does
+ *    not exist.
+ * offset: offset into the mbuf to the cookie-echo chunk.
+ */
+static struct sctp_tcb *
+sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_association *asoc;
+ struct sctp_init_chunk *init_cp, init_buf;
+ struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ struct sctp_nets *net;
+ struct mbuf *op_err;
+ struct timeval old;
+ int init_offset, initack_offset, i;
+ int retval;
+ int spec_flag = 0;
+ uint32_t how_indx;
+#if defined(SCTP_DETAILED_STR_STATS)
+ int j;
+#endif
+
+ net = *netp;
+ /* I know that the TCB is non-NULL from the caller */
+ asoc = &stcb->asoc;
+ for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
+ if (asoc->cookie_how[how_indx] == 0)
+ break;
+ }
+ if (how_indx < sizeof(asoc->cookie_how)) {
+ asoc->cookie_how[how_indx] = 1;
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /* SHUTDOWN came in after sending INIT-ACK */
+ sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
+ op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
+ sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, net->port);
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 2;
+ return (NULL);
+ }
+ /*
+  * Find and validate the INIT chunk in the cookie (peer's info);
+  * the INIT should start after the cookie-echo header struct
+  * (chunk header, state cookie header struct).
+  */
+ init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
+
+ init_cp = (struct sctp_init_chunk *)
+ sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+ (uint8_t *) & init_buf);
+ if (init_cp == NULL) {
+ /* could not pull a INIT chunk in cookie */
+ return (NULL);
+ }
+ if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+ return (NULL);
+ }
+ /*
+  * Find and validate the INIT-ACK chunk in the cookie (my info);
+  * the INIT-ACK follows the INIT chunk.
+  */
+ initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
+ initack_cp = (struct sctp_init_ack_chunk *)
+ sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+ (uint8_t *) & initack_buf);
+ if (initack_cp == NULL) {
+ /* could not pull INIT-ACK chunk in cookie */
+ return (NULL);
+ }
+ if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+ return (NULL);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
+ /*
+  * case D in Section 5.2.4 Table 2: MMAA; process accordingly
+  * to get into the OPEN state
+  */
+ if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
+ /*-
+  * Oops, this means that we somehow generated two identical
+  * vtags. I.e. we did:
+  *  Us               Peer
+  *   <---INIT(tag=a)------
+  *   ----INIT-ACK(tag=t)-->
+  *   ----INIT(tag=t)------> *1
+  *   <---INIT-ACK(tag=a)---
+  *   <----CE(tag=t)------------- *2
+  *
+  * At point *1 we should be generating a different
+  * tag t'. Which means we would throw away the CE and send
+  * ours instead. Basically this is case C (throw away side).
+  */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 17;
+ return (NULL);
+
+ }
+ switch (SCTP_GET_STATE(stcb)) {
+ case SCTP_STATE_COOKIE_WAIT:
+ case SCTP_STATE_COOKIE_ECHOED:
+ /*
+  * INIT was sent but got a COOKIE_ECHO with the
+  * correct tags... just accept it... but we must
+  * process the INIT so that we can make sure we
+  * have the right seq numbers.
+  */
+ /* First we must process the INIT !! */
+ retval = sctp_process_init(init_cp, stcb);
+ if (retval < 0) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 3;
+ return (NULL);
+ }
+ /* we have already processed the INIT so no problem */
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
+ stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
+ stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
+ /* update current state */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ else
+ SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+
+ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ sctp_stop_all_cookie_timers(stcb);
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (!SCTP_IS_LISTENING(inp))) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+ /*
+  * Here is where collision would go if we
+  * did a connect() and instead got an
+  * init/init-ack/cookie done before the
+  * init-ack came back...
+  */
+ stcb->sctp_ep->sctp_flags |=
+ SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (NULL);
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+ /*
+ * since we did not send a HB make sure we
+ * don't double things
+ */
+ old.tv_sec = cookie->time_entered.tv_sec;
+ old.tv_usec = cookie->time_entered.tv_usec;
+ net->hb_responded = 1;
+ sctp_calculate_rto(stcb, asoc, net, &old,
+ SCTP_RTT_FROM_NON_DATA);
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+ inp, stcb, NULL);
+ }
+ break;
+ default:
+ /*
+ * we're in the OPEN state (or beyond), so
+ * peer must have simply lost the COOKIE-ACK
+ */
+ break;
+ } /* end switch */
+ sctp_stop_all_cookie_timers(stcb);
+ /*
+ * We ignore the return code here.. not sure if we should
+ * somehow abort.. but we do have an existing asoc. This
+ * really should not fail.
+ */
+ if (sctp_load_addresses_from_init(stcb, m,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, src, dst, init_src, stcb->asoc.port)) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 4;
+ return (NULL);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_toss_old_cookies(stcb, asoc);
+ sctp_send_cookie_ack(stcb);
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 5;
+ return (stcb);
+ }
+
+ if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+ ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
+ cookie->tie_tag_my_vtag == 0 &&
+ cookie->tie_tag_peer_vtag == 0) {
+ /*
+ * case C in Section 5.2.4 Table 2: XMOO silently discard
+ */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 6;
+ return (NULL);
+ }
+ /* If NAT support is on, the conditions below hold, and the
+  * stcb is established, send back an ABORT(colliding state).
+  */
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
+ (asoc->peer_supports_nat) &&
+ ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+ (asoc->peer_vtag == 0)))) {
+ /* Special case - the peers support NAT. We may have
+  * handed out the same tag on two INITs since one assoc
+  * was not established: i.e. we get an INIT from host-1
+  * behind the NAT and we respond tag-a, we get an INIT from
+  * host-2 behind the NAT and we hand out tag-a again. Then we
+  * bring up host-1's (or 2's) assoc, and then comes the cookie
+  * from host-2 (or 1). Now we have colliding state. We must
+  * send an abort here with a colliding state indication.
+  */
+ op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
+ sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ return (NULL);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+ (asoc->peer_vtag == 0))) {
+ /*
+  * case B in Section 5.2.4 Table 2: MXAA or MOAA; my info
+  * should be ok, re-accept peer info
+  */
+ if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
+ /* Extension of case C.
+  * If we hit this, then the random number
+  * generator returned the same vtag when we
+  * first sent our INIT-ACK and when we later sent
+  * our INIT. The side with the seq numbers that are
+  * different will be the one that normally would
+  * have hit case C. This in effect "extends" our vtags
+  * in this collision case to be 64 bits. The same collision
+  * could occur, i.e. you get both the vtag and the seq number
+  * the same twice in a row... but that is much less likely. If it
+  * did happen, then we would proceed through and bring
+  * up the assoc... we may end up with the wrong stream
+  * setup, however... which would be bad... but there is
+  * no way to tell... until we send on a stream that does
+  * not exist :-)
+  */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 7;
+
+ return (NULL);
+ }
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 8;
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
+ sctp_stop_all_cookie_timers(stcb);
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+ NULL);
+ }
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+ asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+
+ if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
+ /* OK, the peer probably discarded our
+  * data (if we echoed a cookie+data). So anything
+  * on the sent_queue should be marked for
+  * retransmit. We may not get something to
+  * kick us, so it COULD still take a timeout
+  * to move these... but it can't hurt to mark them.
+  */
+ struct sctp_tmit_chunk *chk;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ spec_flag++;
+ }
+ }
+
+ }
+ /* process the INIT info (peer's info) */
+ retval = sctp_process_init(init_cp, stcb);
+ if (retval < 0) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 9;
+ return (NULL);
+ }
+ if (sctp_load_addresses_from_init(stcb, m,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, src, dst, init_src, stcb->asoc.port)) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 10;
+ return (NULL);
+ }
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (!SCTP_IS_LISTENING(inp))) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+ stcb->sctp_ep->sctp_flags |=
+ SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (NULL);
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ else
+ SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ } else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
+ } else {
+ SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, asoc);
+ sctp_send_cookie_ack(stcb);
+ if (spec_flag) {
+			/* Only if we have retrans set do we do this. What
+			 * this call does is get only the COOKIE-ACK out;
+			 * when we return, the normal call to
+			 * sctp_chunk_output will get the retransmissions
+			 * out behind this.
+			 */
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
+ }
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 11;
+
+ return (stcb);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+ ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
+ cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
+ cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
+ cookie->tie_tag_peer_vtag != 0) {
+ struct sctpasochead *head;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ if (asoc->peer_supports_nat) {
+ /* This is a gross gross hack.
+ * Just call the cookie_new code since we
+ * are allowing a duplicate association.
+ * I hope this works...
+ */
+ return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
+ sh, cookie, cookie_len,
+						       inp, netp, init_src, notification,
+ auth_skipped, auth_offset, auth_len,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port));
+ }
+ /*
+ * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
+ */
+ /* temp code */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 12;
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
+
+ /* notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_RESTART;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
+ } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
+ SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
+ }
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
+ /* move to OPEN state, if not in SHUTDOWN_SENT */
+ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
+ }
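+		/*
+		 * The peer restarted: reseed the stream counts and every
+		 * sequence space from the new INIT/INIT-ACK, just as for a
+		 * fresh association.
+		 */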
+ asoc->pre_open_streams =
+ ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ if (asoc->mapping_array) {
+ memset(asoc->mapping_array, 0,
+ asoc->mapping_array_size);
+ }
+ if (asoc->nr_mapping_array) {
+ memset(asoc->nr_mapping_array, 0,
+ asoc->mapping_array_size);
+ }
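+		/*
+		 * Reacquire the locks in info -> endpoint -> TCB order; the
+		 * refcount taken above keeps the stcb alive while the TCB
+		 * lock is dropped.
+		 */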
+ SCTP_TCB_UNLOCK(stcb);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ SCTP_SOCKET_LOCK(so, 1);
+#endif
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(stcb->sctp_ep);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ /* send up all the data */
+ SCTP_TCB_SEND_LOCK(stcb);
+
+ sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].chunks_on_queues = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+ for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+ asoc->strmout[i].abandoned_sent[j] = 0;
+ asoc->strmout[i].abandoned_unsent[j] = 0;
+ }
+#else
+ asoc->strmout[i].abandoned_sent[0] = 0;
+ asoc->strmout[i].abandoned_unsent[0] = 0;
+#endif
+ stcb->asoc.strmout[i].sid = i;
+ stcb->asoc.strmout[i].next_mid_ordered = 0;
+ stcb->asoc.strmout[i].next_mid_unordered = 0;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ }
+ /* process the INIT-ACK info (my info) */
+ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+
+ /* pull from vtag hash */
+ LIST_REMOVE(stcb, sctp_asocs);
+ /* re-insert to new vtag position */
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
+ SCTP_BASE_INFO(hashasocmark))];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ SCTP_INP_WUNLOCK(stcb->sctp_ep);
+ SCTP_INP_INFO_WUNLOCK();
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ /* process the INIT info (peer's info) */
+ retval = sctp_process_init(init_cp, stcb);
+ if (retval < 0) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 13;
+
+ return (NULL);
+ }
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+
+ if (sctp_load_addresses_from_init(stcb, m,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, src, dst, init_src, stcb->asoc.port)) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 14;
+
+ return (NULL);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, asoc);
+ sctp_send_cookie_ack(stcb);
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 15;
+
+ return (stcb);
+ }
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 16;
+ /* all other cases... */
+ return (NULL);
+}
+
+/*
+ * handle a state cookie for a new association
+ * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
+ *    note: this is a "split" mbuf and the cookie signature does not exist
+ * offset: offset into mbuf to the cookie-echo chunk
+ * length: length of the cookie chunk
+ * to: where the init was from
+ * returns a new TCB
+ */
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_init_chunk *init_cp, init_buf;
+ struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ union sctp_sockstore store;
+ struct sctp_association *asoc;
+ int init_offset, initack_offset, initack_limit;
+ int retval;
+ int error = 0;
+ uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(inp);
+#endif
+
+ /*
+ * find and validate the INIT chunk in the cookie (peer's info) the
+ * INIT should start after the cookie-echo header struct (chunk
+ * header, state cookie header struct)
+ */
+ init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
+ init_cp = (struct sctp_init_chunk *)
+ sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+ (uint8_t *) & init_buf);
+ if (init_cp == NULL) {
+		/* could not pull an INIT chunk in cookie */
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "process_cookie_new: could not pull INIT chunk hdr\n");
+ return (NULL);
+ }
+ if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
+ return (NULL);
+ }
+ initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
+ /*
+ * find and validate the INIT-ACK chunk in the cookie (my info) the
+ * INIT-ACK follows the INIT chunk
+ */
+ initack_cp = (struct sctp_init_ack_chunk *)
+ sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+ (uint8_t *) & initack_buf);
+ if (initack_cp == NULL) {
+ /* could not pull INIT-ACK chunk in cookie */
+ SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
+ return (NULL);
+ }
+ if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+ return (NULL);
+ }
+ /*
+ * NOTE: We can't use the INIT_ACK's chk_length to determine the
+ * "initack_limit" value. This is because the chk_length field
+ * includes the length of the cookie, but the cookie is omitted when
+ * the INIT and INIT_ACK are tacked onto the cookie...
+ */
+ initack_limit = offset + cookie_len;
+
+ /*
+ * now that we know the INIT/INIT-ACK are in place, create a new TCB
+	 * and populate
+ */
+
+ /*
+	 * Here we do a trick: we pass NULL for the proc/thread argument. We
+ * do this since in effect we only use the p argument when
+ * the socket is unbound and we must do an implicit bind.
+ * Since we are getting a cookie, we cannot be unbound.
+ */
+ stcb = sctp_aloc_assoc(inp, init_src, &error,
+ ntohl(initack_cp->init.initiate_tag), vrf_id,
+ ntohs(initack_cp->init.num_outbound_streams),
+ port,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ (struct thread *)NULL,
+#elif defined(_WIN32) && !defined(__Userspace__)
+ (PKTHREAD)NULL,
+#else
+ (struct proc *)NULL,
+#endif
+ SCTP_DONT_INITIALIZE_AUTH_PARAMS);
+ if (stcb == NULL) {
+ struct mbuf *op_err;
+
+ /* memory problem? */
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "process_cookie_new: no room for another TCB!\n");
+ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+ sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ return (NULL);
+ }
+ /* get the correct sctp_nets */
+ if (netp)
+ *netp = sctp_findnet(stcb, init_src);
+
+ asoc = &stcb->asoc;
+ /* get scope variables out of cookie */
+ asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
+ asoc->scope.site_scope = cookie->site_scope;
+ asoc->scope.local_scope = cookie->local_scope;
+ asoc->scope.loopback_scope = cookie->loopback_scope;
+
+#if defined(__Userspace__)
+ if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
+ (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
+ (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
+#else
+ if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
+ (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
+#endif
+ struct mbuf *op_err;
+
+		/*
+		 * Houston, we have a problem. The EP changed while the
+		 * cookie was in flight. The only recourse is to abort the
+		 * association.
+		 */
+ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+ sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (NULL);
+ }
+ /* process the INIT-ACK info (my info) */
+ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+ asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+
+ /* process the INIT info (peer's info) */
+ if (netp)
+ retval = sctp_process_init(init_cp, stcb);
+ else
+ retval = 0;
+ if (retval < 0) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (NULL);
+ }
+ /* load all addresses */
+ if (sctp_load_addresses_from_init(stcb, m,
+ init_offset + sizeof(struct sctp_init_chunk), initack_offset,
+ src, dst, init_src, port)) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (NULL);
+ }
+ /*
+ * verify any preceding AUTH chunk that was skipped
+ */
+ /* pull the local authentication parameters from the cookie/init-ack */
+ sctp_auth_get_cookie_params(stcb, m,
+ initack_offset + sizeof(struct sctp_init_ack_chunk),
+ initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
+ if (auth_skipped) {
+ struct sctp_auth_chunk *auth;
+
+ if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
+ auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
+ } else {
+ auth = NULL;
+ }
+ if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
+ /* auth HMAC failed, dump the assoc and packet */
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "COOKIE-ECHO: AUTH failed\n");
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (NULL);
+ } else {
+ /* remaining chunks checked... good to go */
+ stcb->asoc.authenticated = 1;
+ }
+ }
+
+	/*
+	 * if we're doing ASCONFs, check to see if we have any new local
+	 * addresses that need to get added to the peer (e.g. addresses
+	 * changed while the cookie echo was in flight). This needs to be
+	 * done after we go to the OPEN state to do the correct asconf
+	 * processing. Otherwise, make sure we have the correct addresses
+	 * in our lists.
+	 */
+
+	/* warning, we re-use the sockstore union here! */
+ /* pull in local_address (our "from" address) */
+ switch (cookie->laddr_type) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ /* source addr is IPv4 */
+ memset(&store.sin, 0, sizeof(struct sockaddr_in));
+ store.sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ store.sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+ store.sin.sin_addr.s_addr = cookie->laddress[0];
+ break;
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ /* source addr is IPv6 */
+ memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
+ store.sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ store.sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ store.sin6.sin6_scope_id = cookie->scope_id;
+ memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
+ break;
+#endif
+#if defined(__Userspace__)
+ case SCTP_CONN_ADDRESS:
+ /* source addr is conn */
+ memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
+ store.sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ store.sconn.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
+ break;
+#endif
+ default:
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (NULL);
+ }
+
+ /* update current state */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
+ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ sctp_stop_all_cookie_timers(stcb);
+ SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+
+ /* set up to notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (!SCTP_IS_LISTENING(inp))) {
+		/*
+		 * This is an endpoint that called connect(); how it got a
+		 * cookie that is NEW is a bit of a mystery. It must be that
+		 * the INIT was sent, but before it got there a complete
+		 * INIT/INIT-ACK/COOKIE arrived. Of course it should then
+		 * have gone to the other code path.. not here.. oh well..
+		 * a bit of protection is worth having..
+		 */
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (NULL);
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (SCTP_IS_LISTENING(inp))) {
+		/*
+		 * We don't want to do anything with this one, since it is
+		 * the listening endpoint. The timer will get started for
+		 * accepted connections in the caller.
+		 */
+ ;
+ }
+ /* since we did not send a HB make sure we don't double things */
+ if ((netp) && (*netp))
+ (*netp)->hb_responded = 1;
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ if ((netp != NULL) && (*netp != NULL)) {
+ struct timeval old;
+
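+		/*
+		 * The cookie carries the time at which the INIT-ACK was
+		 * built, so this round trip yields the association's first
+		 * RTT sample.
+		 */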
+ /* calculate the RTT and set the encaps port */
+ old.tv_sec = cookie->time_entered.tv_sec;
+ old.tv_usec = cookie->time_entered.tv_usec;
+ sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_send_cookie_ack(stcb);
+
+ /*
+ * check the address lists for any ASCONFs that need to be sent
+ * AFTER the cookie-ack is sent
+ */
+ sctp_check_address_list(stcb, m,
+ initack_offset + sizeof(struct sctp_init_ack_chunk),
+ initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
+ &store.sa, cookie->local_scope, cookie->site_scope,
+ cookie->ipv4_scope, cookie->loopback_scope);
+
+ return (stcb);
+}
+
+/*
+ * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
+ * we NEED to make sure we are not already using the vtag. If so we
+ * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG. No middle box bit!
+ *
+ *	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+ *	    SCTP_BASE_INFO(hashasocmark))];
+ *	LIST_FOREACH(stcb, head, sctp_asocs) {
+ *		if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
+ *			-- SEND ABORT - TRY AGAIN --
+ *		}
+ *	}
+ */
+
+/*
+ * handles a COOKIE-ECHO message
+ * stcb: modified to either a new TCB or left as the existing (non-NULL) TCB
+ */
+static struct mbuf *
+sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
+ struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+ struct sctp_tcb **locked_tcb,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_state_cookie *cookie;
+ struct sctp_tcb *l_stcb = *stcb;
+ struct sctp_inpcb *l_inp;
+ struct sockaddr *to;
+ struct sctp_pcb *ep;
+ struct mbuf *m_sig;
+ uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
+ uint8_t *sig;
+ uint8_t cookie_ok = 0;
+ unsigned int sig_offset, cookie_offset;
+ unsigned int cookie_len;
+ struct timeval now;
+ struct timeval time_expires;
+ int notification = 0;
+ struct sctp_nets *netl;
+ int had_a_existing_tcb = 0;
+ int send_int_conf = 0;
+#ifdef INET
+ struct sockaddr_in sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+#if defined(__Userspace__)
+ struct sockaddr_conn sconn;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_cookie: handling COOKIE-ECHO\n");
+
+ if (inp_p == NULL) {
+ return (NULL);
+ }
+ cookie = &cp->cookie;
+ cookie_offset = offset + sizeof(struct sctp_chunkhdr);
+ cookie_len = ntohs(cp->ch.chunk_length);
+
+ if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
+ sizeof(struct sctp_init_chunk) +
+ sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
+ /* cookie too small */
+ return (NULL);
+ }
+ if ((cookie->peerport != sh->src_port) ||
+ (cookie->myport != sh->dest_port) ||
+ (cookie->my_vtag != sh->v_tag)) {
+ /*
+ * invalid ports or bad tag. Note that we always leave the
+ * v_tag in the header in network order and when we stored
+ * it in the my_vtag slot we also left it in network order.
+ * This maintains the match even though it may be in the
+ * opposite byte order of the machine :->
+ */
+ return (NULL);
+ }
+#if defined(__Userspace__)
+ /*
+ * Recover the AF_CONN addresses within the cookie.
+ * This needs to be done in the buffer provided for later processing
+ * of the cookie and in the mbuf chain for HMAC validation.
+ */
+ if ((cookie->addr_type == SCTP_CONN_ADDRESS) && (src->sa_family == AF_CONN)) {
+ struct sockaddr_conn *sconnp = (struct sockaddr_conn *)src;
+
+ memcpy(cookie->address, &sconnp->sconn_addr , sizeof(void *));
+ m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, address),
+ (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
+ }
+ if ((cookie->laddr_type == SCTP_CONN_ADDRESS) && (dst->sa_family == AF_CONN)) {
+ struct sockaddr_conn *sconnp = (struct sockaddr_conn *)dst;
+
+ memcpy(cookie->laddress, &sconnp->sconn_addr , sizeof(void *));
+ m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, laddress),
+ (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
+ }
+#endif
+ /*
+ * split off the signature into its own mbuf (since it should not be
+ * calculated in the sctp_hmac_m() call).
+ */
+ sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
+ m_sig = m_split(m, sig_offset, M_NOWAIT);
+ if (m_sig == NULL) {
+ /* out of memory or ?? */
+ return (NULL);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
+ }
+#endif
+
+ /*
+ * compute the signature/digest for the cookie
+ */
+ ep = &(*inp_p)->sctp_ep;
+ l_inp = *inp_p;
+ if (l_stcb) {
+ SCTP_TCB_UNLOCK(l_stcb);
+ }
+ SCTP_INP_RLOCK(l_inp);
+ if (l_stcb) {
+ SCTP_TCB_LOCK(l_stcb);
+ }
+ /* which cookie is it? */
+ if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
+ (ep->current_secret_number != ep->last_secret_number)) {
+ /* it's the old cookie */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+ } else {
+ /* it's the current cookie */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+ }
+ /* get the signature */
+ SCTP_INP_RUNLOCK(l_inp);
+ sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
+ if (sig == NULL) {
+ /* couldn't find signature */
+ sctp_m_freem(m_sig);
+ return (NULL);
+ }
+ /* compare the received digest with the computed digest */
+ if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
+ /* try the old cookie? */
+ if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
+ (ep->current_secret_number != ep->last_secret_number)) {
+ /* compute digest with old */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+ /* compare */
+ if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
+ cookie_ok = 1;
+ }
+ } else {
+ cookie_ok = 1;
+ }
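+	/*
+	 * Note: a cookie signed with the previous secret stays verifiable
+	 * across a secret change, so rotating the secret does not
+	 * invalidate cookies already in flight.
+	 */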
+
+ /*
+ * Now before we continue we must reconstruct our mbuf so that
+ * normal processing of any other chunks will work.
+ */
+ {
+ struct mbuf *m_at;
+
+ m_at = m;
+ while (SCTP_BUF_NEXT(m_at) != NULL) {
+ m_at = SCTP_BUF_NEXT(m_at);
+ }
+ SCTP_BUF_NEXT(m_at) = m_sig;
+ }
+
+ if (cookie_ok == 0) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "offset = %u, cookie_offset = %u, sig_offset = %u\n",
+ (uint32_t) offset, cookie_offset, sig_offset);
+ return (NULL);
+ }
+
+ /*
+ * check the cookie timestamps to be sure it's not stale
+ */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ /* Expire time is in Ticks, so we convert to seconds */
+ time_expires.tv_sec = cookie->time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life);
+ time_expires.tv_usec = cookie->time_entered.tv_usec;
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (timercmp(&now, &time_expires, >))
+#else
+ if (timevalcmp(&now, &time_expires, >))
+#endif
+ {
+ /* cookie is stale! */
+ struct mbuf *op_err;
+ struct sctp_error_stale_cookie *cause;
+ struct timeval diff;
+ uint32_t staleness;
+
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
+ 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* Set the len */
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
+ cause = mtod(op_err, struct sctp_error_stale_cookie *);
+ cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
+ cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t))));
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ timersub(&now, &time_expires, &diff);
+#else
+ diff = now;
+ timevalsub(&diff, &time_expires);
+#endif
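+		/*
+		 * Express the staleness in microseconds, saturating at
+		 * UINT32_MAX so the 32-bit stale_time field cannot
+		 * overflow.
+		 */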
+ if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
+ staleness = UINT32_MAX;
+ } else {
+ staleness = diff.tv_sec * 1000000;
+ }
+ if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
+ staleness += diff.tv_usec;
+ } else {
+ staleness = UINT32_MAX;
+ }
+ cause->stale_time = htonl(staleness);
+ sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, l_inp->fibnum,
+#endif
+ vrf_id, port);
+ return (NULL);
+ }
+	/*
+	 * Now we must see with the lookup address if we have an existing
+	 * asoc. This will only happen if we were in the COOKIE-WAIT state
+	 * and an INIT collided with us and somewhere the peer sent the
+	 * cookie on another address besides the single address our assoc
+	 * had for him. In this case we will have one of the tie-tags set at
+	 * least AND the address field in the cookie can be used to look it
+	 * up.
+	 */
+ to = NULL;
+ switch (cookie->addr_type) {
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6.sin6_len = sizeof(sin6);
+#endif
+ sin6.sin6_port = sh->src_port;
+ sin6.sin6_scope_id = cookie->scope_id;
+ memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
+ sizeof(sin6.sin6_addr.s6_addr));
+ to = (struct sockaddr *)&sin6;
+ break;
+#endif
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin.sin_len = sizeof(sin);
+#endif
+ sin.sin_port = sh->src_port;
+ sin.sin_addr.s_addr = cookie->address[0];
+ to = (struct sockaddr *)&sin;
+ break;
+#endif
+#if defined(__Userspace__)
+ case SCTP_CONN_ADDRESS:
+ memset(&sconn, 0, sizeof(struct sockaddr_conn));
+ sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ sconn.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ sconn.sconn_port = sh->src_port;
+ memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
+ to = (struct sockaddr *)&sconn;
+ break;
+#endif
+ default:
+ /* This should not happen */
+ return (NULL);
+ }
+ if (*stcb == NULL) {
+		/* Yep, let's check */
+ *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
+ if (*stcb == NULL) {
+			/*
+			 * We should have gotten back only the same inp. If we
+			 * got back a different ep we have a problem. The
+			 * original findep got back l_inp and now
+			 */
+			if (l_inp != *inp_p) {
+				SCTP_PRINTF("Bad problem find_ep got a diff inp than special_locate?\n");
+ }
+ } else {
+ if (*locked_tcb == NULL) {
+				/* In this case we found the assoc only
+				 * after we locked the create lock. This means
+				 * we are in a colliding case and we must make
+				 * sure that we unlock the tcb if it's one of the
+				 * cases where we throw away the incoming packets.
+				 */
+ *locked_tcb = *stcb;
+
+				/* We must also increment the inp ref count
+				 * since the ref_count flag was set when we
+				 * did not find the TCB; now we found it, which
+				 * reduces the refcount. We must raise it back
+				 * up to balance it all :-)
+				 */
+ SCTP_INP_INCR_REF((*stcb)->sctp_ep);
+ if ((*stcb)->sctp_ep != l_inp) {
+					SCTP_PRINTF("Huh? ep:%p diff than l_inp:%p?\n",
+ (void *)(*stcb)->sctp_ep, (void *)l_inp);
+ }
+ }
+ }
+ }
+
+ cookie_len -= SCTP_SIGNATURE_SIZE;
+ if (*stcb == NULL) {
+ /* this is the "normal" case... get a new TCB */
+ *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
+ cookie, cookie_len, *inp_p,
+ netp, to, &notification,
+ auth_skipped, auth_offset, auth_len,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ } else {
+ /* this is abnormal... cookie-echo on existing TCB */
+ had_a_existing_tcb = 1;
+ *stcb = sctp_process_cookie_existing(m, iphlen, offset,
+ src, dst, sh,
+ cookie, cookie_len, *inp_p, *stcb, netp, to,
+ &notification, auth_skipped, auth_offset, auth_len,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ }
+
+ if (*stcb == NULL) {
+ /* still no TCB... must be bad cookie-echo */
+ return (NULL);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (*netp != NULL) {
+ (*netp)->flowtype = mflowtype;
+ (*netp)->flowid = mflowid;
+ }
+#endif
+ /*
+ * Ok, we built an association so confirm the address we sent the
+ * INIT-ACK to.
+ */
+ netl = sctp_findnet(*stcb, to);
+	/*
+	 * This code should in theory NOT run, but handle it if it does.
+	 */
+ if (netl == NULL) {
+ /* TSNH! Huh, why do I need to add this address here? */
+ if (sctp_add_remote_addr(*stcb, to, NULL, port,
+ SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
+ return (NULL);
+ }
+ netl = sctp_findnet(*stcb, to);
+ }
+ if (netl) {
+ if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
+ netl);
+ send_int_conf = 1;
+ }
+ }
+ sctp_start_net_timers(*stcb);
+ if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ if (!had_a_existing_tcb ||
+ (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /*
+ * If we have a NEW cookie or the connect never
+ * reached the connected state during collision we
+ * must do the TCP accept thing.
+ */
+ struct socket *so, *oso;
+ struct sctp_inpcb *inp;
+
+ if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
+ /*
+ * For a restart we will keep the same
+ * socket, no need to do anything. I THINK!!
+ */
+ sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ if (send_int_conf) {
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+ }
+ return (m);
+ }
+ oso = (*inp_p)->sctp_socket;
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_SET(oso->so_vnet);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(oso, 1);
+#endif
+ so = sonewconn(oso, 0
+#if defined(__APPLE__) && !defined(__Userspace__)
+ ,NULL
+#endif
+ );
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(oso, 1);
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_RESTORE();
+#endif
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+
+ if (so == NULL) {
+ struct mbuf *op_err;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *pcb_so;
+#endif
+ /* Too many sockets */
+ SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
+ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+ sctp_abort_association(*inp_p, NULL, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ pcb_so = SCTP_INP_SO(*inp_p);
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+ SCTP_SOCKET_LOCK(pcb_so, 1);
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(pcb_so, 1);
+#endif
+ return (NULL);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_INCR_REF(inp);
+			/*
+			 * We add the unbound flag here so that
+			 * if we get an soabort() before we get the
+			 * move_pcb done, we will properly clean up.
+			 */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL |
+ SCTP_PCB_FLAGS_UNBOUND |
+ (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
+ SCTP_PCB_FLAGS_DONT_WAKE);
+ inp->sctp_features = (*inp_p)->sctp_features;
+ inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
+ inp->sctp_socket = so;
+ inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
+ inp->max_cwnd = (*inp_p)->max_cwnd;
+ inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
+ inp->ecn_supported = (*inp_p)->ecn_supported;
+ inp->prsctp_supported = (*inp_p)->prsctp_supported;
+ inp->auth_supported = (*inp_p)->auth_supported;
+ inp->asconf_supported = (*inp_p)->asconf_supported;
+ inp->reconfig_supported = (*inp_p)->reconfig_supported;
+ inp->nrsack_supported = (*inp_p)->nrsack_supported;
+ inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
+ inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
+ inp->sctp_context = (*inp_p)->sctp_context;
+ inp->local_strreset_support = (*inp_p)->local_strreset_support;
+ inp->fibnum = (*inp_p)->fibnum;
+ inp->inp_starting_point_for_iterator = NULL;
+#if defined(__Userspace__)
+ inp->ulp_info = (*inp_p)->ulp_info;
+ inp->recv_callback = (*inp_p)->recv_callback;
+ inp->send_callback = (*inp_p)->send_callback;
+ inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
+#endif
+ /*
+ * copy in the authentication parameters from the
+ * original endpoint
+ */
+ if (inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+ inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
+ if (inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+ inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
+
+ /*
+ * Now we must move it from one hash table to
+ * another and get the tcb in the right place.
+ */
+
+ /* This is where the one-2-one socket is put into
+ * the accept state waiting for the accept!
+ */
+ if (*stcb) {
+ SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
+ }
+ sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
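+			/*
+			 * From here on the association lives on the newly
+			 * created socket's endpoint.
+			 */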
+
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
+ 0);
+#else
+ sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
+#endif
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+
+ /* now we must check to see if we were aborted while
+ * the move was going on and the lock/unlock happened.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* yep it was, we leave the
+ * assoc attached to the socket since
+ * the sctp_inpcb_free() call will send
+ * an abort for us.
+ */
+ SCTP_INP_DECR_REF(inp);
+ return (NULL);
+ }
+ SCTP_INP_DECR_REF(inp);
+ /* Switch over to the new guy */
+ *inp_p = inp;
+ sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ if (send_int_conf) {
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+ }
+
+ /* Pull it from the incomplete queue and wake the guy */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+ SCTP_SOCKET_LOCK(so, 1);
+#endif
+ soisconnected(so);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (m);
+ }
+ }
+ if (notification) {
+ sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ if (send_int_conf) {
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+ }
+ return (m);
+}
+
+static void
+sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* cp must not be used, others call this without a c-ack :-) */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+
+ asoc = &stcb->asoc;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ asoc->overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ asoc->overall_error_count = 0;
+ sctp_stop_all_cookie_timers(stcb);
+ /* process according to association state */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
+ /* state change only needed when I am in right state */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
+ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
+ sctp_start_net_timers(stcb);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ /* update RTO */
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ if (asoc->overall_error_count == 0) {
+ sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
+ SCTP_RTT_FROM_NON_DATA);
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
+ soisconnected(stcb->sctp_socket);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* We don't need to do the asconf thing,
+ * nor hb or autoclose if the socket is closed.
+ */
+ goto closed_socket;
+ }
+
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, net);
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ /*
+ * send ASCONF if parameters are pending and ASCONFs are
+		 * allowed (e.g. addresses changed when init/cookie echo were
+ * in flight)
+ */
+ if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
+ (stcb->asoc.asconf_supported == 1) &&
+ (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ }
+closed_socket:
+ /* Toss the cookie if I can */
+ sctp_toss_old_cookies(stcb, asoc);
+ /* Restart the timer if we have pending data */
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (chk != NULL) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+ }
+}
+
+static void
+sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+ struct sctp_tmit_chunk *lchk;
+ struct sctp_ecne_chunk bkup;
+ uint8_t override_bit;
+ uint32_t tsn, window_data_tsn;
+ int len;
+ unsigned int pkt_cnt;
+
+ len = ntohs(cp->ch.chunk_length);
+ if ((len != sizeof(struct sctp_ecne_chunk)) &&
+ (len != sizeof(struct old_sctp_ecne_chunk))) {
+ return;
+ }
+ if (len == sizeof(struct old_sctp_ecne_chunk)) {
+		/* It's the old format */
+ memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
+ bkup.num_pkts_since_cwr = htonl(1);
+ cp = &bkup;
+ }
+ SCTP_STAT_INCR(sctps_recvecne);
+ tsn = ntohl(cp->tsn);
+ pkt_cnt = ntohl(cp->num_pkts_since_cwr);
+ lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
+ if (lchk == NULL) {
+ window_data_tsn = stcb->asoc.sending_seq - 1;
+ } else {
+ window_data_tsn = lchk->rec.data.tsn;
+ }
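+	/*
+	 * window_data_tsn is the highest TSN queued for sending when the
+	 * ECNE arrived; cwnd is reduced at most once per such window.
+	 */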
+
+ /* Find where it was sent to if possible. */
+ net = NULL;
+ TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
+ if (lchk->rec.data.tsn == tsn) {
+ net = lchk->whoTo;
+ net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
+ break;
+ }
+ if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
+ break;
+ }
+ }
+ if (net == NULL) {
+		/*
+		 * What to do? A previous send of a CWR was possibly lost.
+		 * See how old it is; we may have it marked on the actual
+		 * net.
+		 */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (tsn == net->last_cwr_tsn) {
+ /* Found him, send it off */
+ break;
+ }
+ }
+ if (net == NULL) {
+ /*
+ * If we reach here, we need to send a special
+ * CWR that says hey, we did this a long time
+ * ago and you lost the response.
+ */
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ /* TSNH */
+ return;
+ }
+ override_bit = SCTP_CWR_REDUCE_OVERRIDE;
+ } else {
+ override_bit = 0;
+ }
+ } else {
+ override_bit = 0;
+ }
+ if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
+ ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
+ /* JRS - Use the congestion control given in the pluggable CC module */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
+ /*
+ * We reduce once every RTT. So we will only lower cwnd at
+ * the next sending seq i.e. the window_data_tsn
+ */
+ net->cwr_window_tsn = window_data_tsn;
+ net->ecn_ce_pkt_cnt += pkt_cnt;
+ net->lost_cnt = pkt_cnt;
+ net->last_cwr_tsn = tsn;
+ } else {
+ override_bit |= SCTP_CWR_IN_SAME_WINDOW;
+ if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
+ ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
+			/*
+			 * Another loss in the same window; update how
+			 * many marks/packets lost we have had.
+			 */
+ int cnt = 1;
+ if (pkt_cnt > net->lost_cnt) {
+ /* Should be the case */
+ cnt = (pkt_cnt - net->lost_cnt);
+ net->ecn_ce_pkt_cnt += cnt;
+ }
+ net->lost_cnt = pkt_cnt;
+ net->last_cwr_tsn = tsn;
+			/*
+			 * Most CC functions will ignore this call, since we
+			 * are still within the window of the initial CE the
+			 * peer saw.
+			 */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
+ }
+ }
+	/*
+	 * We always send a CWR this way: if our previous one was lost, our
+	 * peer will get an update, and if it is not time again to reduce,
+	 * the peer still gets the CWR. Note we set the override when we
+	 * could not find the TSN on the chunk or the destination network.
+	 */
+ sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
+}
+
+static void
+sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /*
+ * Here we get a CWR from the peer. We must look in the outqueue and
+ * make sure that we have a covered ECNE in the control chunk part.
+ * If so remove it.
+ */
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_ecne_chunk *ecne;
+ int override;
+ uint32_t cwr_tsn;
+
+ cwr_tsn = ntohl(cp->tsn);
+ override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
+ TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
+ if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
+ continue;
+ }
+ if ((override == 0) && (chk->whoTo != net)) {
+ /* Must be from the right src unless override is set */
+ continue;
+ }
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
+ /* this covers this ECNE, we can remove it */
+ stcb->asoc.ecn_echo_cnt_onq--;
+ TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
+ sctp_next);
+ stcb->asoc.ctrl_queue_cnt--;
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ if (override == 0) {
+ break;
+ }
+ }
+ }
+}
+
+static void
+sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
+ if (stcb == NULL)
+ return;
+
+ /* process according to association state */
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /* unexpected SHUTDOWN-COMPLETE... so ignore... */
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* notify upper layer protocol */
+ if (stcb->sctp_socket) {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+#ifdef INVARIANTS
+ if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
+ !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
+ sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
+ panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
+ }
+#endif
+ /* stop the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
+ SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+ /* free the TCB */
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_complete: calls free-asoc\n");
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return;
+}
+
+static int
+process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
+ struct sctp_nets *net, uint8_t flg)
+{
+ switch (desc->chunk_type) {
+ case SCTP_DATA:
+ case SCTP_IDATA:
+ /* find the tsn to resend (possibly) */
+ {
+ uint32_t tsn;
+ struct sctp_tmit_chunk *tp1;
+
+ tsn = ntohl(desc->tsn_ifany);
+ TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+ if (tp1->rec.data.tsn == tsn) {
+ /* found it */
+ break;
+ }
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
+ /* not found */
+ tp1 = NULL;
+ break;
+ }
+ }
+ if (tp1 == NULL) {
+			/*
+			 * Do it the other way, i.e. without paying
+			 * attention to queue seq order.
+			 */
+ SCTP_STAT_INCR(sctps_pdrpdnfnd);
+ TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+ if (tp1->rec.data.tsn == tsn) {
+ /* found it */
+ break;
+ }
+ }
+ }
+ if (tp1 == NULL) {
+ SCTP_STAT_INCR(sctps_pdrptsnnf);
+ }
+ if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
+ if (((flg & SCTP_BADCRC) == 0) &&
+ ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+ return (0);
+ }
+ if ((stcb->asoc.peers_rwnd == 0) &&
+ ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+ SCTP_STAT_INCR(sctps_pdrpdiwnp);
+ return (0);
+ }
+ if (stcb->asoc.peers_rwnd == 0 &&
+ (flg & SCTP_FROM_MIDDLE_BOX)) {
+ SCTP_STAT_INCR(sctps_pdrpdizrw);
+ return (0);
+ }
+ if ((uint32_t)SCTP_BUF_LEN(tp1->data) <
+ SCTP_DATA_CHUNK_OVERHEAD(stcb) + SCTP_NUM_DB_TO_VERIFY) {
+ /* Payload not matching. */
+ SCTP_STAT_INCR(sctps_pdrpbadd);
+ return (-1);
+ }
+ if (memcmp(mtod(tp1->data, caddr_t) + SCTP_DATA_CHUNK_OVERHEAD(stcb),
+ desc->data_bytes, SCTP_NUM_DB_TO_VERIFY) != 0) {
+ /* Payload not matching. */
+ SCTP_STAT_INCR(sctps_pdrpbadd);
+ return (-1);
+ }
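+				/*
+				 * The echoed bytes matched what we sent, so
+				 * the drop report is trusted and the chunk is
+				 * marked for retransmission below.
+				 */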
+ if (tp1->do_rtt) {
+				/*
+				 * this guy had an RTO calculation
+				 * pending on it, cancel it
+				 */
+ if (tp1->whoTo->rto_needed == 0) {
+ tp1->whoTo->rto_needed = 1;
+ }
+ tp1->do_rtt = 0;
+ }
+ SCTP_STAT_INCR(sctps_pdrpmark);
+ if (tp1->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ /*
+ * mark it as if we were doing a FR, since
+ * we will be getting gap ack reports behind
+ * the info from the router.
+ */
+ tp1->rec.data.doing_fast_retransmit = 1;
+ /*
+ * mark the tsn with what sequences can
+ * cause a new FR.
+ */
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+ tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+ } else {
+ tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
+ }
+
+ /* restart the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, tp1->whoTo,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, tp1->whoTo);
+
+ /* fix counts and things */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uint32_t)(uintptr_t)stcb,
+ tp1->rec.data.tsn);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->sent = SCTP_DATAGRAM_RESEND;
+			}
+			{
+ /* audit code */
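+				/*
+				 * Recount chunks marked for retransmission
+				 * and, when auditing is disabled, resync
+				 * sent_queue_retran_cnt if it has drifted.
+				 */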
+ unsigned int audit;
+
+ audit = 0;
+ TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_RESEND)
+ audit++;
+ }
+ TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_RESEND)
+ audit++;
+ }
+ if (audit != stcb->asoc.sent_queue_retran_cnt) {
+ SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
+ audit, stcb->asoc.sent_queue_retran_cnt);
+#ifndef SCTP_AUDITING_ENABLED
+ stcb->asoc.sent_queue_retran_cnt = audit;
+#endif
+ }
+ }
+ }
+ break;
+ case SCTP_ASCONF:
+ {
+ struct sctp_tmit_chunk *asconf;
+
+ TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
+ break;
+ }
+ }
+ if (asconf) {
+ if (asconf->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ asconf->sent = SCTP_DATAGRAM_RESEND;
+ asconf->snd_count--;
+ }
+ }
+ break;
+ case SCTP_INITIATION:
+ /* resend the INIT */
+ stcb->asoc.dropped_special_cnt++;
+ if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
+			/*
+			 * If we can get it in within a few attempts we do
+			 * this, otherwise we let the timer fire.
+			 */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK:
+ /* resend the sack */
+ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_HEARTBEAT_REQUEST:
+ /* resend a demand HB */
+ if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
+			/* Only retransmit if we KNOW we won't destroy the tcb */
+ sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ case SCTP_SHUTDOWN:
+ sctp_send_shutdown(stcb, net);
+ break;
+ case SCTP_SHUTDOWN_ACK:
+ sctp_send_shutdown_ack(stcb, net);
+ break;
+ case SCTP_COOKIE_ECHO:
+ {
+ struct sctp_tmit_chunk *cookie;
+
+ cookie = NULL;
+ TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ break;
+ }
+ }
+ if (cookie) {
+ if (cookie->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cookie->sent = SCTP_DATAGRAM_RESEND;
+ sctp_stop_all_cookie_timers(stcb);
+ }
+ }
+ break;
+ case SCTP_COOKIE_ACK:
+ sctp_send_cookie_ack(stcb);
+ break;
+ case SCTP_ASCONF_ACK:
+ /* resend last asconf ack */
+ sctp_send_asconf_ack(stcb);
+ break;
+ case SCTP_IFORWARD_CUM_TSN:
+ case SCTP_FORWARD_CUM_TSN:
+ send_forward_tsn(stcb, &stcb->asoc);
+ break;
+ /* can't do anything with these */
+ case SCTP_PACKET_DROPPED:
+ case SCTP_INITIATION_ACK: /* this should not happen */
+ case SCTP_HEARTBEAT_ACK:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_OPERATION_ERROR:
+ case SCTP_SHUTDOWN_COMPLETE:
+ case SCTP_ECN_ECHO:
+ case SCTP_ECN_CWR:
+ default:
+ break;
+ }
+ return (0);
+}
+
+void
+sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
+{
+ uint32_t i;
+ uint16_t temp;
+
+	/*
+	 * We set things to 0xffffffff since this is the last delivered
+	 * sequence and we will start sending at 0 after the reset.
+	 */
+
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamincnt) {
+ continue;
+ }
+ stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
+ }
+ } else {
+ list = NULL;
+ for (i = 0; i < stcb->asoc.streamincnt; i++) {
+ stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
+ }
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
+{
+ uint32_t i;
+ uint16_t temp;
+
+ if (number_entries > 0) {
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamoutcnt) {
+ /* no such stream */
+ continue;
+ }
+ stcb->asoc.strmout[temp].next_mid_ordered = 0;
+ stcb->asoc.strmout[temp].next_mid_unordered = 0;
+ }
+ } else {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].next_mid_ordered = 0;
+ stcb->asoc.strmout[i].next_mid_unordered = 0;
+ }
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
+{
+ uint32_t i;
+ uint16_t temp;
+
+ if (number_entries > 0) {
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamoutcnt) {
+ /* no such stream */
+ continue;
+ }
+ stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
+ }
+ } else {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
+ }
+ }
+}
+
+struct sctp_stream_reset_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
+{
+ struct sctp_association *asoc;
+ struct sctp_chunkhdr *ch;
+ struct sctp_stream_reset_request *r;
+ struct sctp_tmit_chunk *chk;
+ int len, clen;
+
+ asoc = &stcb->asoc;
+ if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ asoc->stream_reset_outstanding = 0;
+ return (NULL);
+ }
+ if (stcb->asoc.str_reset == NULL) {
+ asoc->stream_reset_outstanding = 0;
+ return (NULL);
+ }
+ chk = stcb->asoc.str_reset;
+ if (chk->data == NULL) {
+ return (NULL);
+ }
+ if (bchk) {
+ /* he wants a copy of the chk pointer */
+ *bchk = chk;
+ }
+ clen = chk->send_size;
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ r = (struct sctp_stream_reset_request *)(ch + 1);
+ if (ntohl(r->request_seq) == seq) {
+ /* found it */
+ return (r);
+ }
+ len = SCTP_SIZE32(ntohs(r->ph.param_length));
+ if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
+ /* move to the next one, there can only be a max of two */
+ r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
+ if (ntohl(r->request_seq) == seq) {
+ return (r);
+ }
+ }
+ /* that seq is not here */
+ return (NULL);
+}
+
+static void
+sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+
+ asoc = &stcb->asoc;
+ chk = asoc->str_reset;
+ if (chk == NULL) {
+ return;
+ }
+ asoc->str_reset = NULL;
+ sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
+ NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+}
+
+
+static int
+sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
+ uint32_t seq, uint32_t action,
+ struct sctp_stream_reset_response *respin)
+{
+ uint16_t type;
+ int lparam_len;
+ struct sctp_association *asoc = &stcb->asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_reset_request *req_param;
+ struct sctp_stream_reset_out_request *req_out_param;
+ struct sctp_stream_reset_in_request *req_in_param;
+ uint32_t number_entries;
+
+ if (asoc->stream_reset_outstanding == 0) {
+ /* duplicate */
+ return (0);
+ }
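+	/*
+	 * Only a response matching our outstanding request sequence number
+	 * is processed; the sequence number then advances (and is stepped
+	 * back again if the peer reports the request as still in progress).
+	 */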
+ if (seq == stcb->asoc.str_reset_seq_out) {
+ req_param = sctp_find_stream_reset(stcb, seq, &chk);
+ if (req_param != NULL) {
+ stcb->asoc.str_reset_seq_out++;
+ type = ntohs(req_param->ph.param_type);
+ lparam_len = ntohs(req_param->ph.param_length);
+ if (type == SCTP_STR_RESET_OUT_REQUEST) {
+ int no_clear = 0;
+
+ req_out_param = (struct sctp_stream_reset_out_request *)req_param;
+ number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
+ asoc->stream_reset_out_is_outstanding = 0;
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
+ /* do it */
+ sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
+ } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+ } else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
+ /* Set it up so we don't stop retransmitting */
+ asoc->stream_reset_outstanding++;
+ stcb->asoc.str_reset_seq_out--;
+ asoc->stream_reset_out_is_outstanding = 1;
+ no_clear = 1;
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+ }
+ if (no_clear == 0) {
+ sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
+ }
+ } else if (type == SCTP_STR_RESET_IN_REQUEST) {
+ req_in_param = (struct sctp_stream_reset_in_request *)req_param;
+ number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
+ number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+ } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
+ number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+ }
+ } else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
+ /* Ok we now may have more streams */
+ int num_stream;
+
+ num_stream = stcb->asoc.strm_pending_add_size;
+ if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
+ /* TSNH */
+ num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
+ }
+ stcb->asoc.strm_pending_add_size = 0;
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
+ /* Put the new streams into effect */
+ int i;
+ for ( i = asoc->streamoutcnt; i< (asoc->streamoutcnt + num_stream); i++) {
+ asoc->strmout[i].state = SCTP_STREAM_OPEN;
+ }
+ asoc->streamoutcnt += num_stream;
+ sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
+ } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+ sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+ SCTP_STREAM_CHANGE_DENIED);
+ } else {
+ sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+ SCTP_STREAM_CHANGE_FAILED);
+ }
+ } else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+ sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+ SCTP_STREAM_CHANGE_DENIED);
+ } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
+ sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+ SCTP_STREAM_CHANGE_FAILED);
+ }
+ } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
+				/*
+				 * a) Adopt the new in-tsn.
+				 * b) Reset the map.
+				 * c) Adopt the new out-tsn.
+				 */
+ struct sctp_stream_reset_response_tsn *resp;
+ struct sctp_forward_tsn_chunk fwdtsn;
+ int abort_flag = 0;
+ if (respin == NULL) {
+ /* huh ? */
+ return (0);
+ }
+ if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
+ return (0);
+ }
+ if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
+ resp = (struct sctp_stream_reset_response_tsn *)respin;
+ asoc->stream_reset_outstanding--;
+ fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+ fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
+ sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
+ if (abort_flag) {
+ return (1);
+ }
+ stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+
+ stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+
+ stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+
+ stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
+ stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
+
+ sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+ sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+ sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
+ } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+ sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
+ SCTP_ASSOC_RESET_DENIED);
+ } else {
+ sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
+ SCTP_ASSOC_RESET_FAILED);
+ }
+ }
+ /* get rid of the request and get the request flags */
+ if (asoc->stream_reset_outstanding == 0) {
+ sctp_clean_up_stream_reset(stcb);
+ }
+ }
+ }
+ if (asoc->stream_reset_outstanding == 0) {
+ sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
+ }
+ return (0);
+}
+
+static void
+sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_in_request *req, int trunc)
+{
+ uint32_t seq;
+ int len, i;
+ int number_entries;
+ uint16_t temp;
+
+ /*
+	 * The peer wants us to perform a stream reset on our outgoing
+	 * streams, provided the request sequence number is in order.
+ */
+ struct sctp_association *asoc = &stcb->asoc;
+
+ seq = ntohl(req->request_seq);
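+	/*
+	 * Requests are ordered by str_reset_seq_in: a matching seq is a new
+	 * request; seq - 1 or seq - 2 are retransmissions whose cached
+	 * results we simply echo back; anything else is a bad sequence
+	 * number.
+	 */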
+ if (asoc->str_reset_seq_in == seq) {
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else if (trunc) {
+ /* Can't do it, since they exceeded our buffer size */
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
+ len = ntohs(req->ph.param_length);
+ number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(req->list_of_streams[i]);
+ if (temp >= stcb->asoc.streamoutcnt) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ goto bad_boy;
+ }
+ req->list_of_streams[i] = temp;
+ }
+ for (i = 0; i < number_entries; i++) {
+ if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
+ stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
+ }
+ }
+ } else {
+				/* It's all streams */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
+ stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
+ }
+ }
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+ } else {
+ /* Can't do it, since we have sent one out */
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
+ }
+ bad_boy:
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ asoc->str_reset_seq_in++;
+ } else if (asoc->str_reset_seq_in - 1 == seq) {
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if (asoc->str_reset_seq_in - 2 == seq) {
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+ }
+ sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
+}
+
+static int
+sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_tsn_request *req)
+{
+	/*
+	 * Reset all incoming and outgoing streams and update the TSN:
+	 * A) reset my str-seq's on in and out.
+	 * B) Select a receive next, set cum-ack to it, and also process
+	 *    this selected number as a fwd-tsn.
+	 * C) Set in the response my next sending seq.
+	 */
+ struct sctp_forward_tsn_chunk fwdtsn;
+ struct sctp_association *asoc = &stcb->asoc;
+ int abort_flag = 0;
+ uint32_t seq;
+
+ seq = ntohl(req->request_seq);
+ if (asoc->str_reset_seq_in == seq) {
+ asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else {
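+			/*
+			 * Build a synthetic FORWARD-TSN covering everything
+			 * received so far, so the reassembly queues are
+			 * drained before we adopt the new TSN base.
+			 */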
+ fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+ fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn.ch.chunk_flags = 0;
+ fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
+ sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
+ if (abort_flag) {
+ return (1);
+ }
+ asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
+ asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
+ memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
+ memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
+ atomic_add_int(&asoc->sending_seq, 1);
+ /* save off historical data for retrans */
+ asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
+ asoc->last_sending_seq[0] = asoc->sending_seq;
+ asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
+ asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
+ sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+ sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+ sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
+ }
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
+ asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
+ asoc->str_reset_seq_in++;
+ } else if (asoc->str_reset_seq_in - 1 == seq) {
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
+ asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
+ } else if (asoc->str_reset_seq_in - 2 == seq) {
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
+ asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+ }
+ return (0);
+}
+
+static void
+sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_out_request *req, int trunc)
+{
+ uint32_t seq, tsn;
+ int number_entries, len;
+ struct sctp_association *asoc = &stcb->asoc;
+
+ seq = ntohl(req->request_seq);
+
+	/* now if it's not a duplicate we process it */
+ if (asoc->str_reset_seq_in == seq) {
+ len = ntohs(req->ph.param_length);
+ number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
+ /*
+		 * The sender is resetting; handle the list:
+		 * a) verify that we can do the reset; if so, no problem.
+		 * b) If we can't do the reset, we must copy the request.
+		 * c) Queue it, and set up the data-in processor to trigger it
+		 *    when needed and dequeue all the queued data.
+ */
+ tsn = ntohl(req->send_reset_at_tsn);
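+		/*
+		 * The peer tells us the TSN after which the reset takes
+		 * effect; if our cumulative TSN has already reached it we
+		 * can reset at once, otherwise the request is queued until
+		 * those TSNs arrive.
+		 */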
+
+ /* move the reset action back one */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else if (trunc) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
+ /* we can do it now */
+ sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+ } else {
+ /*
+ * we must queue it up and thus wait for the TSN's
+ * to arrive that are at or before tsn
+ */
+ struct sctp_stream_reset_list *liste;
+ int siz;
+
+ siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
+ SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
+ siz, SCTP_M_STRESET);
+ if (liste == NULL) {
+ /* gak out of memory */
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ return;
+ }
+ liste->seq = seq;
+ liste->tsn = tsn;
+ liste->number_entries = number_entries;
+ memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
+ TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
+ }
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ asoc->str_reset_seq_in++;
+ } else if ((asoc->str_reset_seq_in - 1) == seq) {
+ /*
+ * one seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if ((asoc->str_reset_seq_in - 2) == seq) {
+ /*
+ * two seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+ }
+}
+
+static void
+sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_add_strm *str_add)
+{
+ /*
+ * Peer is requesting to add more streams.
+	 * If it's within our max-streams we will
+ * allow it.
+ */
+ uint32_t num_stream, i;
+ uint32_t seq;
+ struct sctp_association *asoc = &stcb->asoc;
+ struct sctp_queued_to_read *ctl, *nctl;
+
+ /* Get the number. */
+ seq = ntohl(str_add->request_seq);
+ num_stream = ntohs(str_add->number_of_streams);
+ /* Now what would be the new total? */
+ if (asoc->str_reset_seq_in == seq) {
+ num_stream += stcb->asoc.streamincnt;
+ stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else if ((num_stream > stcb->asoc.max_inbound_streams) ||
+ (num_stream > 0xffff)) {
+			/* We must reject it; they ask for too many. */
+ denied:
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else {
+ /* Ok, we can do that :-) */
+ struct sctp_stream_in *oldstrm;
+
+ /* save off the old */
+ oldstrm = stcb->asoc.strmin;
+ SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
+ (num_stream * sizeof(struct sctp_stream_in)),
+ SCTP_M_STRMI);
+ if (stcb->asoc.strmin == NULL) {
+ stcb->asoc.strmin = oldstrm;
+ goto denied;
+ }
+ /* copy off the old data */
+ for (i = 0; i < stcb->asoc.streamincnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
+ TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
+ stcb->asoc.strmin[i].sid = i;
+ stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
+ stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
+ stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
+ /* now anything on those queues? */
+ TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
+ TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
+ TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
+ }
+ TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
+ TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
+ TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
+ }
+ }
+ /* Init the new streams */
+ for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
+ TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
+ TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
+ stcb->asoc.strmin[i].sid = i;
+ stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
+ stcb->asoc.strmin[i].pd_api_started = 0;
+ stcb->asoc.strmin[i].delivery_started = 0;
+ }
+ SCTP_FREE(oldstrm, SCTP_M_STRMI);
+ /* update the size */
+ stcb->asoc.streamincnt = num_stream;
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+ sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
+ }
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ asoc->str_reset_seq_in++;
+ } else if ((asoc->str_reset_seq_in - 1) == seq) {
+ /*
+ * one seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if ((asoc->str_reset_seq_in - 2) == seq) {
+ /*
+ * two seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+	}
+}
+
+static void
+sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_add_strm *str_add)
+{
+ /*
+ * Peer is requesting to add more streams.
+	 * If it's within our max-streams we will
+ * allow it.
+ */
+ uint16_t num_stream;
+ uint32_t seq;
+ struct sctp_association *asoc = &stcb->asoc;
+
+ /* Get the number. */
+ seq = ntohl(str_add->request_seq);
+ num_stream = ntohs(str_add->number_of_streams);
+ /* Now what would be the new total? */
+ if (asoc->str_reset_seq_in == seq) {
+ stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ } else if (stcb->asoc.stream_reset_outstanding) {
+			/* We must reject it; we have something pending. */
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
+ } else {
+ /* Ok, we can do that :-) */
+ int mychk;
+ mychk = stcb->asoc.streamoutcnt;
+ mychk += num_stream;
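+			/*
+			 * Stream ids are 16 bits, so the new total must stay
+			 * below 0x10000.
+			 */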
+ if (mychk < 0x10000) {
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+ if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ }
+ } else {
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+ }
+ }
+ sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
+ asoc->str_reset_seq_in++;
+ } else if ((asoc->str_reset_seq_in - 1) == seq) {
+ /*
+ * one seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if ((asoc->str_reset_seq_in - 2) == seq) {
+ /*
+ * two seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+ }
+}
+
+#ifdef __GNUC__
+__attribute__ ((noinline))
+#endif
+static int
+sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ struct sctp_chunkhdr *ch_req)
+{
+ uint16_t remaining_length, param_len, ptype;
+ struct sctp_paramhdr pstore;
+ uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
+ uint32_t seq = 0;
+ int num_req = 0;
+ int trunc = 0;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+ struct sctp_paramhdr *ph;
+ int ret_code = 0;
+ int num_param = 0;
+
+ /* now it may be a reset or a reset-response */
+ remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);
+
+ /* setup for adding the response */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return (ret_code);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->no_fr_allowed = 0;
+ chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->book_size_scale = 0;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ strres_nochunk:
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return (ret_code);
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = NULL;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->send_size);
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ offset += sizeof(struct sctp_chunkhdr);
+ while (remaining_length >= sizeof(struct sctp_paramhdr)) {
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
+ if (ph == NULL) {
+ /* TSNH */
+ break;
+ }
+ param_len = ntohs(ph->param_length);
+ if ((param_len > remaining_length) ||
+ (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
+ /* bad parameter length */
+ break;
+ }
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
+ (uint8_t *)&cstore);
+ if (ph == NULL) {
+ /* TSNH */
+ break;
+ }
+ ptype = ntohs(ph->param_type);
+ num_param++;
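+		/*
+		 * If the parameter did not fit into our stack buffer we only
+		 * copied a prefix; flag it as truncated so the request
+		 * handlers deny it instead of acting on a partial stream
+		 * list.
+		 */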
+ if (param_len > sizeof(cstore)) {
+ trunc = 1;
+ } else {
+ trunc = 0;
+ }
+ if (num_param > SCTP_MAX_RESET_PARAMS) {
+			/* hit the max number of parameters already; sorry... */
+ break;
+ }
+ if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
+ struct sctp_stream_reset_out_request *req_out;
+
+ if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
+ break;
+ }
+ req_out = (struct sctp_stream_reset_out_request *)ph;
+ num_req++;
+ if (stcb->asoc.stream_reset_outstanding) {
+ seq = ntohl(req_out->response_seq);
+ if (seq == stcb->asoc.str_reset_seq_out) {
+ /* implicit ack */
+ (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
+ }
+ }
+ sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
+ } else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
+ struct sctp_stream_reset_add_strm *str_add;
+
+ if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
+ break;
+ }
+ str_add = (struct sctp_stream_reset_add_strm *)ph;
+ num_req++;
+ sctp_handle_str_reset_add_strm(stcb, chk, str_add);
+ } else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
+ struct sctp_stream_reset_add_strm *str_add;
+
+ if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
+ break;
+ }
+ str_add = (struct sctp_stream_reset_add_strm *)ph;
+ num_req++;
+ sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
+ } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
+ struct sctp_stream_reset_in_request *req_in;
+
+ num_req++;
+ req_in = (struct sctp_stream_reset_in_request *)ph;
+ sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
+ } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
+ struct sctp_stream_reset_tsn_request *req_tsn;
+
+ num_req++;
+ req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
+ if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
+ ret_code = 1;
+ goto strres_nochunk;
+ }
+ /* no more */
+ break;
+ } else if (ptype == SCTP_STR_RESET_RESPONSE) {
+ struct sctp_stream_reset_response *resp;
+ uint32_t result;
+
+ if (param_len < sizeof(struct sctp_stream_reset_response)) {
+ break;
+ }
+ resp = (struct sctp_stream_reset_response *)ph;
+ seq = ntohl(resp->response_seq);
+ result = ntohl(resp->result);
+ if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
+ ret_code = 1;
+ goto strres_nochunk;
+ }
+ } else {
+ break;
+ }
+ offset += SCTP_SIZE32(param_len);
+ if (remaining_length >= SCTP_SIZE32(param_len)) {
+ remaining_length -= SCTP_SIZE32(param_len);
+ } else {
+ remaining_length = 0;
+ }
+ }
+ if (num_req == 0) {
+		/* we have no response; free the chunk */
+ goto strres_nochunk;
+ }
+ /* ok we have a chunk to link in */
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
+ chk,
+ sctp_next);
+ stcb->asoc.ctrl_queue_cnt++;
+ return (ret_code);
+}
+
+/*
+ * Handle a router's or endpoint's report of a packet loss. There are
+ * two ways to handle this: either we get the whole packet and must
+ * dissect it ourselves (possibly with truncation and/or corruption),
+ * or it is a summary from a middle box that did the dissecting for us.
+ */
+static void
+sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
+{
+ struct sctp_chunk_desc desc;
+ struct sctp_chunkhdr *chk_hdr;
+ struct sctp_data_chunk *data_chunk;
+ struct sctp_idata_chunk *idata_chunk;
+ uint32_t bottle_bw, on_queue;
+ uint32_t offset, chk_len;
+ uint16_t trunc_len;
+ uint16_t pktdrp_len;
+ uint8_t pktdrp_flags;
+
+ KASSERT(sizeof(struct sctp_pktdrop_chunk) <= limit,
+ ("PKTDROP chunk too small"));
+ pktdrp_flags = cp->ch.chunk_flags;
+ pktdrp_len = ntohs(cp->ch.chunk_length);
+ KASSERT(limit <= pktdrp_len, ("Inconsistent limit"));
+ if (pktdrp_flags & SCTP_PACKET_TRUNCATED) {
+ trunc_len = ntohs(cp->trunc_len);
+ if (trunc_len <= pktdrp_len - sizeof(struct sctp_pktdrop_chunk)) {
+ /* The peer plays games with us. */
+ return;
+ }
+ } else {
+ trunc_len = 0;
+ }
+ limit -= sizeof(struct sctp_pktdrop_chunk);
+ offset = 0;
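+	/*
+	 * offset/limit now index only the embedded (possibly truncated)
+	 * dropped packet carried in cp->data.
+	 */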
+ if (offset == limit) {
+ if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
+ SCTP_STAT_INCR(sctps_pdrpbwrpt);
+ }
+ } else if (offset + sizeof(struct sctphdr) > limit) {
+ /* Only a partial SCTP common header. */
+ SCTP_STAT_INCR(sctps_pdrpcrupt);
+ offset = limit;
+ } else {
+ /* XXX: Check embedded SCTP common header. */
+ offset += sizeof(struct sctphdr);
+ }
+ /* Now parse through the chunks themselves. */
+ while (offset < limit) {
+ if (offset + sizeof(struct sctp_chunkhdr) > limit) {
+ SCTP_STAT_INCR(sctps_pdrpcrupt);
+ break;
+ }
+ chk_hdr = (struct sctp_chunkhdr *)(cp->data + offset);
+ desc.chunk_type = chk_hdr->chunk_type;
+ /* get amount we need to move */
+ chk_len = (uint32_t)ntohs(chk_hdr->chunk_length);
+ if (chk_len < sizeof(struct sctp_chunkhdr)) {
+ /* Someone is lying... */
+ break;
+ }
+ if (desc.chunk_type == SCTP_DATA) {
+ if (stcb->asoc.idata_supported) {
+				/* Someone is playing games with us. */
+ break;
+ }
+ if (chk_len <= sizeof(struct sctp_data_chunk)) {
+				/* Someone is playing games with us. */
+ break;
+ }
+ if (chk_len < sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY) {
+ /* Not enough data bytes available in the chunk. */
+ SCTP_STAT_INCR(sctps_pdrpnedat);
+ goto next_chunk;
+ }
+ if (offset + sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
+ /* Not enough data in buffer. */
+ break;
+ }
+ data_chunk = (struct sctp_data_chunk *)(cp->data + offset);
+ memcpy(desc.data_bytes, data_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
+ desc.tsn_ifany = data_chunk->dp.tsn;
+ if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
+ SCTP_STAT_INCR(sctps_pdrpmbda);
+ }
+ } else if (desc.chunk_type == SCTP_IDATA) {
+ if (!stcb->asoc.idata_supported) {
+				/* Someone is playing games with us. */
+ break;
+ }
+ if (chk_len <= sizeof(struct sctp_idata_chunk)) {
+				/* Someone is playing games with us. */
+ break;
+ }
+ if (chk_len < sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY) {
+ /* Not enough data bytes available in the chunk. */
+ SCTP_STAT_INCR(sctps_pdrpnedat);
+ goto next_chunk;
+ }
+ if (offset + sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
+ /* Not enough data in buffer. */
+ break;
+ }
+ idata_chunk = (struct sctp_idata_chunk *)(cp->data + offset);
+ memcpy(desc.data_bytes, idata_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
+ desc.tsn_ifany = idata_chunk->dp.tsn;
+ if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
+ SCTP_STAT_INCR(sctps_pdrpmbda);
+ }
+ } else {
+ if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
+ SCTP_STAT_INCR(sctps_pdrpmbct);
+ }
+ }
+ if (process_chunk_drop(stcb, &desc, net, pktdrp_flags)) {
+ SCTP_STAT_INCR(sctps_pdrppdbrk);
+ break;
+ }
+next_chunk:
+ offset += SCTP_SIZE32(chk_len);
+ }
+ /* Now update any rwnd --- possibly */
+ if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
+ /* From a peer, we get a rwnd report */
+ uint32_t a_rwnd;
+
+ SCTP_STAT_INCR(sctps_pdrpfehos);
+
+ bottle_bw = ntohl(cp->bottle_bw);
+ on_queue = ntohl(cp->current_onq);
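+		/*
+		 * For example (illustrative numbers only): with
+		 * bottle_bw = 100000 and on_queue = 60000 the reporter has
+		 * 40000 bytes of headroom; after subtracting our
+		 * total_flight this becomes the peer's usable rwnd estimate
+		 * below.
+		 */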
+ if (bottle_bw && on_queue) {
+ /* a rwnd report is in here */
+ if (bottle_bw > on_queue)
+ a_rwnd = bottle_bw - on_queue;
+ else
+ a_rwnd = 0;
+
+ if (a_rwnd == 0)
+ stcb->asoc.peers_rwnd = 0;
+ else {
+ if (a_rwnd > stcb->asoc.total_flight) {
+ stcb->asoc.peers_rwnd =
+ a_rwnd - stcb->asoc.total_flight;
+ } else {
+ stcb->asoc.peers_rwnd = 0;
+ }
+ if (stcb->asoc.peers_rwnd <
+ stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ stcb->asoc.peers_rwnd = 0;
+ }
+ }
+ }
+ } else {
+ SCTP_STAT_INCR(sctps_pdrpfmbox);
+ }
+
+ /* now middle boxes in sat networks get a cwnd bump */
+ if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) &&
+ (stcb->asoc.sat_t3_loss_recovery == 0) &&
+ (stcb->asoc.sat_network)) {
+ /*
+	 * This is debatable, but for sat networks it makes sense.
+	 * Note that if a T3 timer has gone off, we will prohibit any
+	 * changes to cwnd until we exit the t3 loss recovery.
+ */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
+ net, cp, &bottle_bw, &on_queue);
+ }
+}
+
+/*
+ * Handles all control chunks in a packet.
+ * inputs:
+ * - m: mbuf chain, assumed to still contain the IP/SCTP header
+ * - stcb: the tcb found for this packet
+ * - offset: offset into the mbuf chain to the first chunkhdr
+ * - length: length of the complete packet
+ * outputs:
+ * - length: modified to the remaining length after control processing
+ * - netp: modified to the new sctp_nets after cookie-echo processing
+ * - return NULL to discard the packet (i.e. no asoc, bad packet, ...),
+ *   otherwise return the tcb for this packet
+ */
+#ifdef __GNUC__
+__attribute__ ((noinline))
+#endif
+static struct sctp_tcb *
+sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_association *asoc;
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+ uint32_t vtag_in;
+ int num_chunks = 0; /* number of control chunks processed */
+ uint32_t chk_length, contiguous;
+ int ret;
+ int abort_no_unlock = 0;
+ int ecne_seen = 0;
+ /*
+	 * How big should this be, and should it be alloc'd? Let's try the
+	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
+	 * until we get into jumbograms and such.
+ */
+ uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
+ int got_auth = 0;
+ uint32_t auth_offset = 0, auth_len = 0;
+ int auth_skipped = 0;
+ int asconf_cnt = 0;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
+ iphlen, *offset, length, (void *)stcb);
+
+ if (stcb) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ }
+ /* validate chunk header length... */
+ if (ntohs(ch->chunk_length) < sizeof(*ch)) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
+ ntohs(ch->chunk_length));
+ *offset = length;
+ return (stcb);
+ }
+ /*
+ * validate the verification tag
+ */
+ vtag_in = ntohl(sh->v_tag);
+
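+	/*
+	 * Tag rules in brief: an INIT must carry a zero tag; ABORT,
+	 * SHUTDOWN-COMPLETE and PACKET-DROPPED may carry either our tag or
+	 * the peer's (depending on the T bit); a stale SHUTDOWN-ACK gets an
+	 * OOTB response; everything else must match my_vtag exactly.
+	 */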
+ if (ch->chunk_type == SCTP_INITIATION) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
+ ntohs(ch->chunk_length), vtag_in);
+ if (vtag_in != 0) {
+ /* protocol error- silently discard... */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ return (NULL);
+ }
+ } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
+ /*
+		 * If there is no stcb, skip the AUTH chunk and process it
+		 * later, after a stcb is found (to validate that the lookup
+		 * was valid).
+ */
+ if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
+ (stcb == NULL) &&
+ (inp->auth_supported == 1)) {
+ /* save this chunk for later processing */
+ auth_skipped = 1;
+ auth_offset = *offset;
+ auth_len = ntohs(ch->chunk_length);
+
+ /* (temporarily) move past this chunk */
+ *offset += SCTP_SIZE32(auth_len);
+ if (*offset >= length) {
+ /* no more data left in the mbuf chain */
+ *offset = length;
+ return (NULL);
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ }
+ if (ch == NULL) {
+ /* Help */
+ *offset = length;
+ return (stcb);
+ }
+ if (ch->chunk_type == SCTP_COOKIE_ECHO) {
+ goto process_control_chunks;
+ }
+ /*
+ * first check if it's an ASCONF with an unknown src addr we
+ * need to look inside to find the association
+ */
+ if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
+ struct sctp_chunkhdr *asconf_ch = ch;
+ uint32_t asconf_offset = 0, asconf_len = 0;
+
+ /* inp's refcount may be reduced */
+ SCTP_INP_INCR_REF(inp);
+
+ asconf_offset = *offset;
+ do {
+ asconf_len = ntohs(asconf_ch->chunk_length);
+ if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
+ break;
+ stcb = sctp_findassociation_ep_asconf(m,
+ *offset,
+ dst,
+ sh, &inp, netp, vrf_id);
+ if (stcb != NULL)
+ break;
+ asconf_offset += SCTP_SIZE32(asconf_len);
+ asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
+ if (stcb == NULL) {
+ /*
+ * reduce inp's refcount if not reduced in
+ * sctp_findassociation_ep_asconf().
+ */
+ SCTP_INP_DECR_REF(inp);
+ }
+
+ /* now go back and verify any auth chunk to be sure */
+ if (auth_skipped && (stcb != NULL)) {
+ struct sctp_auth_chunk *auth;
+
+ if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
+ auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
+ got_auth = 1;
+ auth_skipped = 0;
+ } else {
+ auth = NULL;
+ }
+ if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
+ auth_offset)) {
+ /* auth HMAC failed so dump it */
+ *offset = length;
+ return (stcb);
+ } else {
+ /* remaining chunks are HMAC checked */
+ stcb->asoc.authenticated = 1;
+ }
+ }
+ }
+ if (stcb == NULL) {
+ SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ /* no association, so it's out of the blue... */
+ sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ *offset = length;
+ return (NULL);
+ }
+ asoc = &stcb->asoc;
+ /* ABORT and SHUTDOWN can use either v_tag... */
+ if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
+ (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
+ (ch->chunk_type == SCTP_PACKET_DROPPED)) {
+ /* Take the T-bit always into account. */
+ if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
+ (vtag_in == asoc->my_vtag)) ||
+ (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
+ (asoc->peer_vtag != htonl(0)) &&
+ (vtag_in == asoc->peer_vtag))) {
+ /* this is valid */
+ } else {
+ /* drop this packet... */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ return (NULL);
+ }
+ } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+ if (vtag_in != asoc->my_vtag) {
+ /*
+ * this could be a stale SHUTDOWN-ACK or the
+ * peer never got the SHUTDOWN-COMPLETE and
+ * is still hung; we have started a new asoc
+ * but it won't complete until the shutdown
+ * is completed
+ */
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ sctp_handle_ootb(m, iphlen, *offset, src, dst,
+ sh, inp, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ return (NULL);
+ }
+ } else {
+ /* for all other chunks, vtag must match */
+ if (vtag_in != asoc->my_vtag) {
+ /* invalid vtag... */
+ SCTPDBG(SCTP_DEBUG_INPUT3,
+ "invalid vtag: %xh, expect %xh\n",
+ vtag_in, asoc->my_vtag);
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ }
+ } /* end if !SCTP_COOKIE_ECHO */
+ /*
+ * process all control chunks...
+ */
+ if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
+ (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
+ (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* implied cookie-ack.. we must have lost the ack */
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
+ *netp);
+ }
+
+ process_control_chunks:
+ while (IS_SCTP_CONTROL(ch)) {
+ /* validate chunk length */
+ chk_length = ntohs(ch->chunk_length);
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
+ ch->chunk_type, chk_length);
+ SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
+ if (chk_length < sizeof(*ch) ||
+ (*offset + (int)chk_length) > length) {
+ *offset = length;
+ return (stcb);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
+ /*
+		 * INIT and INIT-ACK only get the fixed chunk "header" portion
+		 * pulled in, because we don't have to process the peer's
+		 * COOKIE here. All others get a complete chunk.
+ */
+ switch (ch->chunk_type) {
+ case SCTP_INITIATION:
+ contiguous = sizeof(struct sctp_init_chunk);
+ break;
+ case SCTP_INITIATION_ACK:
+ contiguous = sizeof(struct sctp_init_ack_chunk);
+ break;
+ default:
+ contiguous = min(chk_length, sizeof(chunk_buf));
+ break;
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ contiguous,
+ chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ return (NULL);
+ }
+
+ num_chunks++;
+ /* Save off the last place we got a control from */
+ if (stcb != NULL) {
+ if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
+ /*
+ * allow last_control to be NULL if
+ * ASCONF... ASCONF processing will find the
+ * right net later
+ */
+ if ((netp != NULL) && (*netp != NULL))
+ stcb->asoc.last_control_chunk_from = *netp;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB0, ch->chunk_type);
+#endif
+
+ /* check to see if this chunk required auth, but isn't */
+ if ((stcb != NULL) &&
+ sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
+ !stcb->asoc.authenticated) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ goto next_chunk;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_INITIATION:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
+ /* The INIT chunk must be the only chunk. */
+ if ((num_chunks > 1) ||
+ (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+ /* RFC 4960 requires that no ABORT is sent */
+ *offset = length;
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ return (NULL);
+ }
+ /* Honor our resource limit. */
+ if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
+ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+ sctp_abort_association(inp, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ *offset = length;
+ return (NULL);
+ }
+ sctp_handle_init(m, iphlen, *offset, src, dst, sh,
+ (struct sctp_init_chunk *)ch, inp,
+ stcb, *netp, &abort_no_unlock,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ *offset = length;
+ if ((!abort_no_unlock) && (stcb != NULL)) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ return (NULL);
+ break;
+ case SCTP_PAD_CHUNK:
+ break;
+ case SCTP_INITIATION_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else {
+ *offset = length;
+ if (stcb != NULL) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ return (NULL);
+ }
+ }
+ /* The INIT-ACK chunk must be the only chunk. */
+ if ((num_chunks > 1) ||
+ (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+ *offset = length;
+ return (stcb);
+ }
+ if ((netp != NULL) && (*netp != NULL)) {
+ ret = sctp_handle_init_ack(m, iphlen, *offset,
+ src, dst, sh,
+ (struct sctp_init_ack_chunk *)ch,
+ stcb, *netp,
+ &abort_no_unlock,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id);
+ } else {
+ ret = -1;
+ }
+ *offset = length;
+ if (abort_no_unlock) {
+ return (NULL);
+ }
+ /*
+ * Special case, I must call the output routine to
+ * get the cookie echoed
+ */
+ if ((stcb != NULL) && (ret == 0)) {
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+ }
+ return (stcb);
+ break;
+ case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK:
+ {
+ int abort_now = 0;
+ uint32_t a_rwnd, cum_ack;
+ uint16_t num_seg, num_nr_seg, num_dup;
+ uint8_t flags;
+ int offset_seg, offset_dup;
+
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
+ ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
+ SCTP_STAT_INCR(sctps_recvsacks);
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
+ (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
+ break;
+ }
+ if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
+ if (chk_length < sizeof(struct sctp_sack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
+ break;
+ }
+ } else {
+ if (stcb->asoc.nrsack_supported == 0) {
+ goto unknown_chunk;
+ }
+ if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
+ break;
+ }
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /*-
+				 * If we have sent a shutdown-ack, we will pay no
+				 * attention to a sack sent to us, since
+				 * we don't care anymore.
+ */
+ break;
+ }
+ flags = ch->chunk_flags;
+ if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
+ struct sctp_sack_chunk *sack;
+
+ sack = (struct sctp_sack_chunk *)ch;
+ cum_ack = ntohl(sack->sack.cum_tsn_ack);
+ num_seg = ntohs(sack->sack.num_gap_ack_blks);
+ num_nr_seg = 0;
+ num_dup = ntohs(sack->sack.num_dup_tsns);
+ a_rwnd = ntohl(sack->sack.a_rwnd);
+ if (sizeof(struct sctp_sack_chunk) +
+ num_seg * sizeof(struct sctp_gap_ack_block) +
+ num_dup * sizeof(uint32_t) != chk_length) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
+ break;
+ }
+ offset_seg = *offset + sizeof(struct sctp_sack_chunk);
+ offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
+ } else {
+ struct sctp_nr_sack_chunk *nr_sack;
+
+ nr_sack = (struct sctp_nr_sack_chunk *)ch;
+ cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
+ num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
+ num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
+ num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
+ a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
+ if (sizeof(struct sctp_nr_sack_chunk) +
+ (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
+ num_dup * sizeof(uint32_t) != chk_length) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
+ break;
+ }
+ offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
+ offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
+ }
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
+ (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
+ cum_ack, num_seg, a_rwnd);
+ stcb->asoc.seen_a_sack_this_pkt = 1;
+ if ((stcb->asoc.pr_sctp_cnt == 0) &&
+ (num_seg == 0) && (num_nr_seg == 0) &&
+ SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
+ (stcb->asoc.saw_sack_with_frags == 0) &&
+ (stcb->asoc.saw_sack_with_nr_frags == 0) &&
+ (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ /*
+ * We have a SIMPLE sack having no
+ * prior segments and data on sent
+ * queue to be acked. Use the
+ * faster path sack processing. We
+ * also allow window update sacks
+ * with no missing segments to go
+ * this way too.
+ */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
+ &abort_now, ecne_seen);
+ } else {
+ if ((netp != NULL) && (*netp != NULL)) {
+ sctp_handle_sack(m, offset_seg, offset_dup, stcb,
+ num_seg, num_nr_seg, num_dup, &abort_now, flags,
+ cum_ack, a_rwnd, ecne_seen);
+ }
+ }
+ if (abort_now) {
+ /* ABORT signal from sack processing */
+ *offset = length;
+ return (NULL);
+ }
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ }
+ case SCTP_HEARTBEAT_REQUEST:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
+ SCTP_STAT_INCR(sctps_recvheartbeat);
+ sctp_send_heartbeat_ack(stcb, m, *offset,
+ chk_length, *netp);
+ }
+ break;
+ case SCTP_HEARTBEAT_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
+				/* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+ SCTP_STAT_INCR(sctps_recvheartbeatack);
+ if ((netp != NULL) && (*netp != NULL)) {
+ sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
+ stcb, *netp);
+ }
+ break;
+ case SCTP_ABORT_ASSOCIATION:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
+ (void *)stcb);
+ *offset = length;
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
+ if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
+ return (NULL);
+ } else {
+ return (stcb);
+ }
+ } else {
+ return (NULL);
+ }
+ break;
+ case SCTP_SHUTDOWN:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
+ (void *)stcb);
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
+ *offset = length;
+ return (stcb);
+ }
+ if ((netp != NULL) && (*netp != NULL)) {
+ int abort_flag = 0;
+
+ sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
+ stcb, *netp, &abort_flag);
+ if (abort_flag) {
+ *offset = length;
+ return (NULL);
+ }
+ }
+ break;
+ case SCTP_SHUTDOWN_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
+ sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
+ }
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_OPERATION_ERROR:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
+ sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
+ *offset = length;
+ return (NULL);
+ }
+ break;
+ case SCTP_COOKIE_ECHO:
+ SCTPDBG(SCTP_DEBUG_INPUT3,
+ "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
+ if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
+ ;
+ } else {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ abend:
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ }
+ /*-
+			 * First, are we accepting? We do this again here
+			 * since it is possible that a previous endpoint WAS
+			 * listening, responded to an INIT-ACK, and then
+			 * closed. We opened and bound... and are now no
+			 * longer listening.
+ *
+ * XXXGL: notes on checking listen queue length.
+ * 1) SCTP_IS_LISTENING() doesn't necessarily mean
+ * SOLISTENING(), because a listening "UDP type"
+ * socket isn't listening in terms of the socket
+ * layer. It is a normal data flow socket, that
+ * can fork off new connections. Thus, we should
+ * look into sol_qlen only in case we are !UDP.
+ * 2) Checking sol_qlen in general requires locking
+ * the socket, and this code lacks that.
+ */
+ if ((stcb == NULL) &&
+ (!SCTP_IS_LISTENING(inp) ||
+ (!(inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) {
+#else
+ inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) {
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
+ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+ sctp_abort_association(inp, stcb, m, iphlen,
+ src, dst, sh, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ vrf_id, port);
+ }
+ *offset = length;
+ return (NULL);
+ } else {
+ struct mbuf *ret_buf;
+ struct sctp_inpcb *linp;
+ struct sctp_tmit_chunk *chk;
+
+ if (stcb) {
+ linp = NULL;
+ } else {
+ linp = inp;
+ }
+
+ if (linp != NULL) {
+ SCTP_ASOC_CREATE_LOCK(linp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ SCTP_ASOC_CREATE_UNLOCK(linp);
+ goto abend;
+ }
+ }
+
+ if (netp != NULL) {
+ struct sctp_tcb *locked_stcb;
+
+ locked_stcb = stcb;
+ ret_buf =
+ sctp_handle_cookie_echo(m, iphlen,
+ *offset,
+ src, dst,
+ sh,
+ (struct sctp_cookie_echo_chunk *)ch,
+ &inp, &stcb, netp,
+ auth_skipped,
+ auth_offset,
+ auth_len,
+ &locked_stcb,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype,
+ mflowid,
+#endif
+ vrf_id,
+ port);
+ if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
+ SCTP_TCB_UNLOCK(locked_stcb);
+ }
+ if (stcb != NULL) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ }
+ } else {
+ ret_buf = NULL;
+ }
+ if (linp != NULL) {
+ SCTP_ASOC_CREATE_UNLOCK(linp);
+ }
+ if (ret_buf == NULL) {
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTPDBG(SCTP_DEBUG_INPUT3,
+ "GAK, null buffer\n");
+ *offset = length;
+ return (NULL);
+ }
+ /* if AUTH skipped, see if it verified... */
+ if (auth_skipped) {
+ got_auth = 1;
+ auth_skipped = 0;
+ }
+ /* Restart the timer if we have pending data */
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (chk != NULL) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+ }
+ }
+ break;
+ case SCTP_COOKIE_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
+ if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
+ return (stcb);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else if (stcb) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ *offset = length;
+ return (NULL);
+ }
+ }
+ if ((netp != NULL) && (*netp != NULL)) {
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
+ }
+ break;
+ case SCTP_ECN_ECHO:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
+				/* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+ if (stcb->asoc.ecn_supported == 0) {
+ goto unknown_chunk;
+ }
+ sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
+ ecne_seen = 1;
+ break;
+ case SCTP_ECN_CWR:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
+ *offset = length;
+ return (stcb);
+ }
+ if (stcb->asoc.ecn_supported == 0) {
+ goto unknown_chunk;
+ }
+ sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
+ break;
+ case SCTP_SHUTDOWN_COMPLETE:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
+ /* must be first and only chunk */
+ if ((num_chunks > 1) ||
+ (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+ *offset = length;
+ return (stcb);
+ }
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
+ sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
+ stcb, *netp);
+ }
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_ASCONF:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
+ if (stcb != NULL) {
+ if (stcb->asoc.asconf_supported == 0) {
+ goto unknown_chunk;
+ }
+ sctp_handle_asconf(m, *offset, src,
+ (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
+ asconf_cnt++;
+ }
+ break;
+ case SCTP_ASCONF_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
+ if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
+				/* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
+ if (stcb->asoc.asconf_supported == 0) {
+ goto unknown_chunk;
+ }
+ /* He's alive so give him credit */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_asconf_ack(m, *offset,
+ (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
+ if (abort_no_unlock)
+ return (NULL);
+ }
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ case SCTP_IFORWARD_CUM_TSN:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
+ ch->chunk_type == SCTP_FORWARD_CUM_TSN ? "FORWARD_TSN" : "I_FORWARD_TSN");
+ if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
+				/* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+
+ if (stcb != NULL) {
+ int abort_flag = 0;
+
+ if (stcb->asoc.prsctp_supported == 0) {
+ goto unknown_chunk;
+ }
+ if (((stcb->asoc.idata_supported == 1) && (ch->chunk_type == SCTP_FORWARD_CUM_TSN)) ||
+ ((stcb->asoc.idata_supported == 0) && (ch->chunk_type == SCTP_IFORWARD_CUM_TSN))) {
+ if (ch->chunk_type == SCTP_FORWARD_CUM_TSN) {
+ SCTP_SNPRINTF(msg, sizeof(msg), "%s", "FORWARD-TSN chunk received when I-FORWARD-TSN was negotiated");
+ } else {
+ SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-FORWARD-TSN chunk received when FORWARD-TSN was negotiated");
+ }
+ op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ *offset = length;
+ return (NULL);
+ }
+ *fwd_tsn_seen = 1;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ *offset = length;
+ return (NULL);
+ }
+ /*
+ * For the purposes of sending a SACK, this
+ * looks like DATA chunks.
+ */
+ stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
+ sctp_handle_forward_tsn(stcb,
+ (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
+ if (abort_flag) {
+ *offset = length;
+ return (NULL);
+ }
+ }
+ break;
+ case SCTP_STREAM_RESET:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
+ if ((stcb == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req))) {
+ /* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+ if (stcb->asoc.reconfig_supported == 0) {
+ goto unknown_chunk;
+ }
+ if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
+ /* stop processing */
+ *offset = length;
+ return (NULL);
+ }
+ break;
+ case SCTP_PACKET_DROPPED:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
+ /* re-get it all please */
+ if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
+ /* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+
+ if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
+ if (stcb->asoc.pktdrop_supported == 0) {
+ goto unknown_chunk;
+ }
+ sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
+ stcb, *netp,
+ min(chk_length, contiguous));
+ }
+ break;
+ case SCTP_AUTHENTICATION:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
+ if (stcb == NULL) {
+ /* save the first AUTH for later processing */
+ if (auth_skipped == 0) {
+ auth_offset = *offset;
+ auth_len = chk_length;
+ auth_skipped = 1;
+ }
+ /* skip this chunk (temporarily) */
+ goto next_chunk;
+ }
+ if (stcb->asoc.auth_supported == 0) {
+ goto unknown_chunk;
+ }
+ if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
+ (chk_length > (sizeof(struct sctp_auth_chunk) +
+ SCTP_AUTH_DIGEST_LEN_MAX))) {
+ /* It's not ours */
+ *offset = length;
+ return (stcb);
+ }
+ if (got_auth == 1) {
+ /* skip this chunk... it's already auth'd */
+ goto next_chunk;
+ }
+ got_auth = 1;
+ if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, m, *offset)) {
+ /* auth HMAC failed so dump the packet */
+ *offset = length;
+ return (stcb);
+ } else {
+ /* remaining chunks are HMAC checked */
+ stcb->asoc.authenticated = 1;
+ }
+ break;
+
+ default:
+ unknown_chunk:
+ /* it's an unknown chunk! */
+ if ((ch->chunk_type & 0x40) &&
+ (stcb != NULL) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_EMPTY) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_INUSE) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
+ struct sctp_gen_error_cause *cause;
+ int len;
+
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
+ 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err != NULL) {
+ len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
+ cause = mtod(op_err, struct sctp_gen_error_cause *);
+ cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
+ cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
+ SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
+ if (SCTP_BUF_NEXT(op_err) != NULL) {
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
+ }
+#endif
+ sctp_queue_op_err(stcb, op_err);
+ } else {
+ sctp_m_freem(op_err);
+ }
+ }
+ }
+ if ((ch->chunk_type & 0x80) == 0) {
+ /* discard this packet */
+ *offset = length;
+ return (stcb);
+ } /* else skip this bad chunk and continue... */
+ break;
+ } /* switch (ch->chunk_type) */
+
+
+ next_chunk:
+ /* get the next chunk */
+ *offset += SCTP_SIZE32(chk_length);
+ if (*offset >= length) {
+ /* no more data left in the mbuf chain */
+ break;
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ return (stcb);
+ }
+ } /* while */
+
+ if ((asconf_cnt > 0) && (stcb != NULL)) {
+ sctp_send_asconf_ack(stcb);
+ }
+ return (stcb);
+}
+
+
+/*
+ * common input chunk processing (v4 and v6)
+ */
+void
+sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch,
+ uint8_t compute_crc,
+ uint8_t ecn_bits,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ uint32_t high_tsn;
+ int fwd_tsn_seen = 0, data_processed = 0;
+ struct mbuf *m = *mm, *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+ int un_sent;
+ int cnt_ctrl_ready = 0;
+ struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net = NULL;
+#if defined(__Userspace__)
+ struct socket *upcall_socket = NULL;
+#endif
+
+ SCTP_STAT_INCR(sctps_recvdatagrams);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 1);
+ sctp_auditing(0, inp, stcb, net);
+#endif
+ if (compute_crc != 0) {
+ uint32_t check, calc_check;
+
+ check = sh->checksum;
+ sh->checksum = 0;
+ calc_check = sctp_calculate_cksum(m, iphlen);
+ sh->checksum = check;
+ if (calc_check != check) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
+ calc_check, check, (void *)m, length, iphlen);
+ stcb = sctp_findassociation_addr(m, offset, src, dst,
+ sh, ch, &inp, &net, vrf_id);
+#if defined(INET) || defined(INET6)
+ if ((ch->chunk_type != SCTP_INITIATION) &&
+ (net != NULL) && (net->port != port)) {
+ if (net->port == 0) {
+ /* UDP encapsulation turned on. */
+ net->mtu -= sizeof(struct udphdr);
+ if (stcb->asoc.smallest_mtu > net->mtu) {
+ sctp_pathmtu_adjustment(stcb, net->mtu);
+ }
+ } else if (port == 0) {
+ /* UDP encapsulation turned off. */
+ net->mtu += sizeof(struct udphdr);
+ /* XXX Update smallest_mtu */
+ }
+ net->port = port;
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net != NULL) {
+ net->flowtype = mflowtype;
+ net->flowid = mflowid;
+ }
+ SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
+#endif
+ if ((inp != NULL) && (stcb != NULL)) {
+ sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
+ } else if ((inp != NULL) && (stcb == NULL)) {
+ inp_decr = inp;
+ }
+ SCTP_STAT_INCR(sctps_badsum);
+ SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
+ goto out;
+ }
+ }
+ /* Destination port of 0 is illegal, based on RFC4960. */
+ if (sh->dest_port == 0) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto out;
+ }
+ stcb = sctp_findassociation_addr(m, offset, src, dst,
+ sh, ch, &inp, &net, vrf_id);
+#if defined(INET) || defined(INET6)
+ if ((ch->chunk_type != SCTP_INITIATION) &&
+ (net != NULL) && (net->port != port)) {
+ if (net->port == 0) {
+ /* UDP encapsulation turned on. */
+ net->mtu -= sizeof(struct udphdr);
+ if (stcb->asoc.smallest_mtu > net->mtu) {
+ sctp_pathmtu_adjustment(stcb, net->mtu);
+ }
+ } else if (port == 0) {
+ /* UDP encapsulation turned off. */
+ net->mtu += sizeof(struct udphdr);
+ /* XXX Update smallest_mtu */
+ }
+ net->port = port;
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net != NULL) {
+ net->flowtype = mflowtype;
+ net->flowid = mflowid;
+ }
+#endif
+ if (inp == NULL) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
+#endif
+ SCTP_STAT_INCR(sctps_noport);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
+ goto out;
+ }
+#endif
+ if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+ sctp_send_shutdown_complete2(src, dst, sh,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ goto out;
+ }
+ if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
+ goto out;
+ }
+ if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
+ if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
+ ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
+ (ch->chunk_type != SCTP_INIT))) {
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Out of the blue");
+ sctp_send_abort(m, iphlen, src, dst,
+ sh, 0, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ }
+ }
+ goto out;
+ } else if (stcb == NULL) {
+ inp_decr = inp;
+ }
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
+ (void *)m, iphlen, offset, length, (void *)stcb);
+ if (stcb) {
+ /* always clear this before beginning a packet */
+ stcb->asoc.authenticated = 0;
+ stcb->asoc.seen_a_sack_this_pkt = 0;
+ SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
+ (void *)stcb, stcb->asoc.state);
+
+ if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
+ /*-
+ * If we hit here, we had a ref count
+ * up when the assoc was aborted and the
+ * timer is clearing out the assoc, we should
+ * NOT respond to any packet... it's OOTB.
+ */
+ SCTP_TCB_UNLOCK(stcb);
+ stcb = NULL;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
+#endif
+ SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ goto out;
+ }
+ }
+#if defined(__Userspace__)
+ if ((stcb != NULL) &&
+ !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb->sctp_socket != NULL)) {
+ if (stcb->sctp_socket->so_head != NULL) {
+ upcall_socket = stcb->sctp_socket->so_head;
+ } else {
+ upcall_socket = stcb->sctp_socket;
+ }
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ }
+#endif
+ if (IS_SCTP_CONTROL(ch)) {
+ /* process the control portion of the SCTP packet */
+ /* sa_ignore NO_NULL_CHK */
+ stcb = sctp_process_control(m, iphlen, &offset, length,
+ src, dst, sh, ch,
+ inp, stcb, &net, &fwd_tsn_seen,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ if (stcb) {
+ /* This covers us if the cookie-echo was there
+ * and it changes our INP.
+ */
+ inp = stcb->sctp_ep;
+#if defined(INET) || defined(INET6)
+ if ((ch->chunk_type != SCTP_INITIATION) &&
+ (net != NULL) && (net->port != port)) {
+ if (net->port == 0) {
+ /* UDP encapsulation turned on. */
+ net->mtu -= sizeof(struct udphdr);
+ if (stcb->asoc.smallest_mtu > net->mtu) {
+ sctp_pathmtu_adjustment(stcb, net->mtu);
+ }
+ } else if (port == 0) {
+ /* UDP encapsulation turned off. */
+ net->mtu += sizeof(struct udphdr);
+ /* XXX Update smallest_mtu */
+ }
+ net->port = port;
+ }
+#endif
+ }
+ } else {
+ /*
+ * no control chunks, so pre-process DATA chunks (these
+ * checks are taken care of by control processing)
+ */
+
+ /*
+ * if DATA only packet, and auth is required, then punt...
+ * can't have authenticated without any AUTH (control)
+ * chunks
+ */
+ if ((stcb != NULL) &&
+ sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
+ /* "silently" ignore */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
+#endif
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ goto out;
+ }
+ if (stcb == NULL) {
+ /* out of the blue DATA chunk */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh);
+#endif
+ SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ goto out;
+ }
+ if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
+ /* v_tag mismatch! */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
+#endif
+ SCTP_STAT_INCR(sctps_badvtag);
+ goto out;
+ }
+ }
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
+#endif
+ if (stcb == NULL) {
+ /*
+ * No valid TCB for this packet, or we found a bad
+ * packet while processing control, or we're done with this
+ * packet (done or skip rest of data), so we drop it...
+ */
+ goto out;
+ }
+#if defined(__Userspace__)
+ if ((upcall_socket == NULL) &&
+ !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb->sctp_socket != NULL)) {
+ if (stcb->sctp_socket->so_head != NULL) {
+ upcall_socket = stcb->sctp_socket->so_head;
+ } else {
+ upcall_socket = stcb->sctp_socket;
+ }
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ }
+#endif
+
+ /*
+ * DATA chunk processing
+ */
+ /* plow through the data chunks while length > offset */
+
+ /*
+ * Rest should be DATA only. Check authentication state if AUTH for
+ * DATA is required.
+ */
+ if ((length > offset) &&
+ (stcb != NULL) &&
+ sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
+ !stcb->asoc.authenticated) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "Data chunk requires AUTH, skipped\n");
+ goto trigger_send;
+ }
+ if (length > offset) {
+ int retval;
+
+ /*
+ * First check to make sure our state is correct. We would
+ * not get here unless we really did have a tag, so we don't
+ * abort if this happens, just dump the chunk silently.
+ */
+ switch (SCTP_GET_STATE(stcb)) {
+ case SCTP_STATE_COOKIE_ECHOED:
+ /*
+ * We consider that data with a valid tag in this
+ * state shows us the cookie-ack was lost. Imply it was
+ * there.
+ */
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
+ break;
+ case SCTP_STATE_COOKIE_WAIT:
+ /*
+ * We consider OOTB any data sent during asoc setup.
+ */
+ SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ goto out;
+ /*sa_ignore NOTREACHED*/
+ break;
+ case SCTP_STATE_EMPTY: /* should not happen */
+ case SCTP_STATE_INUSE: /* should not happen */
+ case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */
+ case SCTP_STATE_SHUTDOWN_ACK_SENT:
+ default:
+ goto out;
+ /*sa_ignore NOTREACHED*/
+ break;
+ case SCTP_STATE_OPEN:
+ case SCTP_STATE_SHUTDOWN_SENT:
+ break;
+ }
+ /* plow through the data chunks while length > offset */
+ retval = sctp_process_data(mm, iphlen, &offset, length,
+ inp, stcb, net, &high_tsn);
+ if (retval == 2) {
+ /*
+ * The association aborted, NO UNLOCK needed since
+ * the association is destroyed.
+ */
+ stcb = NULL;
+ goto out;
+ }
+ data_processed = 1;
+ /*
+ * Anything important needs to have been m_copy'ed in
+ * process_data
+ */
+ }
+
+ /* take care of ecn */
+ if ((data_processed == 1) &&
+ (stcb->asoc.ecn_supported == 1) &&
+ ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
+ /* Yep, we need to add an ECNE */
+ sctp_send_ecn_echo(stcb, net, high_tsn);
+ }
+
+ if ((data_processed == 0) && (fwd_tsn_seen)) {
+ int was_a_gap;
+ uint32_t highest_tsn;
+
+ if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
+ highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = stcb->asoc.highest_tsn_inside_map;
+ }
+ was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
+ stcb->asoc.send_sack = 1;
+ sctp_sack_check(stcb, was_a_gap);
+ } else if (fwd_tsn_seen) {
+ stcb->asoc.send_sack = 1;
+ }
+ /* trigger send of any chunks in queue... */
+trigger_send:
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 2);
+ sctp_auditing(1, inp, stcb, net);
+#endif
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "Check for chunk output prw:%d tqe:%d tf=%d\n",
+ stcb->asoc.peers_rwnd,
+ TAILQ_EMPTY(&stcb->asoc.control_send_queue),
+ stcb->asoc.total_flight);
+ un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+ if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
+ }
+ if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
+ cnt_ctrl_ready ||
+ stcb->asoc.trigger_reset ||
+ ((un_sent > 0) &&
+ (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+ SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 3);
+ sctp_auditing(2, inp, stcb, net);
+#endif
+ out:
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+#if defined(__Userspace__)
+ if (upcall_socket != NULL) {
+ if (upcall_socket->so_upcall != NULL) {
+ if (soreadable(upcall_socket) ||
+ sowriteable(upcall_socket) ||
+ upcall_socket->so_error) {
+ (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+ }
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(upcall_socket);
+ sorele(upcall_socket);
+ }
+#endif
+ if (inp_decr != NULL) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp_decr);
+ SCTP_INP_DECR_REF(inp_decr);
+ SCTP_INP_WUNLOCK(inp_decr);
+ }
+ return;
+}
+
+#ifdef INET
+#if !defined(__Userspace__)
+void
+sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
+{
+ struct mbuf *m;
+ int iphlen;
+ uint32_t vrf_id = 0;
+ uint8_t ecn_bits;
+ struct sockaddr_in src, dst;
+ struct ip *ip;
+ struct sctphdr *sh;
+ struct sctp_chunkhdr *ch;
+ int length, offset;
+ uint8_t compute_crc;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint32_t mflowid;
+ uint8_t mflowtype;
+ uint16_t fibnum;
+#endif
+#if defined(__Userspace__)
+ uint16_t port = 0;
+#endif
+
+ iphlen = off;
+ if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
+ SCTP_RELEASE_PKT(i_pak);
+ return;
+ }
+ m = SCTP_HEADER_TO_CHAIN(i_pak);
+#ifdef SCTP_MBUF_LOGGING
+ /* Log in any input mbufs */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(m, SCTP_MBUF_INPUT);
+ }
+#endif
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(m);
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
+ m->m_pkthdr.len,
+ if_name(m->m_pkthdr.rcvif),
+ (int)m->m_pkthdr.csum_flags, CSUM_BITS);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
+ m->m_pkthdr.len,
+ m->m_pkthdr.rcvif->if_name,
+ m->m_pkthdr.rcvif->if_unit,
+ m->m_pkthdr.csum_flags);
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+ m->m_pkthdr.len,
+ m->m_pkthdr.rcvif->if_xname,
+ m->m_pkthdr.csum_flags);
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowid = m->m_pkthdr.flowid;
+ mflowtype = M_HASHTYPE_GET(m);
+ fibnum = M_GETFIB(m);
+#endif
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+ /* Get IP, SCTP, and first chunk header together in the first mbuf. */
+ offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ if (SCTP_BUF_LEN(m) < offset) {
+ if ((m = m_pullup(m, offset)) == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return;
+ }
+ }
+ ip = mtod(m, struct ip *);
+ sh = (struct sctphdr *)((caddr_t)ip + iphlen);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ offset -= sizeof(struct sctp_chunkhdr);
+ memset(&src, 0, sizeof(struct sockaddr_in));
+ src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ src.sin_len = sizeof(struct sockaddr_in);
+#endif
+ src.sin_port = sh->src_port;
+ src.sin_addr = ip->ip_src;
+ memset(&dst, 0, sizeof(struct sockaddr_in));
+ dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ dst.sin_port = sh->dest_port;
+ dst.sin_addr = ip->ip_dst;
+#if defined(_WIN32) && !defined(__Userspace__)
+ NTOHS(ip->ip_len);
+#endif
+#if defined(__linux__) || (defined(_WIN32) && defined(__Userspace__))
+ ip->ip_len = ntohs(ip->ip_len);
+#endif
+#if defined(__Userspace__)
+#if defined(__linux__) || defined(_WIN32)
+ length = ip->ip_len;
+#else
+ length = ip->ip_len + iphlen;
+#endif
+#elif defined(__FreeBSD__)
+ length = ntohs(ip->ip_len);
+#elif defined(__APPLE__)
+ length = ip->ip_len + iphlen;
+#else
+ length = ip->ip_len;
+#endif
+ /* Validate mbuf chain length with IP payload length. */
+ if (SCTP_HEADER_LEN(m) != length) {
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto out;
+ }
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
+ goto out;
+ }
+ if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
+ goto out;
+ }
+ ecn_bits = ip->ip_tos;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ compute_crc = 0;
+ } else {
+#else
+ if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
+ (SCTP_IS_IT_LOOPBACK(m)))) {
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ compute_crc = 0;
+ } else {
+#endif
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ compute_crc = 1;
+ }
+ sctp_common_input_processing(&m, iphlen, offset, length,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ compute_crc,
+ ecn_bits,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ out:
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return;
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_MCORE_INPUT) && defined(SMP)
+extern int *sctp_cpuarry;
+#endif
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+int
+sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
+{
+ struct mbuf *m;
+ int off;
+
+ m = *mp;
+ off = *offp;
+#else
+void
+sctp_input(struct mbuf *m, int off)
+{
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_MCORE_INPUT) && defined(SMP)
+ if (mp_ncpus > 1) {
+ struct ip *ip;
+ struct sctphdr *sh;
+ int offset;
+ int cpu_to_use;
+ uint32_t flowid, tag;
+
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+ flowid = m->m_pkthdr.flowid;
+ } else {
+ /* No flow id was built by the lower layers,
+ * so create one here.
+ */
+ offset = off + sizeof(struct sctphdr);
+ if (SCTP_BUF_LEN(m) < offset) {
+ if ((m = m_pullup(m, offset)) == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return (IPPROTO_DONE);
+ }
+ }
+ ip = mtod(m, struct ip *);
+ sh = (struct sctphdr *)((caddr_t)ip + off);
+ tag = htonl(sh->v_tag);
+ flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
+ m->m_pkthdr.flowid = flowid;
+ M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
+ }
+ cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
+ sctp_queue_to_mcore(m, off, cpu_to_use);
+ return (IPPROTO_DONE);
+ }
+#endif
+#endif
+ sctp_input_with_port(m, off, 0);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ return (IPPROTO_DONE);
+#endif
+}
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.h
new file mode 100644
index 000000000..55a174bae
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_input.h
@@ -0,0 +1,66 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.h 326672 2017-12-07 22:19:08Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_INPUT_H_
+#define _NETINET_SCTP_INPUT_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+void
+sctp_common_input_processing(struct mbuf **, int, int, int,
+ struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, struct sctp_chunkhdr *,
+ uint8_t,
+ uint8_t,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t, uint16_t,
+#endif
+ uint32_t, uint16_t);
+
+struct sctp_stream_reset_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq,
+ struct sctp_tmit_chunk **bchk);
+
+void sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries,
+ uint16_t *list);
+
+
+int sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked);
+
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_lock_userspace.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_lock_userspace.h
new file mode 100644
index 000000000..0aed34770
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_lock_userspace.h
@@ -0,0 +1,245 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2008-2012, by Brad Penoff. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#ifndef _NETINET_SCTP_LOCK_EMPTY_H_
+#define _NETINET_SCTP_LOCK_EMPTY_H_
+
+/*
+ * Empty Lock declarations for all other platforms. Pre-process away to
+ * nothing.
+ */
+
+/* __Userspace__ putting lock macros in same order as sctp_lock_bsd.h ...*/
+
+#define SCTP_IPI_COUNT_INIT()
+
+#define SCTP_STATLOG_INIT_LOCK()
+#define SCTP_STATLOG_LOCK()
+#define SCTP_STATLOG_UNLOCK()
+#define SCTP_STATLOG_DESTROY()
+
+#define SCTP_INP_INFO_LOCK_DESTROY()
+
+#define SCTP_INP_INFO_LOCK_INIT()
+#define SCTP_INP_INFO_RLOCK()
+#define SCTP_INP_INFO_WLOCK()
+#define SCTP_INP_INFO_TRYLOCK() 1
+#define SCTP_INP_INFO_RUNLOCK()
+#define SCTP_INP_INFO_WUNLOCK()
+
+#define SCTP_WQ_ADDR_INIT()
+#define SCTP_WQ_ADDR_DESTROY()
+#define SCTP_WQ_ADDR_LOCK()
+#define SCTP_WQ_ADDR_UNLOCK()
+#define SCTP_WQ_ADDR_LOCK_ASSERT()
+
+#define SCTP_IPI_ADDR_INIT()
+#define SCTP_IPI_ADDR_DESTROY()
+#define SCTP_IPI_ADDR_RLOCK()
+#define SCTP_IPI_ADDR_WLOCK()
+#define SCTP_IPI_ADDR_RUNLOCK()
+#define SCTP_IPI_ADDR_WUNLOCK()
+#define SCTP_IPI_ADDR_LOCK_ASSERT()
+#define SCTP_IPI_ADDR_WLOCK_ASSERT()
+
+#define SCTP_IPI_ITERATOR_WQ_INIT()
+#define SCTP_IPI_ITERATOR_WQ_DESTROY()
+#define SCTP_IPI_ITERATOR_WQ_LOCK()
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK()
+
+#define SCTP_IP_PKTLOG_INIT()
+#define SCTP_IP_PKTLOG_LOCK()
+#define SCTP_IP_PKTLOG_UNLOCK()
+#define SCTP_IP_PKTLOG_DESTROY()
+
+#define SCTP_INP_READ_INIT(_inp)
+#define SCTP_INP_READ_DESTROY(_inp)
+#define SCTP_INP_READ_LOCK(_inp)
+#define SCTP_INP_READ_UNLOCK(_inp)
+
+#define SCTP_INP_LOCK_INIT(_inp)
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
+#define SCTP_INP_LOCK_DESTROY(_inp)
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
+
+#define SCTP_INP_RLOCK(_inp)
+#define SCTP_INP_WLOCK(_inp)
+#define SCTP_INP_RLOCK_ASSERT(_inp)
+#define SCTP_INP_WLOCK_ASSERT(_inp)
+
+#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_SEND_LOCK(_tcb)
+#define SCTP_TCB_SEND_UNLOCK(_tcb)
+
+#define SCTP_INP_INCR_REF(_inp)
+#define SCTP_INP_DECR_REF(_inp)
+
+#define SCTP_ASOC_CREATE_LOCK(_inp)
+
+#define SCTP_INP_RUNLOCK(_inp)
+#define SCTP_INP_WUNLOCK(_inp)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp)
+
+
+#define SCTP_TCB_LOCK_INIT(_tcb)
+#define SCTP_TCB_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_LOCK(_tcb)
+#define SCTP_TCB_TRYLOCK(_tcb) 1
+#define SCTP_TCB_UNLOCK(_tcb)
+#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+
+
+
+#define SCTP_ITERATOR_LOCK_INIT()
+#define SCTP_ITERATOR_LOCK()
+#define SCTP_ITERATOR_UNLOCK()
+#define SCTP_ITERATOR_LOCK_DESTROY()
+
+
+
+#define SCTP_INCR_EP_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_ep++; \
+ } while (0)
+
+#define SCTP_DECR_EP_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_ep--; \
+ } while (0)
+
+#define SCTP_INCR_ASOC_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_asoc++; \
+ } while (0)
+
+#define SCTP_DECR_ASOC_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_asoc--; \
+ } while (0)
+
+#define SCTP_INCR_LADDR_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_laddr++; \
+ } while (0)
+
+#define SCTP_DECR_LADDR_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_laddr--; \
+ } while (0)
+
+#define SCTP_INCR_RADDR_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_raddr++; \
+ } while (0)
+
+#define SCTP_DECR_RADDR_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_raddr--; \
+ } while (0)
+
+#define SCTP_INCR_CHK_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_chunk++; \
+ } while (0)
+
+#define SCTP_DECR_CHK_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_chunk--; \
+ } while (0)
+
+#define SCTP_INCR_READQ_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_readq++; \
+ } while (0)
+
+#define SCTP_DECR_READQ_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_readq--; \
+ } while (0)
+
+#define SCTP_INCR_STRMOQ_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_strmoq++; \
+ } while (0)
+
+#define SCTP_DECR_STRMOQ_COUNT() \
+ do { \
+ sctppcbinfo.ipi_count_strmoq--; \
+ } while (0)
+
+
+/* these were in sctp_lock_empty.h but aren't in sctp_lock_bsd.h ... */
+#if 0
+#define SCTP_IPI_ADDR_LOCK()
+#define SCTP_IPI_ADDR_UNLOCK()
+#endif
+
+
+/* These were in sctp_lock_empty.h because they were commented out within
+ * user_include/user_socketvar.h. If they are NOT commented out
+ * in user_socketvar.h (because that seems the more natural place for them
+ * to live), then change this "if" to 0. Keep the "if" as 1 if these ARE
+ * indeed commented out in user_socketvar.h.
+ *
+ * This modularity is kept so this file can easily be chosen as an alternative
+ * to SCTP_PROCESS_LEVEL_LOCKS. If one defines SCTP_PROCESS_LEVEL_LOCKS in
+ * user_include/opt_sctp.h, then the file sctp_process_lock.h (which we didn't
+ * implement) is used, and that declares these locks already (so using
+ * SCTP_PROCESS_LEVEL_LOCKS *requires* that these definitions be commented out
+ * in user_socketvar.h).
+ */
+#if 1
+#define SOCK_LOCK(_so)
+#define SOCK_UNLOCK(_so)
+#define SOCKBUF_LOCK(_so_buf)
+#define SOCKBUF_UNLOCK(_so_buf)
+#define SOCKBUF_LOCK_ASSERT(_so_buf)
+#endif
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_os.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_os.h
new file mode 100644
index 000000000..e01b95777
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_os.h
@@ -0,0 +1,92 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_os.h 361872 2020-06-06 18:20:09Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_OS_H_
+#define _NETINET_SCTP_OS_H_
+
+/*
+ * General kernel memory allocation:
+ * SCTP_MALLOC(element, type, size, name)
+ * SCTP_FREE(element)
+ * Kernel memory allocation for "soname" - memory must be zeroed.
+ * SCTP_MALLOC_SONAME(name, type, size)
+ * SCTP_FREE_SONAME(name)
+ */
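+/*
+ * A minimal usage sketch (illustrative only; "struct sctp_foo" is a
+ * hypothetical type, and the exact SCTP_FREE() arity is per-OS - the
+ * userspace port in sctp_os_userspace.h takes (var, type)):
+ *
+ *     struct sctp_foo *p;
+ *
+ *     SCTP_MALLOC(p, struct sctp_foo *, sizeof(struct sctp_foo), SCTP_M_MAP);
+ *     if (p != NULL) {
+ *             memset(p, 0, sizeof(struct sctp_foo));
+ *             ...
+ *             SCTP_FREE(p, SCTP_M_MAP);
+ *     }
+ */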
+
+/*
+ * Zone(pool) allocation routines: MUST be defined for each OS.
+ * zone = zone/pool pointer.
+ * name = string name of the zone/pool.
+ * size = size of each zone/pool element.
+ * number = number of elements in zone/pool.
+ * type = structure type to allocate
+ *
+ * sctp_zone_t
+ * SCTP_ZONE_INIT(zone, name, size, number)
+ * SCTP_ZONE_GET(zone, type)
+ * SCTP_ZONE_FREE(zone, element)
+ * SCTP_ZONE_DESTROY(zone)
+ */
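+/*
+ * A minimal usage sketch (illustrative only; "chunk_zone" and the element
+ * count are hypothetical, but struct sctp_tmit_chunk and the calling
+ * pattern match the macros above):
+ *
+ *     sctp_zone_t chunk_zone;
+ *     struct sctp_tmit_chunk *chk;
+ *
+ *     SCTP_ZONE_INIT(chunk_zone, "sctp_chunks", sizeof(struct sctp_tmit_chunk), 1024);
+ *     chk = SCTP_ZONE_GET(chunk_zone, struct sctp_tmit_chunk);
+ *     if (chk != NULL) {
+ *             ...
+ *             SCTP_ZONE_FREE(chunk_zone, chk);
+ *     }
+ *     SCTP_ZONE_DESTROY(chunk_zone);
+ */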
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/sctp_os_bsd.h>
+#else
+#define MODULE_GLOBAL(_B) (_B)
+#endif
+
+#if defined(__Userspace__)
+#include <netinet/sctp_os_userspace.h>
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#include <netinet/sctp_os_macosx.h>
+#endif
+
+#if defined(_WIN32) && !defined(__Userspace__)
+#include <netinet/sctp_os_windows.h>
+#endif
+
+/* All OSes must implement this address gatherer. If
+ * no VRFs exist, then VRF 0 is the only one and all
+ * addresses and ifns live here.
+ */
+#define SCTP_DEFAULT_VRF 0
+void sctp_init_vrf_list(int vrfid);
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_os_userspace.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_os_userspace.h
new file mode 100644
index 000000000..3a9407a82
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_os_userspace.h
@@ -0,0 +1,1140 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2008-2011, by Brad Penoff. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __sctp_os_userspace_h__
+#define __sctp_os_userspace_h__
+/*
+ * Userspace includes
+ * All the opt_xxx.h files are placed in the kernel build directory.
+ * We will place them in userspace stack build directory.
+ */
+
+#include <errno.h>
+
+#if defined(_WIN32)
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <iphlpapi.h>
+#include <mswsock.h>
+#include <windows.h>
+#include "user_environment.h"
+typedef CRITICAL_SECTION userland_mutex_t;
+#if WINVER < 0x0600
+enum {
+ C_SIGNAL = 0,
+ C_BROADCAST = 1,
+ C_MAX_EVENTS = 2
+};
+typedef struct
+{
+ u_int waiters_count;
+ CRITICAL_SECTION waiters_count_lock;
+ HANDLE events_[C_MAX_EVENTS];
+} userland_cond_t;
+void InitializeXPConditionVariable(userland_cond_t *);
+void DeleteXPConditionVariable(userland_cond_t *);
+int SleepXPConditionVariable(userland_cond_t *, userland_mutex_t *);
+void WakeAllXPConditionVariable(userland_cond_t *);
+#define InitializeConditionVariable(cond) InitializeXPConditionVariable(cond)
+#define DeleteConditionVariable(cond) DeleteXPConditionVariable(cond)
+#define SleepConditionVariableCS(cond, mtx, time) SleepXPConditionVariable(cond, mtx)
+#define WakeAllConditionVariable(cond) WakeAllXPConditionVariable(cond)
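+/*
+ * With the mappings above, pre-Vista code uses the same calling sequence as
+ * the native Vista condition-variable API. A hedged sketch (mtx, cond and
+ * predicate are hypothetical; the mutex is assumed to be set up elsewhere
+ * with InitializeCriticalSection(), and this XP emulation ignores the
+ * timeout argument):
+ *
+ *     userland_mutex_t mtx;
+ *     userland_cond_t cond;
+ *
+ *     InitializeConditionVariable(&cond);
+ *     EnterCriticalSection(&mtx);
+ *     while (!predicate) {
+ *             SleepConditionVariableCS(&cond, &mtx, INFINITE);
+ *     }
+ *     LeaveCriticalSection(&mtx);
+ *
+ * with another thread calling WakeAllConditionVariable(&cond) to release
+ * the waiters, and DeleteConditionVariable(&cond) run at teardown.
+ */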
+#else
+#define DeleteConditionVariable(cond)
+typedef CONDITION_VARIABLE userland_cond_t;
+#endif
+typedef HANDLE userland_thread_t;
+#define ADDRESS_FAMILY unsigned __int8
+#define IPVERSION 4
+#define MAXTTL 255
+/* VS2010 comes with stdint.h */
+#if !defined(_MSC_VER) || (_MSC_VER >= 1600)
+#include <stdint.h>
+#else
+typedef unsigned __int64 uint64_t;
+typedef unsigned __int32 uint32_t;
+typedef __int32 int32_t;
+typedef unsigned __int16 uint16_t;
+typedef __int16 int16_t;
+typedef unsigned __int8 uint8_t;
+typedef __int8 int8_t;
+#endif
+#ifndef _SIZE_T_DEFINED
+typedef __int32 size_t;
+#endif
+typedef unsigned __int32 u_int;
+typedef unsigned char u_char;
+typedef unsigned __int16 u_short;
+typedef unsigned __int8 sa_family_t;
+#ifndef _SSIZE_T_DEFINED
+typedef __int64 ssize_t;
+#endif
+#if !defined(__MINGW32__)
+#define __func__ __FUNCTION__
+#endif
+#ifndef EWOULDBLOCK
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#endif
+#ifndef EINPROGRESS
+#define EINPROGRESS WSAEINPROGRESS
+#endif
+#ifndef EALREADY
+#define EALREADY WSAEALREADY
+#endif
+#ifndef ENOTSOCK
+#define ENOTSOCK WSAENOTSOCK
+#endif
+#ifndef EDESTADDRREQ
+#define EDESTADDRREQ WSAEDESTADDRREQ
+#endif
+#ifndef EMSGSIZE
+#define EMSGSIZE WSAEMSGSIZE
+#endif
+#ifndef EPROTOTYPE
+#define EPROTOTYPE WSAEPROTOTYPE
+#endif
+#ifndef ENOPROTOOPT
+#define ENOPROTOOPT WSAENOPROTOOPT
+#endif
+#ifndef EPROTONOSUPPORT
+#define EPROTONOSUPPORT WSAEPROTONOSUPPORT
+#endif
+#ifndef ESOCKTNOSUPPORT
+#define ESOCKTNOSUPPORT WSAESOCKTNOSUPPORT
+#endif
+#ifndef EOPNOTSUPP
+#define EOPNOTSUPP WSAEOPNOTSUPP
+#endif
+#ifndef ENOTSUP
+#define ENOTSUP WSAEOPNOTSUPP
+#endif
+#ifndef EPFNOSUPPORT
+#define EPFNOSUPPORT WSAEPFNOSUPPORT
+#endif
+#ifndef EAFNOSUPPORT
+#define EAFNOSUPPORT WSAEAFNOSUPPORT
+#endif
+#ifndef EADDRINUSE
+#define EADDRINUSE WSAEADDRINUSE
+#endif
+#ifndef EADDRNOTAVAIL
+#define EADDRNOTAVAIL WSAEADDRNOTAVAIL
+#endif
+#ifndef ENETDOWN
+#define ENETDOWN WSAENETDOWN
+#endif
+#ifndef ENETUNREACH
+#define ENETUNREACH WSAENETUNREACH
+#endif
+#ifndef ENETRESET
+#define ENETRESET WSAENETRESET
+#endif
+#ifndef ECONNABORTED
+#define ECONNABORTED WSAECONNABORTED
+#endif
+#ifndef ECONNRESET
+#define ECONNRESET WSAECONNRESET
+#endif
+#ifndef ENOBUFS
+#define ENOBUFS WSAENOBUFS
+#endif
+#ifndef EISCONN
+#define EISCONN WSAEISCONN
+#endif
+#ifndef ENOTCONN
+#define ENOTCONN WSAENOTCONN
+#endif
+#ifndef ESHUTDOWN
+#define ESHUTDOWN WSAESHUTDOWN
+#endif
+#ifndef ETOOMANYREFS
+#define ETOOMANYREFS WSAETOOMANYREFS
+#endif
+#ifndef ETIMEDOUT
+#define ETIMEDOUT WSAETIMEDOUT
+#endif
+#ifndef ECONNREFUSED
+#define ECONNREFUSED WSAECONNREFUSED
+#endif
+#ifndef ELOOP
+#define ELOOP WSAELOOP
+#endif
+#ifndef EHOSTDOWN
+#define EHOSTDOWN WSAEHOSTDOWN
+#endif
+#ifndef EHOSTUNREACH
+#define EHOSTUNREACH WSAEHOSTUNREACH
+#endif
+#ifndef EPROCLIM
+#define EPROCLIM WSAEPROCLIM
+#endif
+#ifndef EUSERS
+#define EUSERS WSAEUSERS
+#endif
+#ifndef EDQUOT
+#define EDQUOT WSAEDQUOT
+#endif
+#ifndef ESTALE
+#define ESTALE WSAESTALE
+#endif
+#ifndef EREMOTE
+#define EREMOTE WSAEREMOTE
+#endif
+
+typedef char* caddr_t;
+
+#define bzero(buf, len) memset(buf, 0, len)
+#define bcopy(srcKey, dstKey, len) memcpy(dstKey, srcKey, len)
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900) && !defined(__MINGW32__)
+#define SCTP_SNPRINTF(data, size, format, ...) \
+ if (_snprintf_s(data, size, _TRUNCATE, format, __VA_ARGS__) < 0) { \
+ data[0] = '\0'; \
+ }
+#else
+#define SCTP_SNPRINTF(data, ...) \
+ if (snprintf(data, __VA_ARGS__) < 0 ) { \
+ data[0] = '\0'; \
+ }
+#endif
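+/*
+ * Either way the destination always ends up as a valid (possibly truncated
+ * or empty) string. Typical call, as used throughout sctp_input.c:
+ *
+ *     char msg[SCTP_DIAG_INFO_LEN];
+ *
+ *     SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+ */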
+
+#define inline __inline
+#define __inline__ __inline
+#define MSG_EOR 0x8 /* data completes record */
+#define MSG_DONTWAIT 0x80 /* this message should be nonblocking */
+
+#ifdef CMSG_DATA
+#undef CMSG_DATA
+#endif
+/*
+ * The following definitions should apply iff WINVER < 0x0600
+ * but that check doesn't work in all cases. So be more pedantic...
+ */
+#define CMSG_DATA(x) WSA_CMSG_DATA(x)
+#define CMSG_ALIGN(x) WSA_CMSGDATA_ALIGN(x)
+#ifndef CMSG_FIRSTHDR
+#define CMSG_FIRSTHDR(x) WSA_CMSG_FIRSTHDR(x)
+#endif
+#ifndef CMSG_NXTHDR
+#define CMSG_NXTHDR(x, y) WSA_CMSG_NXTHDR(x, y)
+#endif
+#ifndef CMSG_SPACE
+#define CMSG_SPACE(x) WSA_CMSG_SPACE(x)
+#endif
+#ifndef CMSG_LEN
+#define CMSG_LEN(x) WSA_CMSG_LEN(x)
+#endif
+
+/**** from sctp_os_windows.h ***************/
+#define SCTP_IFN_IS_IFT_LOOP(ifn) ((ifn)->ifn_type == IFT_LOOP)
+#define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP)
+
+/*
+ * Access to IFN's to help with src-addr-selection
+ */
+/* This could return VOID if the index works but for BSD we provide both. */
+#define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) \
+ ((ro)->ro_rt != NULL ? (ro)->ro_rt->rt_ifp : NULL)
+#define SCTP_ROUTE_HAS_VALID_IFN(ro) \
+ ((ro)->ro_rt && (ro)->ro_rt->rt_ifp)
+/******************************************/
+
+#define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) 1 /* compiles... TODO use routing socket to determine */
+
+#define BIG_ENDIAN 1
+#define LITTLE_ENDIAN 0
+#ifdef WORDS_BIGENDIAN
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+
+#else /* !defined(_WIN32) */
+#include <sys/socket.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__native_client__) || defined(__Fuchsia__)
+#include <pthread.h>
+#endif
+typedef pthread_mutex_t userland_mutex_t;
+typedef pthread_cond_t userland_cond_t;
+typedef pthread_t userland_thread_t;
+#endif
+
+#if defined(_WIN32) || defined(__native_client__)
+
+#define IFNAMSIZ 64
+
+#define random() rand()
+#define srandom(s) srand(s)
+
+#define timeradd(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
+ if ((vvp)->tv_usec >= 1000000) { \
+ (vvp)->tv_sec++; \
+ (vvp)->tv_usec -= 1000000; \
+ } \
+ } while (0)
+
+#define timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
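+/*
+ * These mirror the BSD <sys/time.h> timeradd()/timersub() helpers,
+ * normalizing tv_usec into [0, 1000000). For example, adding
+ * {1, 700000} and {0, 600000} carries into the seconds field and
+ * yields {2, 300000}.
+ */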
+
+/*#include <packon.h>
+#pragma pack(push, 1)*/
+struct ip {
+ u_char ip_hl:4, ip_v:4;
+ u_char ip_tos;
+ u_short ip_len;
+ u_short ip_id;
+ u_short ip_off;
+#define IP_RP 0x8000
+#define IP_DF 0x4000
+#define IP_MF 0x2000
+#define IP_OFFMASK 0x1fff
+ u_char ip_ttl;
+ u_char ip_p;
+ u_short ip_sum;
+ struct in_addr ip_src, ip_dst;
+};
+
+struct ifaddrs {
+ struct ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ struct sockaddr *ifa_addr;
+ struct sockaddr *ifa_netmask;
+ struct sockaddr *ifa_dstaddr;
+ void *ifa_data;
+};
+
+struct udphdr {
+ uint16_t uh_sport;
+ uint16_t uh_dport;
+ uint16_t uh_ulen;
+ uint16_t uh_sum;
+};
+
+struct iovec {
+ size_t len;
+ char *buf;
+};
+
+#define iov_base buf
+#define iov_len len
+
+struct ifa_msghdr {
+ uint16_t ifam_msglen;
+ unsigned char ifam_version;
+ unsigned char ifam_type;
+ uint32_t ifam_addrs;
+ uint32_t ifam_flags;
+ uint16_t ifam_index;
+ uint32_t ifam_metric;
+};
+
+struct ifdevmtu {
+ int ifdm_current;
+ int ifdm_min;
+ int ifdm_max;
+};
+
+struct ifkpi {
+ unsigned int ifk_module_id;
+ unsigned int ifk_type;
+ union {
+ void *ifk_ptr;
+ int ifk_value;
+ } ifk_data;
+};
+#endif
+
+#if defined(_WIN32)
+int Win_getifaddrs(struct ifaddrs**);
+#define getifaddrs(interfaces) (int)Win_getifaddrs(interfaces)
+int win_if_nametoindex(const char *);
+#define if_nametoindex(x) win_if_nametoindex(x)
+#endif
+
+#define mtx_lock(arg1)
+#define mtx_unlock(arg1)
+#define mtx_assert(arg1,arg2)
+#define MA_OWNED 7 /* sys/mutex.h typically on FreeBSD */
+#if !defined(__FreeBSD__)
+struct mtx {int dummy;};
+#if !defined(__NetBSD__)
+struct selinfo {int dummy;};
+#endif
+struct sx {int dummy;};
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+/* #include <sys/param.h> in FreeBSD defines MSIZE */
+/* #include <sys/ktr.h> */
+/* #include <sys/systm.h> */
+#if defined(HAVE_SYS_QUEUE_H)
+#include <sys/queue.h>
+#else
+#include <user_queue.h>
+#endif
+#include <user_malloc.h>
+/* #include <sys/kernel.h> */
+/* #include <sys/sysctl.h> */
+/* #include <sys/protosw.h> */
+/* on FreeBSD, this results in a redefinition of SOCK(BUF)_(UN)LOCK and
+ * unknown type of struct mtx for sb_mtx in struct sockbuf */
+#include "user_socketvar.h" /* MALLOC_DECLARE's M_PCB. Replacement for sys/socketvar.h */
+/* #include <sys/jail.h> */
+/* #include <sys/sysctl.h> */
+#include <user_environment.h>
+#include <user_atomic.h>
+#include <user_mbuf.h>
+/* #include <sys/uio.h> */
+/* #include <sys/lock.h> */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/rwlock.h>
+#endif
+/* #include <sys/kthread.h> */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/priv.h>
+#endif
+/* #include <sys/random.h> */
+#include <limits.h>
+/* #include <machine/cpu.h> */
+
+#if defined(__APPLE__)
+/* was a 0 byte file. needed for structs if_data(64) and net_event_data */
+#include <net/if_var.h>
+#endif
+#if defined(__FreeBSD__)
+#include <net/if_types.h>
+/* #include <net/if_var.h> was a 0 byte file. causes struct mtx redefinition */
+#endif
+/* OOTB only - dummy route used at the moment. should we port route to
+ * userspace as well? */
+/* on FreeBSD, this results in a redefinition of struct route */
+/* #include <net/route.h> */
+#if !defined(_WIN32) && !defined(__native_client__)
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#endif
+#if defined(HAVE_NETINET_IP_ICMP_H)
+#include <netinet/ip_icmp.h>
+#else
+#include <user_ip_icmp.h>
+#endif
+/* #include <netinet/in_pcb.h> ported to userspace */
+#include <user_inpcb.h>
+
+/* for getifaddrs */
+#include <sys/types.h>
+#if !defined(_WIN32)
+#if defined(INET) || defined(INET6)
+#include <ifaddrs.h>
+#endif
+
+/* for ioctl */
+#include <sys/ioctl.h>
+
+/* for close, etc. */
+#include <unistd.h>
+/* for gettimeofday */
+#include <sys/time.h>
+#endif
+
+/* lots of errno's used and needed in userspace */
+
+/* for offsetof */
+#include <stddef.h>
+
+#if defined(SCTP_PROCESS_LEVEL_LOCKS) && !defined(_WIN32)
+/* for pthread_mutex_lock, pthread_mutex_unlock, etc. */
+#include <pthread.h>
+#endif
+
+#ifdef IPSEC
+#include <netipsec/ipsec.h>
+#include <netipsec/key.h>
+#endif /* IPSEC */
+
+#ifdef INET6
+#if defined(__FreeBSD__)
+#include <sys/domain.h>
+#endif
+#ifdef IPSEC
+#include <netipsec/ipsec6.h>
+#endif
+#if !defined(_WIN32)
+#include <netinet/ip6.h>
+#endif
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(_WIN32)
+#include "user_ip6_var.h"
+#else
+#include <netinet6/ip6_var.h>
+#endif
+#if defined(__FreeBSD__)
+#include <netinet6/in6_pcb.h>
+#include <netinet6/ip6protosw.h>
+/* #include <netinet6/nd6.h> was a 0 byte file */
+#include <netinet6/scope6_var.h>
+#endif
+#endif /* INET6 */
+
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#endif
+
+#include "netinet/sctp_sha1.h"
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/ip_options.h>
+#endif
+
+#define SCTP_PRINTF(...) \
+ if (SCTP_BASE_VAR(debug_printf)) { \
+ SCTP_BASE_VAR(debug_printf)(__VA_ARGS__); \
+ }
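+/*
+ * SCTP_BASE_VAR(debug_printf) is the application-supplied logging hook; in
+ * the public usrsctp API it is installed via the last argument of
+ * usrsctp_init(). A hedged sketch (my_debug_printf is a hypothetical
+ * user-defined printf-style function; 9899 is the SCTP-over-UDP port):
+ *
+ *     usrsctp_init(9899, NULL, my_debug_printf);
+ */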
+
+/* Declare all the malloc names for all the various mallocs */
+MALLOC_DECLARE(SCTP_M_MAP);
+MALLOC_DECLARE(SCTP_M_STRMI);
+MALLOC_DECLARE(SCTP_M_STRMO);
+MALLOC_DECLARE(SCTP_M_ASC_ADDR);
+MALLOC_DECLARE(SCTP_M_ASC_IT);
+MALLOC_DECLARE(SCTP_M_AUTH_CL);
+MALLOC_DECLARE(SCTP_M_AUTH_KY);
+MALLOC_DECLARE(SCTP_M_AUTH_HL);
+MALLOC_DECLARE(SCTP_M_AUTH_IF);
+MALLOC_DECLARE(SCTP_M_STRESET);
+MALLOC_DECLARE(SCTP_M_CMSG);
+MALLOC_DECLARE(SCTP_M_COPYAL);
+MALLOC_DECLARE(SCTP_M_VRF);
+MALLOC_DECLARE(SCTP_M_IFA);
+MALLOC_DECLARE(SCTP_M_IFN);
+MALLOC_DECLARE(SCTP_M_TIMW);
+MALLOC_DECLARE(SCTP_M_MVRF);
+MALLOC_DECLARE(SCTP_M_ITER);
+MALLOC_DECLARE(SCTP_M_SOCKOPT);
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+
+#define SCTP_GET_CYCLECOUNT get_cyclecount()
+#define SCTP_CTR6 sctp_log_trace
+
+#else
+#define SCTP_CTR6 CTR6
+#endif
+
+/* Empty ktr statement for __Userspace__ (similar to what is done for mac) */
+#define CTR6(m, d, p1, p2, p3, p4, p5, p6)
+
+
+
+#define SCTP_BASE_INFO(__m) system_base_info.sctppcbinfo.__m
+#define SCTP_BASE_STATS system_base_info.sctpstat
+#define SCTP_BASE_STAT(__m) system_base_info.sctpstat.__m
+#define SCTP_BASE_SYSCTL(__m) system_base_info.sctpsysctl.__m
+#define SCTP_BASE_VAR(__m) system_base_info.__m
+
+/*
+ *
+ */
+#if !defined(__APPLE__)
+#define USER_ADDR_NULL (NULL) /* FIX ME: temp */
+#endif
+
+#include <netinet/sctp_constants.h>
+#if defined(SCTP_DEBUG)
+#define SCTPDBG(level, ...) \
+{ \
+ do { \
+ if (SCTP_BASE_SYSCTL(sctp_debug_on) & level) { \
+ SCTP_PRINTF(__VA_ARGS__); \
+ } \
+ } while (0); \
+}
+#define SCTPDBG_ADDR(level, addr) \
+{ \
+ do { \
+ if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \
+ sctp_print_address(addr); \
+ } \
+ } while (0); \
+}
+#else
+#define SCTPDBG(level, ...)
+#define SCTPDBG_ADDR(level, addr)
+#endif
+
+#ifdef SCTP_LTRACE_CHUNKS
+#define SCTP_LTRACE_CHK(a, b, c, d) if(sctp_logging_level & SCTP_LTRACE_CHUNK_ENABLE) CTR6(KTR_SUBSYS, "SCTP:%d[%d]:%x-%x-%x-%x", SCTP_LOG_CHUNK_PROC, 0, a, b, c, d)
+#else
+#define SCTP_LTRACE_CHK(a, b, c, d)
+#endif
+
+#ifdef SCTP_LTRACE_ERRORS
+#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) \
+ if (sctp_logging_level & SCTP_LTRACE_ERROR_ENABLE) \
+ SCTP_PRINTF("mbuf:%p inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \
+ (void *)m, (void *)inp, (void *)stcb, (void *)net, file, __LINE__, err);
+#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) \
+ if (sctp_logging_level & SCTP_LTRACE_ERROR_ENABLE) \
+ SCTP_PRINTF("inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \
+ (void *)inp, (void *)stcb, (void *)net, file, __LINE__, err);
+#else
+#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err)
+#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err)
+#endif
+
+
+/*
+ * Local address and interface list handling
+ */
+#define SCTP_MAX_VRF_ID 0
+#define SCTP_SIZE_OF_VRF_HASH 3
+#define SCTP_IFNAMSIZ IFNAMSIZ
+#define SCTP_DEFAULT_VRFID 0
+#define SCTP_VRF_ADDR_HASH_SIZE 16
+#define SCTP_VRF_IFN_HASH_SIZE 3
+#define SCTP_INIT_VRF_TABLEID(vrf)
+
+#if !defined(_WIN32)
+#define SCTP_IFN_IS_IFT_LOOP(ifn) (strncmp((ifn)->ifn_name, "lo", 2) == 0)
+/* BSD definition */
+/* #define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP) */
+/* only used in IPv6 scenario, which isn't supported yet */
+#define SCTP_ROUTE_IS_REAL_LOOP(ro) 0
+
+/*
+ * Access to IFN's to help with src-addr-selection
+ */
+/* This could return VOID if the index works but for BSD we provide both. */
+#define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) (void *)ro->ro_rt->rt_ifp
+#define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) 1 /* compiles... TODO use routing socket to determine */
+#define SCTP_ROUTE_HAS_VALID_IFN(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifp)
+#endif
+
+/*
+ * general memory allocation
+ */
+#define SCTP_MALLOC(var, type, size, name) \
+ do { \
+ MALLOC(var, type, size, name, M_NOWAIT); \
+ } while (0)
+
+#define SCTP_FREE(var, type) FREE(var, type)
+
+#define SCTP_MALLOC_SONAME(var, type, size) \
+ do { \
+ MALLOC(var, type, size, M_SONAME, (M_WAITOK | M_ZERO)); \
+ } while (0)
+
+#define SCTP_FREE_SONAME(var) FREE(var, M_SONAME)
+
+#define SCTP_PROCESS_STRUCT struct proc *
+
+/*
+ * zone allocation functions
+ */
+
+
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+/*typedef size_t sctp_zone_t;*/
+#define SCTP_ZONE_INIT(zone, name, size, number) { \
+ zone = size; \
+}
+
+/* __Userspace__ SCTP_ZONE_GET: allocate element from the zone */
+#define SCTP_ZONE_GET(zone, type) \
+ (type *)malloc(zone);
+
+
+/* __Userspace__ SCTP_ZONE_FREE: free element from the zone */
+#define SCTP_ZONE_FREE(zone, element) { \
+ free(element); \
+}
+
+#define SCTP_ZONE_DESTROY(zone)
+#else
+/*__Userspace__
+ Compiling & linking notes: Needs libumem, which has been placed in ./user_lib
+ All userspace header files are in ./user_include. Makefile will need the
+ following.
+ CFLAGS = -I./ -Wall
+ LDFLAGS = -L./user_lib -R./user_lib -lumem
+*/
+#include "user_include/umem.h"
+
+/* __Userspace__ SCTP_ZONE_INIT: initialize the zone */
+/*
+ __Userspace__
+ No equivalent function to uma_zone_set_max added yet. (See SCTP_ZONE_INIT in sctp_os_bsd.h
+ for reference). It may not be required, as
+ http://nixdoc.net/man-pages/FreeBSD/uma_zalloc.9.html mentions that
+ max limits may not be enforced on systems with more than one CPU.
+*/
+#define SCTP_ZONE_INIT(zone, name, size, number) { \
+ zone = umem_cache_create(name, size, 0, NULL, NULL, NULL, NULL, NULL, 0); \
+ }
+
+/* __Userspace__ SCTP_ZONE_GET: allocate element from the zone */
+#define SCTP_ZONE_GET(zone, type) \
+ (type *)umem_cache_alloc(zone, UMEM_DEFAULT);
+
+
+/* __Userspace__ SCTP_ZONE_FREE: free element from the zone */
+#define SCTP_ZONE_FREE(zone, element) \
+ umem_cache_free(zone, element);
+
+
+/* __Userspace__ SCTP_ZONE_DESTROY: destroy the zone */
+#define SCTP_ZONE_DESTROY(zone) \
+ umem_cache_destroy(zone);
+#endif
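+
+/* Illustrative sketch (not part of the import): both back ends share the
+ * same call pattern; under SCTP_SIMPLE_ALLOCATOR the "zone" degenerates to
+ * the element size handed to malloc()/free(). The sctp_zone_t type is
+ * assumed to be the one this port declares for zones.
+ */
+#if 0
+	sctp_zone_t zone;
+	struct sctp_tcb *obj;
+
+	SCTP_ZONE_INIT(zone, "tcb-zone", sizeof(struct sctp_tcb), 1024);
+	obj = SCTP_ZONE_GET(zone, struct sctp_tcb);
+	if (obj != NULL) {
+		SCTP_ZONE_FREE(zone, obj);
+	}
+	SCTP_ZONE_DESTROY(zone);
+#endif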
+
+/*
+ * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
+ */
+void *sctp_hashinit_flags(int elements, struct malloc_type *type,
+ u_long *hashmask, int flags);
+void
+sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask);
+
+void
+sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask);
+
+
+#define HASH_NOWAIT 0x00000001
+#define HASH_WAITOK 0x00000002
+
+/* M_PCB is MALLOC_DECLARE'd in sys/socketvar.h */
+#define SCTP_HASH_INIT(size, hashmark) sctp_hashinit_flags(size, M_PCB, hashmark, HASH_NOWAIT)
+
+#define SCTP_HASH_FREE(table, hashmark) sctp_hashdestroy(table, M_PCB, hashmark)
+
+#define SCTP_HASH_FREE_DESTROY(table, hashmark) sctp_hashfreedestroy(table, M_PCB, hashmark)
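+
+/* Illustrative sketch (not part of the import): following the kernel
+ * hashinit() contract, SCTP_HASH_INIT() returns an array of list heads and
+ * stores the bucket mask, so lookups index with (hashval & hashmark).
+ */
+#if 0
+	u_long hashmark;
+	void *table;
+
+	table = SCTP_HASH_INIT(64, &hashmark);
+	if (table != NULL) {
+		SCTP_HASH_FREE(table, hashmark);
+	}
+#endif
+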
+#define SCTP_M_COPYM m_copym
+
+/*
+ * timers
+ */
+/* __Userspace__
+ * user_sctp_callout.h has typedef struct sctp_callout sctp_os_timer_t;
+ * which is used in the timer related functions such as
+ * SCTP_OS_TIMER_INIT etc.
+ */
+#include <netinet/sctp_callout.h>
+
+/* __Userspace__ Creating a receive thread */
+#include <user_recv_thread.h>
+
+/*__Userspace__ defining KTR_SUBSYS 1 as done in sctp_os_macosx.h */
+#define KTR_SUBSYS 1
+
+/* The packed define for 64 bit platforms */
+#if !defined(_WIN32)
+#define SCTP_PACKED __attribute__((packed))
+#define SCTP_UNUSED __attribute__((unused))
+#else
+#define SCTP_PACKED
+#define SCTP_UNUSED
+#endif
+
+/*
+ * Functions
+ */
+/* Mbuf manipulation and access macros */
+#define SCTP_BUF_LEN(m) (m->m_len)
+#define SCTP_BUF_NEXT(m) (m->m_next)
+#define SCTP_BUF_NEXT_PKT(m) (m->m_nextpkt)
+#define SCTP_BUF_RESV_UF(m, size) m->m_data += size
+#define SCTP_BUF_AT(m, size) m->m_data + size
+#define SCTP_BUF_IS_EXTENDED(m) (m->m_flags & M_EXT)
+#define SCTP_BUF_EXTEND_SIZE(m) (m->m_ext.ext_size)
+#define SCTP_BUF_TYPE(m) (m->m_type)
+#define SCTP_BUF_RECVIF(m) (m->m_pkthdr.rcvif)
+#define SCTP_BUF_PREPEND M_PREPEND
+
+#define SCTP_ALIGN_TO_END(m, len) if (m->m_flags & M_PKTHDR) { \
+ MH_ALIGN(m, len); \
+ } else if ((m->m_flags & M_EXT) == 0) { \
+ M_ALIGN(m, len); \
+ }
+
+#if !defined(_WIN32)
+#define SCTP_SNPRINTF(data, ...) \
+	do { \
+		if (snprintf(data, __VA_ARGS__) < 0) { \
+			data[0] = '\0'; \
+		} \
+	} while (0)
+#endif
+
+/* We size this so that, with the default
+ * 65k packet log, up to 4 threads can each
+ * be writing a 16k packet before we would
+ * hit a problem.
+ */
+#define SCTP_PKTLOG_WRITERS_NEED_LOCK 3
+
+
+/*
+ * routes, output, etc.
+ */
+
+typedef struct sctp_route sctp_route_t;
+typedef struct sctp_rtentry sctp_rtentry_t;
+
+static inline void sctp_userspace_rtalloc(sctp_route_t *ro)
+{
+ if (ro->ro_rt != NULL) {
+ ro->ro_rt->rt_refcnt++;
+ return;
+ }
+
+ ro->ro_rt = (sctp_rtentry_t *) malloc(sizeof(sctp_rtentry_t));
+ if (ro->ro_rt == NULL)
+ return;
+
+ /* initialize */
+ memset(ro->ro_rt, 0, sizeof(sctp_rtentry_t));
+ ro->ro_rt->rt_refcnt = 1;
+
+ /* set MTU */
+ /* TODO set this based on the ro->ro_dst, looking up MTU with routing socket */
+#if 0
+ if (userspace_rawroute == -1) {
+ userspace_rawroute = socket(AF_ROUTE, SOCK_RAW, 0);
+ if (userspace_rawroute == -1)
+ return;
+ }
+#endif
+ ro->ro_rt->rt_rmx.rmx_mtu = 1500; /* FIXME temporary solution */
+
+ /* TODO enable the ability to obtain interface index of route for
+ * SCTP_GET_IF_INDEX_FROM_ROUTE macro.
+ */
+}
+#define SCTP_RTALLOC(ro, vrf_id, fibnum) sctp_userspace_rtalloc((sctp_route_t *)ro)
+
+/* dummy rtfree needed once user_route.h is included */
+static inline void sctp_userspace_rtfree(sctp_rtentry_t *rt)
+{
+	if (rt == NULL) {
+		return;
+	}
+	if (--rt->rt_refcnt > 0) {
+		return;
+	}
+ free(rt);
+}
+#define rtfree(arg1) sctp_userspace_rtfree(arg1)
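+
+/* Illustrative sketch (not part of the import): routes are reference
+ * counted, so each SCTP_RTALLOC() against a cached route must be balanced
+ * by an rtfree(); the entry is only free()d once the count reaches zero.
+ */
+#if 0
+	sctp_route_t ro;
+
+	memset(&ro, 0, sizeof(ro));
+	sctp_userspace_rtalloc(&ro);	/* allocates, rt_refcnt == 1 */
+	sctp_userspace_rtalloc(&ro);	/* cache hit, rt_refcnt == 2 */
+	rtfree(ro.ro_rt);		/* rt_refcnt == 1 */
+	rtfree(ro.ro_rt);		/* rt_refcnt == 0, memory released */
+#endif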
+
+
+/*************************/
+/* MTU */
+/*************************/
+int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af);
+
+#define SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, af) sctp_userspace_get_mtu_from_ifn(ifn_index, af)
+
+#define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, rt) ((rt != NULL) ? rt->rt_rmx.rmx_mtu : 0)
+
+#define SCTP_GATHER_MTU_FROM_INTFC(sctp_ifn) sctp_userspace_get_mtu_from_ifn(if_nametoindex(((struct ifaddrs *) (sctp_ifn))->ifa_name), AF_INET)
+
+#define SCTP_SET_MTU_OF_ROUTE(sa, rt, mtu) do { \
+ if (rt != NULL) \
+ rt->rt_rmx.rmx_mtu = mtu; \
+ } while(0)
+
+
+/*************************/
+/* These are for logging */
+/*************************/
+/* return the base ext data pointer */
+#define SCTP_BUF_EXTEND_BASE(m) (m->m_ext.ext_buf)
+/* return the refcnt of the data pointer */
+#define SCTP_BUF_EXTEND_REFCNT(m) (*m->m_ext.ref_cnt)
+/* return any buffer related flags, this is
+ * used beyond logging for apple only.
+ */
+#define SCTP_BUF_GET_FLAGS(m) (m->m_flags)
+
+/* For BSD this just accesses the M_PKTHDR length
+ * so it operates on an mbuf with the hdr flag. Other
+ * OSes may have separate packet header and mbuf
+ * chain pointers... thus the macro.
+ */
+#define SCTP_HEADER_TO_CHAIN(m) (m)
+#define SCTP_DETACH_HEADER_FROM_CHAIN(m)
+#define SCTP_HEADER_LEN(m) ((m)->m_pkthdr.len)
+#define SCTP_GET_HEADER_FOR_OUTPUT(o_pak) 0
+#define SCTP_RELEASE_HEADER(m)
+#define SCTP_RELEASE_PKT(m) sctp_m_freem(m)
+
+#define SCTP_GET_PKT_VRFID(m, vrf_id) ((vrf_id = SCTP_DEFAULT_VRFID) != SCTP_DEFAULT_VRFID)
+
+
+
+/* Attach the chain of data into the sendable packet. */
+#define SCTP_ATTACH_CHAIN(pak, m, packet_length) do { \
+ pak = m; \
+ pak->m_pkthdr.len = packet_length; \
+ } while(0)
+
+/* Other m_pkthdr type things */
+/* FIXME need real definitions */
+#define SCTP_IS_IT_BROADCAST(dst, m) 0
+/* OOTB only #define SCTP_IS_IT_BROADCAST(dst, m) ((m->m_flags & M_PKTHDR) ? in_broadcast(dst, m->m_pkthdr.rcvif) : 0) BSD def */
+#define SCTP_IS_IT_LOOPBACK(m) 0
+/* OOTB ONLY #define SCTP_IS_IT_LOOPBACK(m) ((m->m_flags & M_PKTHDR) && ((m->m_pkthdr.rcvif == NULL) || (m->m_pkthdr.rcvif->if_type == IFT_LOOP))) BSD def */
+
+
+/* This converts any input packet header
+ * into the chain of data holders; for BSD
+ * it's a NOP.
+ */
+
+/* get the v6 hop limit */
+#define SCTP_GET_HLIM(inp, ro) 128
+#define IPv6_HOP_LIMIT 128
+
+/* is the endpoint v6only? */
+#define SCTP_IPV6_V6ONLY(sctp_inpcb) ((sctp_inpcb)->ip_inp.inp.inp_flags & IN6P_IPV6_V6ONLY)
+/* is the socket non-blocking? */
+#define SCTP_SO_IS_NBIO(so) ((so)->so_state & SS_NBIO)
+#define SCTP_SET_SO_NBIO(so) ((so)->so_state |= SS_NBIO)
+#define SCTP_CLEAR_SO_NBIO(so) ((so)->so_state &= ~SS_NBIO)
+/* get the socket type */
+#define SCTP_SO_TYPE(so) ((so)->so_type)
+
+/* reserve sb space for a socket */
+#define SCTP_SORESERVE(so, send, recv) soreserve(so, send, recv)
+
+/* wakeup a socket */
+#define SCTP_SOWAKEUP(so) wakeup(&(so)->so_timeo, so)
+/* clear the socket buffer state */
+#define SCTP_SB_CLEAR(sb) \
+	do { \
+		(sb).sb_cc = 0; \
+		(sb).sb_mb = NULL; \
+		(sb).sb_mbcnt = 0; \
+	} while (0)
+
+#define SCTP_SB_LIMIT_RCV(so) so->so_rcv.sb_hiwat
+#define SCTP_SB_LIMIT_SND(so) so->so_snd.sb_hiwat
+
+#define SCTP_READ_RANDOM(buf, len) read_random(buf, len)
+
+#define SCTP_SHA1_CTX struct sctp_sha1_context
+#define SCTP_SHA1_INIT sctp_sha1_init
+#define SCTP_SHA1_UPDATE sctp_sha1_update
+#define SCTP_SHA1_FINAL(x,y) sctp_sha1_final((unsigned char *)x, y)
+
+/* start OOTB only stuff */
+/* TODO IFT_LOOP is in net/if_types.h on Linux */
+#define IFT_LOOP 0x18
+
+/* sctp_pcb.h */
+
+#if defined(_WIN32)
+#define SHUT_RD 1
+#define SHUT_WR 2
+#define SHUT_RDWR 3
+#endif
+#define PRU_FLUSH_RD SHUT_RD
+#define PRU_FLUSH_WR SHUT_WR
+#define PRU_FLUSH_RDWR SHUT_RDWR
+
+/* netinet/ip_var.h definitions are behind an if defined for _KERNEL on FreeBSD */
+#define IP_RAWOUTPUT 0x2
+
+
+/* end OOTB only stuff */
+
+#define AF_CONN 123
+struct sockaddr_conn {
+#ifdef HAVE_SCONN_LEN
+ uint8_t sconn_len;
+ uint8_t sconn_family;
+#else
+ uint16_t sconn_family;
+#endif
+ uint16_t sconn_port;
+ void *sconn_addr;
+};
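+
+/* Illustrative sketch (not part of the import): an AF_CONN address carries
+ * an opaque connection handle rather than an IP address; the port value and
+ * ulp_context below are hypothetical placeholders supplied by the caller.
+ */
+#if 0
+	struct sockaddr_conn sconn;
+
+	memset(&sconn, 0, sizeof(sconn));
+	sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+	sconn.sconn_len = sizeof(sconn);
+#endif
+	sconn.sconn_port = htons(5000);	/* hypothetical port */
+	sconn.sconn_addr = ulp_context;	/* hypothetical opaque handle */
+#endif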
+
+typedef void *(*start_routine_t)(void *);
+
+extern int
+sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_routine);
+
+void
+sctp_userspace_set_threadname(const char *name);
+
+/*
+ * SCTP protocol specific mbuf flags.
+ */
+#define M_NOTIFICATION M_PROTO5 /* SCTP notification */
+
+/*
+ * IP output routines
+ */
+
+/* Defining SCTP_IP_ID macro.
+ In netinet/ip_output.c, we have u_short ip_id;
+   In netinet/ip_var.h, we have extern u_short ip_id; (enclosed within _KERNEL)
+ See static __inline uint16_t ip_newid(void) in netinet/ip_var.h
+ */
+#define SCTP_IP_ID(inp) (ip_id)
+
+/* need sctphdr to get port in SCTP_IP_OUTPUT. sctphdr defined in sctp.h */
+#include <netinet/sctp.h>
+extern void sctp_userspace_ip_output(int *result, struct mbuf *o_pak,
+ sctp_route_t *ro, void *stcb,
+ uint32_t vrf_id);
+
+#define SCTP_IP_OUTPUT(result, o_pak, ro, stcb, vrf_id) sctp_userspace_ip_output(&result, o_pak, ro, stcb, vrf_id);
+
+#if defined(INET6)
+extern void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak,
+ struct route_in6 *ro, void *stcb,
+ uint32_t vrf_id);
+#define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, stcb, vrf_id) sctp_userspace_ip6_output(&result, o_pak, ro, stcb, vrf_id);
+#endif
+
+
+
+#if 0
+#define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, stcb, vrf_id) \
+{ \
+ if (stcb && stcb->sctp_ep) \
+ result = ip6_output(o_pak, \
+ ((struct inpcb *)(stcb->sctp_ep))->in6p_outputopts, \
+ (ro), 0, 0, ifp, NULL); \
+ else \
+ result = ip6_output(o_pak, NULL, (ro), 0, 0, ifp, NULL); \
+}
+#endif
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type);
+
+
+/* with the current included files, this is defined in Linux but
+ * in FreeBSD, it is behind a _KERNEL in sys/socket.h ...
+ */
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__native_client__)
+/* stolen from /usr/include/sys/socket.h */
+#define CMSG_ALIGN(n) _ALIGN(n)
+#elif defined(__NetBSD__)
+#define CMSG_ALIGN(n) (((n) + __ALIGNBYTES) & ~__ALIGNBYTES)
+#elif defined(__APPLE__)
+#if !defined(__DARWIN_ALIGNBYTES)
+#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1)
+#endif
+
+#if !defined(__DARWIN_ALIGN)
+#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(uintptr_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES)
+#endif
+
+#if !defined(__DARWIN_ALIGNBYTES32)
+#define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1)
+#endif
+
+#if !defined(__DARWIN_ALIGN32)
+#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(uintptr_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32)
+#endif
+#define CMSG_ALIGN(n) __DARWIN_ALIGN32(n)
+#endif
+#define I_AM_HERE \
+ do { \
+		SCTP_PRINTF("%s:%d at %s\n", __FILE__, __LINE__, __func__); \
+ } while (0)
+
+#ifndef timevalsub
+#define timevalsub(tp1, tp2) \
+ do { \
+ (tp1)->tv_sec -= (tp2)->tv_sec; \
+ (tp1)->tv_usec -= (tp2)->tv_usec; \
+ if ((tp1)->tv_usec < 0) { \
+ (tp1)->tv_sec--; \
+ (tp1)->tv_usec += 1000000; \
+ } \
+ } while (0)
+#endif
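+
+/* Illustrative sketch (not part of the import): the borrow branch keeps
+ * tv_usec within [0, 999999], e.g. {3s, 200000us} - {1s, 700000us}:
+ */
+#if 0
+	struct timeval a = { 3, 200000 };
+	struct timeval b = { 1, 700000 };
+
+	timevalsub(&a, &b);	/* a == { 1, 500000 }: one second borrowed into usec */
+#endif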
+
+#if defined(__linux__)
+#if !defined(TAILQ_FOREACH_SAFE)
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = ((head)->tqh_first); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+#endif
+#if !defined(LIST_FOREACH_SAFE)
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = ((head)->lh_first); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+#endif
+#endif
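+
+/* Illustrative sketch (not part of the import): the _SAFE variants latch
+ * the next pointer before the body runs, so the current element may be
+ * unlinked and freed mid-iteration (the entry type here is hypothetical).
+ */
+#if 0
+	struct entry {
+		TAILQ_ENTRY(entry) entries;
+	};
+	TAILQ_HEAD(, entry) head = TAILQ_HEAD_INITIALIZER(head);
+	struct entry *ep, *ep_tmp;
+
+	TAILQ_FOREACH_SAFE(ep, &head, entries, ep_tmp) {
+		TAILQ_REMOVE(&head, ep, entries);
+		free(ep);
+	}
+#endif
+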
+#if defined(__DragonFly__)
+#define TAILQ_FOREACH_SAFE TAILQ_FOREACH_MUTABLE
+#define LIST_FOREACH_SAFE LIST_FOREACH_MUTABLE
+#endif
+
+#if defined(__native_client__)
+#define timercmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+#endif
+
+#define SCTP_IS_LISTENING(inp) ((inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) != 0)
+
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__linux__) || defined(__native_client__) || defined(__NetBSD__) || defined(_WIN32) || defined(__Fuchsia__)
+int
+timingsafe_bcmp(const void *, const void *, size_t);
+#endif
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.c
new file mode 100644
index 000000000..a494b2c0a
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.c
@@ -0,0 +1,14993 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 362178 2020-06-14 16:05:08Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_crc32.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/sctp_kdtrace.h>
+#endif
+#if defined(__linux__)
+#define __FAVOR_BSD /* (on Ubuntu at least) enables the BSD-style UDP header field names from RFC 768 */
+#endif
+#if defined(INET) || defined(INET6)
+#if !defined(_WIN32)
+#include <netinet/udp.h>
+#endif
+#endif
+#if !defined(__Userspace__)
+#if defined(__APPLE__)
+#include <netinet/in.h>
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/udp_var.h>
+#include <machine/in_cksum.h>
+#endif
+#endif
+#if defined(__Userspace__) && defined(INET6)
+#include <netinet6/sctp6_var.h>
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
+#define SCTP_MAX_LINKHDR 16
+#endif
+#endif
+
+#define SCTP_MAX_GAPS_INARRAY 4
+struct sack_track {
+	uint8_t right_edge;	/* mergeable on the right edge */
+	uint8_t left_edge;	/* mergeable on the left edge */
+ uint8_t num_entries;
+ uint8_t spare;
+ struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
+};
+
+const struct sack_track sack_array[256] = {
+ {0, 0, 0, 0, /* 0x00 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x01 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x02 */
+ {{1, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x03 */
+ {{0, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x04 */
+ {{2, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x05 */
+ {{0, 0},
+ {2, 2},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x06 */
+ {{1, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x07 */
+ {{0, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x08 */
+ {{3, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x09 */
+ {{0, 0},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x0a */
+ {{1, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0b */
+ {{0, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0c */
+ {{2, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0d */
+ {{0, 0},
+ {2, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0e */
+ {{1, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x0f */
+ {{0, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x10 */
+ {{4, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x11 */
+ {{0, 0},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x12 */
+ {{1, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x13 */
+ {{0, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x14 */
+ {{2, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x15 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x16 */
+ {{1, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x17 */
+ {{0, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x18 */
+ {{3, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x19 */
+ {{0, 0},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x1a */
+ {{1, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1b */
+ {{0, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1c */
+ {{2, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1d */
+ {{0, 0},
+ {2, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1e */
+ {{1, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x1f */
+ {{0, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x20 */
+ {{5, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x21 */
+ {{0, 0},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x22 */
+ {{1, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x23 */
+ {{0, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x24 */
+ {{2, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x25 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x26 */
+ {{1, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x27 */
+ {{0, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x28 */
+ {{3, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x29 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x2a */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2b */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2c */
+ {{2, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2d */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2e */
+ {{1, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x2f */
+ {{0, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x30 */
+ {{4, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x31 */
+ {{0, 0},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x32 */
+ {{1, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x33 */
+ {{0, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x34 */
+ {{2, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x35 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x36 */
+ {{1, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x37 */
+ {{0, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x38 */
+ {{3, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x39 */
+ {{0, 0},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x3a */
+ {{1, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3b */
+ {{0, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3c */
+ {{2, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3d */
+ {{0, 0},
+ {2, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3e */
+ {{1, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x3f */
+ {{0, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x40 */
+ {{6, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x41 */
+ {{0, 0},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x42 */
+ {{1, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x43 */
+ {{0, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x44 */
+ {{2, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x45 */
+ {{0, 0},
+ {2, 2},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x46 */
+ {{1, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x47 */
+ {{0, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x48 */
+ {{3, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x49 */
+ {{0, 0},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x4a */
+ {{1, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4b */
+ {{0, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4c */
+ {{2, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4d */
+ {{0, 0},
+ {2, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4e */
+ {{1, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x4f */
+ {{0, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x50 */
+ {{4, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x51 */
+ {{0, 0},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x52 */
+ {{1, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x53 */
+ {{0, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x54 */
+ {{2, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 4, 0, /* 0x55 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 6}
+ }
+ },
+ {0, 0, 3, 0, /* 0x56 */
+ {{1, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x57 */
+ {{0, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x58 */
+ {{3, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x59 */
+ {{0, 0},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x5a */
+ {{1, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5b */
+ {{0, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5c */
+ {{2, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5d */
+ {{0, 0},
+ {2, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5e */
+ {{1, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x5f */
+ {{0, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x60 */
+ {{5, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x61 */
+ {{0, 0},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x62 */
+ {{1, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x63 */
+ {{0, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x64 */
+ {{2, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x65 */
+ {{0, 0},
+ {2, 2},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x66 */
+ {{1, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x67 */
+ {{0, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x68 */
+ {{3, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x69 */
+ {{0, 0},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x6a */
+ {{1, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6b */
+ {{0, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6c */
+ {{2, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6d */
+ {{0, 0},
+ {2, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6e */
+ {{1, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x6f */
+ {{0, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x70 */
+ {{4, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x71 */
+ {{0, 0},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x72 */
+ {{1, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x73 */
+ {{0, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x74 */
+ {{2, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x75 */
+ {{0, 0},
+ {2, 2},
+ {4, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x76 */
+ {{1, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x77 */
+ {{0, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x78 */
+ {{3, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x79 */
+ {{0, 0},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x7a */
+ {{1, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7b */
+ {{0, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7c */
+ {{2, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7d */
+ {{0, 0},
+ {2, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7e */
+ {{1, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x7f */
+ {{0, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0x80 */
+ {{7, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x81 */
+ {{0, 0},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x82 */
+ {{1, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x83 */
+ {{0, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x84 */
+ {{2, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x85 */
+ {{0, 0},
+ {2, 2},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x86 */
+ {{1, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x87 */
+ {{0, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x88 */
+ {{3, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x89 */
+ {{0, 0},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x8a */
+ {{1, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8b */
+ {{0, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8c */
+ {{2, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8d */
+ {{0, 0},
+ {2, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8e */
+ {{1, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x8f */
+ {{0, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x90 */
+ {{4, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x91 */
+ {{0, 0},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x92 */
+ {{1, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x93 */
+ {{0, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x94 */
+ {{2, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0x95 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0x96 */
+ {{1, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x97 */
+ {{0, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x98 */
+ {{3, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x99 */
+ {{0, 0},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x9a */
+ {{1, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9b */
+ {{0, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9c */
+ {{2, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9d */
+ {{0, 0},
+ {2, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9e */
+ {{1, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x9f */
+ {{0, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xa0 */
+ {{5, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa1 */
+ {{0, 0},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa2 */
+ {{1, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa3 */
+ {{0, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa4 */
+ {{2, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa5 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa6 */
+ {{1, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa7 */
+ {{0, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa8 */
+ {{3, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa9 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 4, 0, /* 0xaa */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {1, 1, 4, 0, /* 0xab */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xac */
+ {{2, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xad */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xae */
+ {{1, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xaf */
+ {{0, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb0 */
+ {{4, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb1 */
+ {{0, 0},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb2 */
+ {{1, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb3 */
+ {{0, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb4 */
+ {{2, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xb5 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb6 */
+ {{1, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb7 */
+ {{0, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb8 */
+ {{3, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb9 */
+ {{0, 0},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xba */
+ {{1, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbb */
+ {{0, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbc */
+ {{2, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbd */
+ {{0, 0},
+ {2, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbe */
+ {{1, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xbf */
+ {{0, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xc0 */
+ {{6, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc1 */
+ {{0, 0},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc2 */
+ {{1, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc3 */
+ {{0, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc4 */
+ {{2, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc5 */
+ {{0, 0},
+ {2, 2},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc6 */
+ {{1, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc7 */
+ {{0, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc8 */
+ {{3, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc9 */
+ {{0, 0},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xca */
+ {{1, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcb */
+ {{0, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xcc */
+ {{2, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcd */
+ {{0, 0},
+ {2, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xce */
+ {{1, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xcf */
+ {{0, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd0 */
+ {{4, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd1 */
+ {{0, 0},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd2 */
+ {{1, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd3 */
+ {{0, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd4 */
+ {{2, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xd5 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd6 */
+ {{1, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd7 */
+ {{0, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd8 */
+ {{3, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd9 */
+ {{0, 0},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xda */
+ {{1, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdb */
+ {{0, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xdc */
+ {{2, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdd */
+ {{0, 0},
+ {2, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xde */
+ {{1, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xdf */
+ {{0, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xe0 */
+ {{5, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe1 */
+ {{0, 0},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe2 */
+ {{1, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe3 */
+ {{0, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe4 */
+ {{2, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe5 */
+ {{0, 0},
+ {2, 2},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe6 */
+ {{1, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe7 */
+ {{0, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe8 */
+ {{3, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe9 */
+ {{0, 0},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xea */
+ {{1, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xeb */
+ {{0, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xec */
+ {{2, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xed */
+ {{0, 0},
+ {2, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xee */
+ {{1, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xef */
+ {{0, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf0 */
+ {{4, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf1 */
+ {{0, 0},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf2 */
+ {{1, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf3 */
+ {{0, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf4 */
+ {{2, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xf5 */
+ {{0, 0},
+ {2, 2},
+ {4, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf6 */
+ {{1, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf7 */
+ {{0, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf8 */
+ {{3, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf9 */
+ {{0, 0},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xfa */
+ {{1, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfb */
+ {{0, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfc */
+ {{2, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfd */
+ {{0, 0},
+ {2, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfe */
+ {{1, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 1, 0, /* 0xff */
+ {{0, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ }
+};
+
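+/* Illustrative sketch (not part of the import): sack_array maps every
+ * possible byte of the receiver's gap bitmap onto precomputed gap-ack
+ * blocks. For 0x25 (bits 0, 2 and 5 set) the entry holds three blocks
+ * covering offsets 0-0, 2-2 and 5-5, with right_edge set because bit 0
+ * lets the first block merge with the preceding byte's range.
+ */
+#if 0
+	const struct sack_track *entry = &sack_array[0x25];
+
+	/* entry->num_entries == 3, entry->gaps[0] == {0, 0},
+	 * entry->gaps[1] == {2, 2}, entry->gaps[2] == {5, 5}
+	 */
+#endif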
+
+int
+sctp_is_address_in_scope(struct sctp_ifa *ifa,
+ struct sctp_scoping *scope,
+ int do_update)
+{
+ if ((scope->loopback_scope == 0) &&
+ (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
+ /*
+		 * skip loopback if not in scope
+ */
+ return (0);
+ }
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (scope->ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = &ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+				/* not in scope, unspecified */
+ return (0);
+ }
+ if ((scope->ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ /* private address not in scope */
+ return (0);
+ }
+ } else {
+ return (0);
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (scope->ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ /* Must update the flags, bummer, which
+ * means any IFA locks must now be applied HERE <->
+ */
+ if (do_update) {
+ sctp_gather_internal_ifa_flags(ifa);
+ }
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ return (0);
+ }
+ /* ok to use deprecated addresses? */
+ sin6 = &ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* skip unspecified addresses */
+ return (0);
+ }
+ if ( /* (local_scope == 0) && */
+ (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+ return (0);
+ }
+ if ((scope->site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ return (0);
+ }
+ } else {
+ return (0);
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (!scope->conn_addr_legal) {
+ return (0);
+ }
+ break;
+#endif
+ default:
+ return (0);
+ }
+ return (1);
+}
+
+static struct mbuf *
+sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
+{
+#if defined(INET) || defined(INET6)
+ struct sctp_paramhdr *paramh;
+ struct mbuf *mret;
+ uint16_t plen;
+#endif
+
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
+ break;
+#endif
+ default:
+ return (m);
+ }
+#if defined(INET) || defined(INET6)
+ if (M_TRAILINGSPACE(m) >= plen) {
+		/* easy case: we just drop it on the end */
+ paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
+ mret = m;
+ } else {
+ /* Need more space */
+ mret = m;
+ while (SCTP_BUF_NEXT(mret) != NULL) {
+ mret = SCTP_BUF_NEXT(mret);
+ }
+ SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
+ if (SCTP_BUF_NEXT(mret) == NULL) {
+ /* We are hosed, can't add more addresses */
+ return (m);
+ }
+ mret = SCTP_BUF_NEXT(mret);
+ paramh = mtod(mret, struct sctp_paramhdr *);
+ }
+ /* now add the parameter */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sctp_ipv4addr_param *ipv4p;
+ struct sockaddr_in *sin;
+
+ sin = &ifa->address.sin;
+ ipv4p = (struct sctp_ipv4addr_param *)paramh;
+ paramh->param_type = htons(SCTP_IPV4_ADDRESS);
+ paramh->param_length = htons(plen);
+ ipv4p->addr = sin->sin_addr.s_addr;
+ SCTP_BUF_LEN(mret) += plen;
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sctp_ipv6addr_param *ipv6p;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = &ifa->address.sin6;
+ ipv6p = (struct sctp_ipv6addr_param *)paramh;
+ paramh->param_type = htons(SCTP_IPV6_ADDRESS);
+ paramh->param_length = htons(plen);
+ memcpy(ipv6p->addr, &sin6->sin6_addr,
+ sizeof(ipv6p->addr));
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+ /* clear embedded scope in the address */
+ in6_clearscope((struct in6_addr *)ipv6p->addr);
+#endif
+ SCTP_BUF_LEN(mret) += plen;
+ break;
+ }
+#endif
+ default:
+ return (m);
+ }
+ if (len != NULL) {
+ *len += plen;
+ }
+ return (mret);
+#endif
+}
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_scoping *scope,
+ struct mbuf *m_at, int cnt_inits_to,
+ uint16_t *padding_len, uint16_t *chunk_len)
+{
+ struct sctp_vrf *vrf = NULL;
+ int cnt, limit_out = 0, total_count;
+ uint32_t vrf_id;
+
+ vrf_id = inp->def_vrf_id;
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (m_at);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_ifn *sctp_ifnp;
+
+ cnt = cnt_inits_to;
+ if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
+ limit_out = 1;
+ cnt = SCTP_ADDRESS_LIMIT;
+ goto skip_count;
+ }
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ if ((scope->loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
+ /*
+ * Skip loopback devices if loopback_scope
+ * not set
+ */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifap->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifap->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
+ continue;
+ }
+#if defined(__Userspace__)
+ if (sctp_ifap->address.sa.sa_family == AF_CONN) {
+ continue;
+ }
+#endif
+ if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
+ continue;
+ }
+ cnt++;
+ if (cnt > SCTP_ADDRESS_LIMIT) {
+ break;
+ }
+ }
+ if (cnt > SCTP_ADDRESS_LIMIT) {
+ break;
+ }
+ }
+ skip_count:
+ if (cnt > 1) {
+ total_count = 0;
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ cnt = 0;
+ if ((scope->loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
+ /*
+ * Skip loopback devices if
+ * loopback_scope not set
+ */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifap->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifap->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
+ continue;
+ }
+#if defined(__Userspace__)
+ if (sctp_ifap->address.sa.sa_family == AF_CONN) {
+ continue;
+ }
+#endif
+ if (sctp_is_address_in_scope(sctp_ifap,
+ scope, 0) == 0) {
+ continue;
+ }
+ if ((chunk_len != NULL) &&
+ (padding_len != NULL) &&
+ (*padding_len > 0)) {
+ memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
+ SCTP_BUF_LEN(m_at) += *padding_len;
+ *chunk_len += *padding_len;
+ *padding_len = 0;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
+ if (limit_out) {
+ cnt++;
+ total_count++;
+ if (cnt >= 2) {
+ /* two from each address */
+ break;
+ }
+ if (total_count > SCTP_ADDRESS_LIMIT) {
+ /* No more addresses */
+ break;
+ }
+ }
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ cnt = cnt_inits_to;
+ /* First, how many ? */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+				/* Address being deleted by the system, don't
+ * list.
+ */
+ continue;
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+				/* Address being deleted on this ep,
+ * don't list.
+ */
+ continue;
+ }
+#if defined(__Userspace__)
+ if (laddr->ifa->address.sa.sa_family == AF_CONN) {
+ continue;
+ }
+#endif
+ if (sctp_is_address_in_scope(laddr->ifa,
+ scope, 1) == 0) {
+ continue;
+ }
+ cnt++;
+ }
+ /*
+ * To get through a NAT we only list addresses if we have
+ * more than one. That way if you just bind a single address
+ * we let the source of the init dictate our address.
+ */
+ if (cnt > 1) {
+ cnt = cnt_inits_to;
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ continue;
+ }
+#if defined(__Userspace__)
+ if (laddr->ifa->address.sa.sa_family == AF_CONN) {
+ continue;
+ }
+#endif
+ if (sctp_is_address_in_scope(laddr->ifa,
+ scope, 0) == 0) {
+ continue;
+ }
+ if ((chunk_len != NULL) &&
+ (padding_len != NULL) &&
+ (*padding_len > 0)) {
+ memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
+ SCTP_BUF_LEN(m_at) += *padding_len;
+ *chunk_len += *padding_len;
+ *padding_len = 0;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
+ cnt++;
+ if (cnt >= SCTP_ADDRESS_LIMIT) {
+ break;
+ }
+ }
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (m_at);
+}
+
+static struct sctp_ifa *
+sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ sa_family_t fam)
+{
+ uint8_t dest_is_global = 0;
+ /* dest_is_priv is true if destination is a private address */
+ /* dest_is_loop is true if destination is a loopback addresses */
+
+ /**
+	 * Here we determine if it is a preferred address. A preferred
+	 * address means it is the same scope or higher scope than the
+	 * destination.
+ * L = loopback, P = private, G = global
+ * -----------------------------------------
+ * src | dest | result
+ * ----------------------------------------
+ * L | L | yes
+ * -----------------------------------------
+ * P | L | yes-v4 no-v6
+ * -----------------------------------------
+ * G | L | yes-v4 no-v6
+ * -----------------------------------------
+ * L | P | no
+ * -----------------------------------------
+ * P | P | yes
+ * -----------------------------------------
+ * G | P | no
+ * -----------------------------------------
+ * L | G | no
+ * -----------------------------------------
+ * P | G | no
+ * -----------------------------------------
+ * G | G | yes
+ * -----------------------------------------
+ */
+
+ if (ifa->address.sa.sa_family != fam) {
+ /* forget mis-matched family */
+ return (NULL);
+ }
+ if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
+ dest_is_global = 1;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
+ /* Ok the address may be ok */
+#ifdef INET6
+ if (fam == AF_INET6) {
+ /* ok to use deprecated addresses? no lets not! */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
+ return (NULL);
+ }
+ if (ifa->src_is_priv && !ifa->src_is_loop) {
+ if (dest_is_loop) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
+ return (NULL);
+ }
+ }
+ if (ifa->src_is_glob) {
+ if (dest_is_loop) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
+ return (NULL);
+ }
+ }
+ }
+#endif
+	/* Now that we know what is what, implement our table.
+	 * This could in theory be done slicker (it used to be), but this
+	 * is straightforward and easier to validate :-)
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
+ ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
+ dest_is_loop, dest_is_priv, dest_is_global);
+
+ if ((ifa->src_is_loop) && (dest_is_priv)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
+ return (NULL);
+ }
+ if ((ifa->src_is_glob) && (dest_is_priv)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
+ return (NULL);
+ }
+ if ((ifa->src_is_loop) && (dest_is_global)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
+ return (NULL);
+ }
+ if ((ifa->src_is_priv) && (dest_is_global)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
+ return (NULL);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
+	/* it's a preferred address */
+ return (ifa);
+}
+
+static struct sctp_ifa *
+sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ sa_family_t fam)
+{
+ uint8_t dest_is_global = 0;
+
+ /**
+	 * Here we determine if it is an acceptable address. An acceptable
+	 * address means it is the same scope or higher scope, but we can
+	 * allow for NAT, which means it is OK to have a global dest and a
+	 * private src.
+ *
+ * L = loopback, P = private, G = global
+ * -----------------------------------------
+ * src | dest | result
+ * -----------------------------------------
+ * L | L | yes
+ * -----------------------------------------
+ * P | L | yes-v4 no-v6
+ * -----------------------------------------
+ * G | L | yes
+ * -----------------------------------------
+ * L | P | no
+ * -----------------------------------------
+ * P | P | yes
+ * -----------------------------------------
+ * G | P | yes - May not work
+ * -----------------------------------------
+ * L | G | no
+ * -----------------------------------------
+ * P | G | yes - May not work
+ * -----------------------------------------
+ * G | G | yes
+ * -----------------------------------------
+ */
+
+ if (ifa->address.sa.sa_family != fam) {
+ /* forget non matching family */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
+ ifa->address.sa.sa_family, fam);
+ return (NULL);
+ }
+ /* Ok the address may be ok */
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
+ dest_is_loop, dest_is_priv);
+ if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
+ dest_is_global = 1;
+ }
+#ifdef INET6
+ if (fam == AF_INET6) {
+ /* ok to use deprecated addresses? */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ return (NULL);
+ }
+ if (ifa->src_is_priv) {
+ /* Special case, linklocal to loop */
+ if (dest_is_loop)
+ return (NULL);
+ }
+ }
+#endif
+ /*
+ * Now that we know what is what, implement our table.
+ * This could in theory be done slicker (it used to be), but this
+ * is straightforward and easier to validate :-)
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
+ ifa->src_is_loop,
+ dest_is_priv);
+ if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
+ return (NULL);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
+ ifa->src_is_loop,
+ dest_is_global);
+ if ((ifa->src_is_loop == 1) && (dest_is_global)) {
+ return (NULL);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
+	/* it's an acceptable address */
+ return (ifa);
+}
+
+int
+sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+
+ if (stcb == NULL) {
+ /* There are no restrictions, no TCB :-) */
+ return (0);
+ }
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+ __func__);
+ continue;
+ }
+ if (laddr->ifa == ifa) {
+ /* Yes it is on the list */
+ return (1);
+ }
+ }
+ return (0);
+}
+
+
+int
+sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+
+ if (ifa == NULL)
+ return (0);
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+ __func__);
+ continue;
+ }
+ if ((laddr->ifa == ifa) && laddr->action == 0)
+ /* same pointer */
+ return (1);
+ }
+ return (0);
+}
+
+
+
+static struct sctp_ifa *
+sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
+ sctp_route_t *ro,
+ uint32_t vrf_id,
+ int non_asoc_addr_ok,
+ uint8_t dest_is_priv,
+ uint8_t dest_is_loop,
+ sa_family_t fam)
+{
+ struct sctp_laddr *laddr, *starting_point;
+ void *ifn;
+ int resettotop = 0;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa, *sifa;
+ struct sctp_vrf *vrf;
+ uint32_t ifn_index;
+
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL)
+ return (NULL);
+
+ ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+ sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+ /*
+	 * first question, is the ifn we will emit on in our list? If so,
+	 * we want such an address. Note that we first look for a
+	 * preferred address.
+ */
+ if (sctp_ifn) {
+ /* is a preferred one on the interface we route out? */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
+ dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (sctp_is_addr_in_ep(inp, sifa)) {
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+ }
+ /*
+	 * ok, now we need to find one in the list of bound addresses.
+	 * We can't get one on the emitting interface, so let's look
+	 * first for a preferred one and, failing that, an acceptable
+	 * one; otherwise we return NULL.
+ */
+ starting_point = inp->next_addr_touse;
+ once_again:
+ if (inp->next_addr_touse == NULL) {
+ inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
+ resettotop = 1;
+ }
+ for (laddr = inp->next_addr_touse; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (resettotop == 0) {
+ inp->next_addr_touse = NULL;
+ goto once_again;
+ }
+
+ inp->next_addr_touse = starting_point;
+ resettotop = 0;
+ once_again_too:
+ if (inp->next_addr_touse == NULL) {
+ inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
+ resettotop = 1;
+ }
+
+ /* ok, what about an acceptable address in the inp */
+ for (laddr = inp->next_addr_touse; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (resettotop == 0) {
+ inp->next_addr_touse = NULL;
+ goto once_again_too;
+ }
+
+ /*
+	 * no address bound can be a source for the destination;
+	 * we are in trouble
+ */
+ return (NULL);
+}
+
+
+
+static struct sctp_ifa *
+sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ sctp_route_t *ro,
+ uint32_t vrf_id,
+ uint8_t dest_is_priv,
+ uint8_t dest_is_loop,
+ int non_asoc_addr_ok,
+ sa_family_t fam)
+{
+ struct sctp_laddr *laddr, *starting_point;
+ void *ifn;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa, *sifa;
+ uint8_t start_at_beginning = 0;
+ struct sctp_vrf *vrf;
+ uint32_t ifn_index;
+
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL)
+ return (NULL);
+
+ ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+
+ /*
+ * first question, is the ifn we will emit on in our list? If so,
+ * we want that one. First we look for a preferred. Second, we go
+ * for an acceptable.
+ */
+ if (sctp_ifn) {
+ /* first try for a preferred address on the ep */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
+ continue;
+ if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
+ sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+ /* next try for an acceptable address on the ep */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
+ continue;
+ if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
+			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+
+ }
+ /*
+	 * if we can't find one like that then we must look at all the
+	 * bound addresses, picking first a preferred one and falling
+	 * back to an acceptable one.
+ */
+ starting_point = stcb->asoc.last_used_address;
+ sctp_from_the_top:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ stcb->asoc.last_used_address = laddr;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctp_from_the_top;
+ }
+ /* now try for any higher scope than the destination */
+ stcb->asoc.last_used_address = starting_point;
+ start_at_beginning = 0;
+ sctp_from_the_top2:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ stcb->asoc.last_used_address = laddr;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctp_from_the_top2;
+ }
+ return (NULL);
+}
+
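+/*
+ * Walk the address list of the given interface and return the
+ * addr_wanted-th (0-based) preferred address that survives the jail,
+ * defer-use, scope and mobility checks below. Returns NULL if fewer
+ * than addr_wanted + 1 addresses are eligible.
+ */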
+static struct sctp_ifa *
+sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct sctp_inpcb *inp,
+#else
+ struct sctp_inpcb *inp SCTP_UNUSED,
+#endif
+ struct sctp_tcb *stcb,
+ int non_asoc_addr_ok,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ int addr_wanted,
+ sa_family_t fam,
+ sctp_route_t *ro
+ )
+{
+ struct sctp_ifa *ifa, *sifa;
+ int num_eligible_addr = 0;
+#ifdef INET6
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ struct sockaddr_in6 sin6, lsa6;
+
+ if (fam == AF_INET6) {
+ memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(&sin6);
+#else
+ (void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
+#endif /* SCTP_KAME */
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif /* INET6 */
+ LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+#ifdef INET6
+ if (fam == AF_INET6 &&
+ dest_is_loop &&
+ sifa->src_is_loop && sifa->src_is_priv) {
+			/* don't allow fe80::1 to be a source on loopback ::1; we don't
+			 * list it to the peer, so we would get an abort.
+			 */
+ continue;
+ }
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ if (fam == AF_INET6 &&
+ IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
+ IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+ /* link-local <-> link-local must belong to the same scope. */
+ memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(&lsa6);
+#else
+ (void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
+#endif /* SCTP_KAME */
+ if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
+ continue;
+ }
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif /* INET6 */
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+		/* Check if the IPv6 address matches the next-hop.
+		   In the mobility case, an old IPv6 address may not have been
+		   deleted from the interface, so the interface holds both the
+		   previous and the new addresses. We should use the one
+		   corresponding to the next-hop. (by micchie)
+		 */
+#ifdef INET6
+ if (stcb && fam == AF_INET6 &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
+ if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
+ == 0) {
+ continue;
+ }
+ }
+#endif
+#ifdef INET
+ /* Avoid topologically incorrect IPv4 address */
+ if (stcb && fam == AF_INET &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
+ if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
+ continue;
+ }
+ }
+#endif
+#endif
+ if (stcb) {
+ if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ if (num_eligible_addr >= addr_wanted) {
+ return (sifa);
+ }
+ num_eligible_addr++;
+ }
+ return (NULL);
+}
+
+
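+/*
+ * Count how many addresses on the given interface pass essentially the
+ * same eligibility checks as the nth-preferred selector above (minus the
+ * per-destination IPv6 scope and mobility tests), so that the caller can
+ * rotate cur_addr_num over a stable count.
+ */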
+static int
+sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct sctp_inpcb *inp,
+#else
+ struct sctp_inpcb *inp SCTP_UNUSED,
+#endif
+ struct sctp_tcb *stcb,
+ int non_asoc_addr_ok,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ sa_family_t fam)
+{
+ struct sctp_ifa *ifa, *sifa;
+ int num_eligible_addr = 0;
+
+ LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((ifa->address.sa.sa_family == AF_INET6) &&
+ (stcb != NULL) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0)) {
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL) {
+ continue;
+ }
+ if (stcb) {
+ if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ num_eligible_addr++;
+ }
+ return (num_eligible_addr);
+}
+
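+/*
+ * Bound-all source address selection. The plans below are tried in order:
+ *   Plan A: the cur_addr_num-th preferred address on the emit interface,
+ *   Plan B: a preferred address on any other interface,
+ *   Plan C: an acceptable address on the emit interface,
+ *   Plan D: an acceptable address on any interface.
+ * For IPv4 only, plans C and D are retried once with private addresses
+ * temporarily allowed in the association's scope.
+ */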
+static struct sctp_ifa *
+sctp_choose_boundall(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ sctp_route_t *ro,
+ uint32_t vrf_id,
+ uint8_t dest_is_priv,
+ uint8_t dest_is_loop,
+ int non_asoc_addr_ok,
+ sa_family_t fam)
+{
+ int cur_addr_num = 0, num_preferred = 0;
+ void *ifn;
+ struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
+ struct sctp_ifa *sctp_ifa, *sifa;
+ uint32_t ifn_index;
+ struct sctp_vrf *vrf;
+#ifdef INET
+ int retried = 0;
+#endif
+
+ /*-
+ * For boundall we can use any address in the association.
+ * If non_asoc_addr_ok is set we can use any address (at least in
+ * theory). So we look for preferred addresses first. If we find one,
+ * we use it. Otherwise we next try to get an address on the
+ * interface, which we should be able to do (unless non_asoc_addr_ok
+ * is false and we are routed out that way). In these cases where we
+ * can't use the address of the interface we go through all the
+ * ifn's looking for an address we can use and fill that in. Punting
+ * means we send back address 0, which will probably cause problems
+ * actually since then IP will fill in the address of the route ifn,
+ * which means we probably already rejected it.. i.e. here comes an
+ * abort :-<.
+ */
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL)
+ return (NULL);
+
+ ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
+ emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+ if (sctp_ifn == NULL) {
+ /* ?? We don't have this guy ?? */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
+ goto bound_all_plan_b;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
+ ifn_index, sctp_ifn->ifn_name);
+
+ if (net) {
+ cur_addr_num = net->indx_of_eligible_next_to_use;
+ }
+ num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
+ inp, stcb,
+ non_asoc_addr_ok,
+ dest_is_loop,
+ dest_is_priv, fam);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
+ num_preferred, sctp_ifn->ifn_name);
+ if (num_preferred == 0) {
+ /*
+ * no eligible addresses, we must use some other interface
+ * address if we can find one.
+ */
+ goto bound_all_plan_b;
+ }
+ /*
+	 * Ok, num_preferred now holds how many addresses we can use; this
+	 * may vary from call to call due to addresses being deprecated,
+	 * etc.
+ */
+ if (cur_addr_num >= num_preferred) {
+ cur_addr_num = 0;
+ }
+ /*
+ * select the nth address from the list (where cur_addr_num is the
+ * nth) and 0 is the first one, 1 is the second one etc...
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
+
+ sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
+ dest_is_priv, cur_addr_num, fam, ro);
+
+	/* if sctp_ifa is NULL, something changed; fall back to plan b. */
+ if (sctp_ifa) {
+ atomic_add_int(&sctp_ifa->refcount, 1);
+ if (net) {
+ /* save off where the next one we will want */
+ net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+ }
+ return (sctp_ifa);
+ }
+ /*
+	 * plan_b: Look at all interfaces and find a preferred address. If
+	 * no preferred address is found, fall through to plan_c.
+ */
+ bound_all_plan_b:
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
+ sctp_ifn->ifn_name);
+ if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* wrong base scope */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
+ continue;
+ }
+ if ((sctp_ifn == looked_at) && looked_at) {
+ /* already looked at this guy */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
+ continue;
+ }
+ num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
+ dest_is_loop, dest_is_priv, fam);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,
+ "Found ifn:%p %d preferred source addresses\n",
+ ifn, num_preferred);
+ if (num_preferred == 0) {
+ /* None on this interface. */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
+ continue;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,
+ "num preferred:%d on interface:%p cur_addr_num:%d\n",
+ num_preferred, (void *)sctp_ifn, cur_addr_num);
+
+ /*
+		 * Ok, num_preferred now holds how many addresses we can
+		 * use; this may vary from call to call due to addresses
+		 * being deprecated, etc.
+ */
+ if (cur_addr_num >= num_preferred) {
+ cur_addr_num = 0;
+ }
+ sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
+ dest_is_priv, cur_addr_num, fam, ro);
+ if (sifa == NULL)
+ continue;
+ if (net) {
+ net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
+ cur_addr_num);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+#ifdef INET
+again_with_private_addresses_allowed:
+#endif
+ /* plan_c: do we have an acceptable address on the emit interface */
+ sifa = NULL;
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
+ if (emit_ifn == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
+ goto plan_d;
+ }
+ LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin.sin_addr) != 0)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
+ continue;
+ }
+#endif
+#endif
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
+ continue;
+ }
+ if (stcb) {
+ if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
+ sifa = NULL;
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some
+ * reason.. probably not yet added.
+ */
+					SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
+ sifa = NULL;
+ continue;
+ }
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ goto out;
+ }
+ plan_d:
+ /*
+ * plan_d: We are in trouble. No preferred address on the emit
+	 * interface, and no preferred address on any other interface.
+ * Go out and see if we can find an acceptable address somewhere
+ * amongst all interfaces.
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* wrong base scope */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
+ dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (stcb) {
+ if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
+ sifa = NULL;
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some
+ * reason.. probably not yet added.
+ */
+ sifa = NULL;
+ continue;
+ }
+ }
+ goto out;
+ }
+ }
+#ifdef INET
+ if (stcb) {
+ if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
+ stcb->asoc.scope.ipv4_local_scope = 1;
+ retried = 1;
+ goto again_with_private_addresses_allowed;
+ } else if (retried == 1) {
+ stcb->asoc.scope.ipv4_local_scope = 0;
+ }
+ }
+#endif
+out:
+#ifdef INET
+ if (sifa) {
+ if (retried == 1) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* wrong base scope */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ struct sctp_ifa *tmp_sifa;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+ (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin.sin_addr) != 0)) {
+ continue;
+ }
+#endif
+#ifdef INET6
+ if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+ (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+ continue;
+ }
+#endif
+#endif
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
+ dest_is_loop,
+ dest_is_priv, fam);
+ if (tmp_sifa == NULL) {
+ continue;
+ }
+ if (tmp_sifa == sifa) {
+ continue;
+ }
+ if (stcb) {
+ if (sctp_is_address_in_scope(tmp_sifa,
+ &stcb->asoc.scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
+ (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
+ /*
+ * It is restricted for some
+ * reason.. probably not yet added.
+ */
+ continue;
+ }
+ }
+ if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
+ (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
+ sctp_add_local_addr_restricted(stcb, tmp_sifa);
+ }
+ }
+ }
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ }
+#endif
+ return (sifa);
+}
+
+
+
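+/*
+ * Illustrative caller (this mirrors the cached-path use further below in
+ * this file; the variable names are just the usual ones here):
+ *
+ *   if (net->src_addr_selected == 0) {
+ *           net->ro._s_addr = sctp_source_address_selection(inp, stcb,
+ *               (sctp_route_t *)&net->ro, net, 0, vrf_id);
+ *           net->src_addr_selected = 1;
+ *   }
+ *
+ * The returned ifa has its refcount bumped; the caller owns that
+ * reference and must release it with sctp_free_ifa() when done.
+ */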
+/* tcb may be NULL */
+struct sctp_ifa *
+sctp_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ sctp_route_t *ro,
+ struct sctp_nets *net,
+ int non_asoc_addr_ok, uint32_t vrf_id)
+{
+ struct sctp_ifa *answer;
+ uint8_t dest_is_priv, dest_is_loop;
+ sa_family_t fam;
+#ifdef INET
+ struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
+#endif
+
+ /**
+ * Rules:
+ * - Find the route if needed, cache if I can.
+	 * - Look at the interface address in the route. Is it in the bound list?
+	 *   If so we have the best source.
+ * - If not we must rotate amongst the addresses.
+ *
+	 * Caveats and issues
+	 *
+	 * Do we need to pay attention to scope? We can have a private address
+	 * or a global address we are sourcing from or sending to. So if we
+	 * draw it out:
+ * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+ * For V4
+ * ------------------------------------------
+ * source * dest * result
+ * -----------------------------------------
+ * <a> Private * Global * NAT
+ * -----------------------------------------
+ * <b> Private * Private * No problem
+ * -----------------------------------------
+ * <c> Global * Private * Huh, How will this work?
+ * -----------------------------------------
+ * <d> Global * Global * No Problem
+ *------------------------------------------
+ * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+ * For V6
+ *------------------------------------------
+ * source * dest * result
+ * -----------------------------------------
+ * <a> Linklocal * Global *
+ * -----------------------------------------
+ * <b> Linklocal * Linklocal * No problem
+ * -----------------------------------------
+ * <c> Global * Linklocal * Huh, How will this work?
+ * -----------------------------------------
+ * <d> Global * Global * No Problem
+ *------------------------------------------
+ * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+ *
+	 * And then we add to that what happens if there are multiple addresses
+	 * assigned to an interface. Remember the ifa on an ifn is a linked
+	 * list of addresses. So one interface can have more than one IP
+	 * address. What happens if we have both a private and a global
+	 * address? Do we then use the context of the destination to sort out
+	 * which one is best? And what about NATs? Sending P->G may get you a
+	 * NAT translation, or should you select the G that's on the interface
+	 * in preference?
+ *
+ * Decisions:
+ *
+ * - count the number of addresses on the interface.
+ * - if it is one, no problem except case <c>.
+ * For <a> we will assume a NAT out there.
+ * - if there are more than one, then we need to worry about scope P
+ * or G. We should prefer G -> G and P -> P if possible.
+ * Then as a secondary fall back to mixed types G->P being a last
+ * ditch one.
+	 * - The above all works for bound all, but for bound specific we need
+	 *   to use the same concept, except that we only consider the bound
+	 *   addresses. If the bound set is NOT assigned to the interface then
+	 *   we must use rotation amongst the bound addresses.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ro->ro_nh == NULL) {
+#else
+ if (ro->ro_rt == NULL) {
+#endif
+ /*
+ * Need a route to cache.
+ */
+ SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ro->ro_nh == NULL) {
+#else
+ if (ro->ro_rt == NULL) {
+#endif
+ return (NULL);
+ }
+#if defined(_WIN32)
+ /* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
+ fam = (sa_family_t)ro->ro_dst.sa_family;
+#else
+ fam = ro->ro_dst.sa_family;
+#endif
+ dest_is_priv = dest_is_loop = 0;
+ /* Setup our scopes for the destination */
+ switch (fam) {
+#ifdef INET
+ case AF_INET:
+ /* Scope based on outbound address */
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ dest_is_loop = 1;
+ if (net != NULL) {
+ /* mark it as local */
+ net->addr_is_local = 1;
+ }
+ } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
+ dest_is_priv = 1;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ /* Scope based on outbound address */
+#if defined(_WIN32)
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+#else
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
+ SCTP_ROUTE_IS_REAL_LOOP(ro)) {
+#endif
+ /*
+ * If the address is a loopback address, which
+ * consists of "::1" OR "fe80::1%lo0", we are loopback
+ * scope. But we don't use dest_is_priv (link local
+ * addresses).
+ */
+ dest_is_loop = 1;
+ if (net != NULL) {
+ /* mark it as local */
+ net->addr_is_local = 1;
+ }
+ } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ dest_is_priv = 1;
+ }
+ break;
+#endif
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
+ SCTP_IPI_ADDR_RLOCK();
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /*
+ * Bound all case
+ */
+ answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
+ dest_is_priv, dest_is_loop,
+ non_asoc_addr_ok, fam);
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (answer);
+ }
+ /*
+ * Subset bound case
+ */
+ if (stcb) {
+ answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
+ vrf_id, dest_is_priv,
+ dest_is_loop,
+ non_asoc_addr_ok, fam);
+ } else {
+ answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
+ non_asoc_addr_ok,
+ dest_is_priv,
+ dest_is_loop, fam);
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (answer);
+}
+
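+/*
+ * Illustrative use of the cmsg scanner below (hypothetical caller, not
+ * part of this file):
+ *
+ *   struct sctp_sndinfo sndinfo;
+ *
+ *   if (sctp_find_cmsg(SCTP_SNDINFO, (void *)&sndinfo, control,
+ *                      sizeof(sndinfo))) {
+ *           ... use sndinfo.snd_sid, sndinfo.snd_flags, ...
+ *   }
+ *
+ * It returns 1 and fills *data when the requested cmsg type is present
+ * and complete, 0 otherwise. For c_type == SCTP_SNDRCV the result is
+ * synthesized from SCTP_SNDINFO, SCTP_PRINFO and SCTP_AUTHINFO cmsgs.
+ */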
+static int
+sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
+{
+#if defined(_WIN32)
+ WSACMSGHDR cmh;
+#else
+ struct cmsghdr cmh;
+#endif
+ struct sctp_sndinfo sndinfo;
+ struct sctp_prinfo prinfo;
+ struct sctp_authinfo authinfo;
+ int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
+ int found;
+
+ /*
+ * Independent of how many mbufs, find the c_type inside the control
+ * structure and copy out the data.
+ */
+ found = 0;
+ tot_len = SCTP_BUF_LEN(control);
+ for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
+ rem_len = tot_len - off;
+ if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
+ /* There is not enough room for one more. */
+ return (found);
+ }
+ m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
+ if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+			/* We don't have a complete CMSG header. */
+ return (found);
+ }
+ if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
+ /* We don't have the complete CMSG. */
+ return (found);
+ }
+ cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
+ cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
+ if ((cmh.cmsg_level == IPPROTO_SCTP) &&
+ ((c_type == cmh.cmsg_type) ||
+ ((c_type == SCTP_SNDRCV) &&
+ ((cmh.cmsg_type == SCTP_SNDINFO) ||
+ (cmh.cmsg_type == SCTP_PRINFO) ||
+ (cmh.cmsg_type == SCTP_AUTHINFO))))) {
+ if (c_type == cmh.cmsg_type) {
+ if (cpsize > INT_MAX) {
+ return (found);
+ }
+ if (cmsg_data_len < (int)cpsize) {
+ return (found);
+ }
+ /* It is exactly what we want. Copy it out. */
+ m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
+ return (1);
+ } else {
+ struct sctp_sndrcvinfo *sndrcvinfo;
+
+ sndrcvinfo = (struct sctp_sndrcvinfo *)data;
+ if (found == 0) {
+ if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
+ return (found);
+ }
+ memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
+ }
+ switch (cmh.cmsg_type) {
+ case SCTP_SNDINFO:
+ if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
+ return (found);
+ }
+ m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
+ sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
+ sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
+ sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
+ sndrcvinfo->sinfo_context = sndinfo.snd_context;
+ sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
+ break;
+ case SCTP_PRINFO:
+ if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
+ return (found);
+ }
+ m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
+ if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
+ sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
+ } else {
+ sndrcvinfo->sinfo_timetolive = 0;
+ }
+ sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
+ break;
+ case SCTP_AUTHINFO:
+ if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
+ return (found);
+ }
+ m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
+ sndrcvinfo->sinfo_keynumber_valid = 1;
+ sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
+ break;
+ default:
+ return (found);
+ }
+ found = 1;
+ }
+ }
+ }
+ return (found);
+}
+
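+/*
+ * Apply SCTP_INIT and SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs to a freshly
+ * created association: INIT parameters may grow the outgoing stream
+ * array, and destination-address cmsgs add confirmed remote addresses.
+ * Returns 0 on success; on failure returns 1 with *error set to EINVAL
+ * or ENOBUFS.
+ */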
+static int
+sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
+{
+#if defined(_WIN32)
+ WSACMSGHDR cmh;
+#else
+ struct cmsghdr cmh;
+#endif
+ struct sctp_initmsg initmsg;
+#ifdef INET
+ struct sockaddr_in sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+ int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
+
+ tot_len = SCTP_BUF_LEN(control);
+ for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
+ rem_len = tot_len - off;
+ if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
+ /* There is not enough room for one more. */
+ *error = EINVAL;
+ return (1);
+ }
+ m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
+ if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+			/* We don't have a complete CMSG header. */
+ *error = EINVAL;
+ return (1);
+ }
+ if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
+ /* We don't have the complete CMSG. */
+ *error = EINVAL;
+ return (1);
+ }
+ cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
+ cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
+ if (cmh.cmsg_level == IPPROTO_SCTP) {
+ switch (cmh.cmsg_type) {
+ case SCTP_INIT:
+ if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
+ *error = EINVAL;
+ return (1);
+ }
+ m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
+ if (initmsg.sinit_max_attempts)
+ stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
+ if (initmsg.sinit_num_ostreams)
+ stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
+ if (initmsg.sinit_max_instreams)
+ stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
+ if (initmsg.sinit_max_init_timeo)
+ stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
+ if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
+ struct sctp_stream_out *tmp_str;
+ unsigned int i;
+#if defined(SCTP_DETAILED_STR_STATS)
+ int j;
+#endif
+
+ /* Default is NOT correct */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
+ stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_MALLOC(tmp_str,
+ struct sctp_stream_out *,
+ (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
+ SCTP_M_STRMO);
+ SCTP_TCB_LOCK(stcb);
+ if (tmp_str != NULL) {
+ SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
+ stcb->asoc.strmout = tmp_str;
+ stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
+ } else {
+ stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
+ }
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].chunks_on_queues = 0;
+ stcb->asoc.strmout[i].next_mid_ordered = 0;
+ stcb->asoc.strmout[i].next_mid_unordered = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+ for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+ stcb->asoc.strmout[i].abandoned_sent[j] = 0;
+ stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
+ }
+#else
+ stcb->asoc.strmout[i].abandoned_sent[0] = 0;
+ stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
+#endif
+ stcb->asoc.strmout[i].sid = i;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
+ stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
+ }
+ }
+ break;
+#ifdef INET
+ case SCTP_DSTADDRV4:
+ if (cmsg_data_len < (int)sizeof(struct in_addr)) {
+ *error = EINVAL;
+ return (1);
+ }
+ memset(&sin, 0, sizeof(struct sockaddr_in));
+ sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+ sin.sin_port = stcb->rport;
+ m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
+ if ((sin.sin_addr.s_addr == INADDR_ANY) ||
+ (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+ *error = EINVAL;
+ return (1);
+ }
+ if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ *error = ENOBUFS;
+ return (1);
+ }
+ break;
+#endif
+#ifdef INET6
+ case SCTP_DSTADDRV6:
+ if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
+ *error = EINVAL;
+ return (1);
+ }
+ memset(&sin6, 0, sizeof(struct sockaddr_in6));
+ sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ sin6.sin6_port = stcb->rport;
+ m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
+ IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
+ *error = EINVAL;
+ return (1);
+ }
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
+ in6_sin6_2_sin(&sin, &sin6);
+ if ((sin.sin_addr.s_addr == INADDR_ANY) ||
+ (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+ *error = EINVAL;
+ return (1);
+ }
+ if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ *error = ENOBUFS;
+ return (1);
+ }
+ } else
+#endif
+ if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ *error = ENOBUFS;
+ return (1);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ return (0);
+}
+
+#if defined(INET) || defined(INET6)
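+/*
+ * Scan the control mbuf for destination-address cmsgs and return the
+ * first existing association matching one of them, or NULL. On a
+ * malformed cmsg, NULL is returned with *error set to EINVAL.
+ */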
+static struct sctp_tcb *
+sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
+ uint16_t port,
+ struct mbuf *control,
+ struct sctp_nets **net_p,
+ int *error)
+{
+#if defined(_WIN32)
+ WSACMSGHDR cmh;
+#else
+ struct cmsghdr cmh;
+#endif
+ struct sctp_tcb *stcb;
+ struct sockaddr *addr;
+#ifdef INET
+ struct sockaddr_in sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+ int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
+
+ tot_len = SCTP_BUF_LEN(control);
+ for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
+ rem_len = tot_len - off;
+ if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
+ /* There is not enough room for one more. */
+ *error = EINVAL;
+ return (NULL);
+ }
+ m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
+ if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+			/* We don't have a complete CMSG header. */
+ *error = EINVAL;
+ return (NULL);
+ }
+ if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
+ /* We don't have the complete CMSG. */
+ *error = EINVAL;
+ return (NULL);
+ }
+ cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
+ cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
+ if (cmh.cmsg_level == IPPROTO_SCTP) {
+ switch (cmh.cmsg_type) {
+#ifdef INET
+ case SCTP_DSTADDRV4:
+ if (cmsg_data_len < (int)sizeof(struct in_addr)) {
+ *error = EINVAL;
+ return (NULL);
+ }
+ memset(&sin, 0, sizeof(struct sockaddr_in));
+ sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+ sin.sin_port = port;
+ m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
+ addr = (struct sockaddr *)&sin;
+ break;
+#endif
+#ifdef INET6
+ case SCTP_DSTADDRV6:
+ if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
+ *error = EINVAL;
+ return (NULL);
+ }
+ memset(&sin6, 0, sizeof(struct sockaddr_in6));
+ sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ sin6.sin6_port = port;
+ m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
+ in6_sin6_2_sin(&sin, &sin6);
+ addr = (struct sockaddr *)&sin;
+ } else
+#endif
+ addr = (struct sockaddr *)&sin6;
+ break;
+#endif
+ default:
+ addr = NULL;
+ break;
+ }
+ if (addr) {
+ stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
+ if (stcb != NULL) {
+ return (stcb);
+ }
+ }
+ }
+ }
+ return (NULL);
+}
+#endif
+
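+/*
+ * The cookie built below is an mbuf chain laid out as:
+ *
+ *   paramhdr (SCTP_STATE_COOKIE) | struct sctp_state_cookie |
+ *   copy of the received INIT | copy of our INIT-ACK |
+ *   SCTP_SIGNATURE_SIZE bytes of zeros
+ *
+ * The signature bytes are zero-filled here, and *signature is pointed
+ * at them so that the caller can fill in the HMAC afterwards.
+ */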
+static struct mbuf *
+sctp_add_cookie(struct mbuf *init, int init_offset,
+ struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
+{
+ struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
+ struct sctp_state_cookie *stc;
+ struct sctp_paramhdr *ph;
+ uint16_t cookie_sz;
+
+ mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
+ sizeof(struct sctp_paramhdr)), 0,
+ M_NOWAIT, 1, MT_DATA);
+ if (mret == NULL) {
+ return (NULL);
+ }
+ copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
+ if (copy_init == NULL) {
+ sctp_m_freem(mret);
+ return (NULL);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
+ }
+#endif
+ copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
+ M_NOWAIT);
+ if (copy_initack == NULL) {
+ sctp_m_freem(mret);
+ sctp_m_freem(copy_init);
+ return (NULL);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
+ }
+#endif
+	/* the easy part: we just drop it on the end */
+ ph = mtod(mret, struct sctp_paramhdr *);
+ SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
+ sizeof(struct sctp_paramhdr);
+ stc = (struct sctp_state_cookie *)((caddr_t)ph +
+ sizeof(struct sctp_paramhdr));
+ ph->param_type = htons(SCTP_STATE_COOKIE);
+ ph->param_length = 0; /* fill in at the end */
+ /* Fill in the stc cookie data */
+ memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
+
+ /* tack the INIT and then the INIT-ACK onto the chain */
+ cookie_sz = 0;
+ for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ cookie_sz += SCTP_BUF_LEN(m_at);
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ SCTP_BUF_NEXT(m_at) = copy_init;
+ break;
+ }
+ }
+ for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ cookie_sz += SCTP_BUF_LEN(m_at);
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ SCTP_BUF_NEXT(m_at) = copy_initack;
+ break;
+ }
+ }
+ for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ cookie_sz += SCTP_BUF_LEN(m_at);
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ break;
+ }
+ }
+ sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
+ if (sig == NULL) {
+ /* no space, so free the entire chain */
+ sctp_m_freem(mret);
+ return (NULL);
+ }
+ SCTP_BUF_NEXT(m_at) = sig;
+ SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE;
+ cookie_sz += SCTP_SIGNATURE_SIZE;
+ ph->param_length = htons(cookie_sz);
+ *signature = (uint8_t *)mtod(sig, caddr_t);
+ memset(*signature, 0, SCTP_SIGNATURE_SIZE);
+ return (mret);
+}
+
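+/*
+ * Return the ECT(0) codepoint for the IP TOS / traffic class field when
+ * ECN has been negotiated for this association, and 0 otherwise.
+ */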
+static uint8_t
+sctp_get_ect(struct sctp_tcb *stcb)
+{
+ if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
+ return (SCTP_ECT0_BIT);
+ } else {
+ return (0);
+ }
+}
+
+#if defined(INET) || defined(INET6)
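+/*
+ * Bookkeeping when a packet could not be routed: mark the destination
+ * unreachable and notify the ULP, and if it was the primary destination
+ * pick an alternate net and drop the cached source address so it gets
+ * reselected.
+ */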
+static void
+sctp_handle_no_route(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int so_locked)
+{
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
+
+ if (net) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
+ if (net->dest_state & SCTP_ADDR_CONFIRMED) {
+ if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, 0,
+ (void *)net,
+ so_locked);
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state &= ~SCTP_ADDR_PF;
+ }
+ }
+ if (stcb) {
+ if (net == stcb->asoc.primary_destination) {
+ /* need a new primary */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ if (alt != net) {
+ if (stcb->asoc.alternate) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ }
+ stcb->asoc.alternate = alt;
+ atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
+ if (net->ro._s_addr) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+ }
+ }
+ }
+}
+#endif
+
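+/*
+ * Lowest-level transmit path. Returns 0 or an errno: EFAULT for an
+ * out-of-scope destination, ENOMEM when header mbufs cannot be
+ * allocated or prepended, EHOSTUNREACH when no route or source address
+ * can be found, EINVAL on scope-embedding failures, and otherwise
+ * whatever the IP output routine returns.
+ */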
+static int
+sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, /* may be NULL */
+ struct sctp_nets *net,
+ struct sockaddr *to,
+ struct mbuf *m,
+ uint32_t auth_offset,
+ struct sctp_auth_chunk *auth,
+ uint16_t auth_keyid,
+ int nofragment_flag,
+ int ecn_ok,
+ int out_of_asoc_ok,
+ uint16_t src_port,
+ uint16_t dest_port,
+ uint32_t v_tag,
+ uint16_t port,
+ union sctp_sockstore *over_addr,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+int so_locked)
+/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
+{
+ /**
+ * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
+ * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
+ * - fill in the HMAC digest of any AUTH chunk in the packet.
+ * - calculate and fill in the SCTP checksum.
+ * - prepend an IP address header.
+ * - if boundall use INADDR_ANY.
+ * - if boundspecific do source address selection.
+	 * - set fragmentation option for IPv4.
+ * - On return from IP output, check/adjust mtu size of output
+ * interface and smallest_mtu size as well.
+ */
+ /* Will need ifdefs around this */
+ struct mbuf *newm;
+ struct sctphdr *sctphdr;
+ int packet_length;
+ int ret;
+#if defined(INET) || defined(INET6)
+ uint32_t vrf_id;
+#endif
+#if defined(INET) || defined(INET6)
+ struct mbuf *o_pak;
+ sctp_route_t *ro = NULL;
+ struct udphdr *udp = NULL;
+#endif
+ uint8_t tos_value;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so = NULL;
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+ if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ sctp_m_freem(m);
+ return (EFAULT);
+ }
+#if defined(INET) || defined(INET6)
+ if (stcb) {
+ vrf_id = stcb->asoc.vrf_id;
+ } else {
+ vrf_id = inp->def_vrf_id;
+ }
+#endif
+ /* fill in the HMAC digest for any AUTH chunk in the packet */
+ if ((auth != NULL) && (stcb != NULL)) {
+ sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
+ }
+
+ if (net) {
+ tos_value = net->dscp;
+ } else if (stcb) {
+ tos_value = stcb->asoc.default_dscp;
+ } else {
+ tos_value = inp->sctp_ep.default_dscp;
+ }
+
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct ip *ip = NULL;
+ sctp_route_t iproute;
+ int len;
+
+ len = SCTP_MIN_V4_OVERHEAD;
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net != NULL) {
+ m->m_pkthdr.flowid = net->flowid;
+ M_HASHTYPE_SET(m, net->flowtype);
+ } else {
+ m->m_pkthdr.flowid = mflowid;
+ M_HASHTYPE_SET(m, mflowtype);
+ }
+#endif
+ packet_length = sctp_calculate_len(m);
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = (sizeof(struct ip) >> 2);
+ if (tos_value == 0) {
+ /*
+				 * This means, in particular, that it is not set at the
+				 * SCTP layer, so use the value from the IP layer.
+ */
+ tos_value = inp->ip_inp.inp.inp_ip_tos;
+ }
+ tos_value &= 0xfc;
+ if (ecn_ok) {
+ tos_value |= sctp_get_ect(stcb);
+ }
+ if ((nofragment_flag) && (port == 0)) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ip->ip_off = htons(IP_DF);
+#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
+ ip->ip_off = IP_DF;
+#else
+ ip->ip_off = htons(IP_DF);
+#endif
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ip->ip_off = htons(0);
+#else
+ ip->ip_off = 0;
+#endif
+ }
+#if defined(__Userspace__)
+ ip->ip_id = htons(SCTP_IP_ID(inp)++);
+#elif defined(__FreeBSD__)
+ /* FreeBSD has a function for ip_id's */
+ ip_fillid(ip);
+#elif defined(__APPLE__)
+#if RANDOM_IP_ID
+ ip->ip_id = ip_randomid();
+#else
+ ip->ip_id = htons(ip_id++);
+#endif
+#else
+ ip->ip_id = SCTP_IP_ID(inp)++;
+#endif
+
+ ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ip->ip_len = htons(packet_length);
+#else
+ ip->ip_len = packet_length;
+#endif
+ ip->ip_tos = tos_value;
+ if (port) {
+ ip->ip_p = IPPROTO_UDP;
+ } else {
+ ip->ip_p = IPPROTO_SCTP;
+ }
+ ip->ip_sum = 0;
+ if (net == NULL) {
+ ro = &iproute;
+ memset(&iproute, 0, sizeof(iproute));
+#ifdef HAVE_SA_LEN
+ memcpy(&ro->ro_dst, to, to->sa_len);
+#else
+ memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
+#endif
+ } else {
+ ro = (sctp_route_t *)&net->ro;
+ }
+ /* Now the address selection part */
+ ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
+
+ /* call the routine to select the src address */
+ if (net && out_of_asoc_ok == 0) {
+ if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(ro);
+#else
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+#endif
+ }
+ if (net->src_addr_selected == 0) {
+ /* Cache the source address */
+ net->ro._s_addr = sctp_source_address_selection(inp,stcb,
+ ro, net, 0,
+ vrf_id);
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr == NULL) {
+ /* No route to host */
+ net->src_addr_selected = 0;
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
+ } else {
+ if (over_addr == NULL) {
+ struct sctp_ifa *_lsrc;
+
+ _lsrc = sctp_source_address_selection(inp, stcb, ro,
+ net,
+ out_of_asoc_ok,
+ vrf_id);
+ if (_lsrc == NULL) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ ip->ip_src = _lsrc->address.sin.sin_addr;
+ sctp_free_ifa(_lsrc);
+ } else {
+ ip->ip_src = over_addr->sin.sin_addr;
+ SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
+ }
+ }
+ if (port) {
+ if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
+#if !defined(__Userspace__)
+#if defined(__FreeBSD__)
+ if (V_udp_cksum) {
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ } else {
+ udp->uh_sum = 0;
+ }
+#else
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+#endif
+#else
+ udp->uh_sum = 0;
+#endif
+ sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
+ }
+
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+
+ /*
+ * If source address selection fails and we find no route
+ * then the ip_output should fail as well with a
+ * NO_ROUTE_TO_HOST type error. We probably should catch
+ * that somewhere and abort the association right away
+ * (assuming this is an INIT being sent).
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ro->ro_nh == NULL) {
+#else
+ if (ro->ro_rt == NULL) {
+#endif
+ /*
+ * src addr selection failed to find a route (or
+ * valid source addr), so we can't get there from
+ * here (yet)!
+ */
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ if (ro != &iproute) {
+ memcpy(&iproute, ro, sizeof(*ro));
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
+ (uint32_t) (ntohl(ip->ip_src.s_addr)));
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
+ (uint32_t)(ntohl(ip->ip_dst.s_addr)));
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
+ (void *)ro->ro_nh);
+#else
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
+ (void *)ro->ro_rt);
+#endif
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* failed to prepend data, give up */
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ sctp_m_freem(m);
+ return (ENOMEM);
+ }
+ SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+ if (port) {
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#if !defined(__Userspace__)
+#if defined(__FreeBSD__)
+ if (V_udp_cksum) {
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ }
+#else
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+#endif
+#endif
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ m->m_pkthdr.csum_flags = CSUM_SCTP;
+ m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+ if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (stcb) && (stcb->asoc.scope.loopback_scope))) {
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+ }
+#endif
+ }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(o_pak);
+#endif
+ /* send it out. table id is taken from stcb */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ so = SCTP_INP_SO(inp);
+ SCTP_SOCKET_UNLOCK(so, 0);
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
+#endif
+ SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 0);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (port) {
+ UDPSTAT_INC(udps_opackets);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret)
+ SCTP_STAT_INCR(sctps_senderrors);
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
+ if (net == NULL) {
+		/* free temporary routes */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(ro);
+#else
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+#endif
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
+#else
+ if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
+#endif
+ ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
+ uint32_t mtu;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
+#else
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+#endif
+ if (mtu > 0) {
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+ if (mtu < net->mtu) {
+ if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+ }
+ net->mtu = mtu;
+ }
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else if (ro->ro_nh == NULL) {
+#else
+ } else if (ro->ro_rt == NULL) {
+#endif
+ /* route was freed */
+ if (net->ro._s_addr &&
+ net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+ }
+ return (ret);
+ }
+#endif
+#ifdef INET6
+			/*
+			 * If the address is a loopback address, i.e. "::1"
+			 * or "fe80::1%lo0", we are loopback scope. But we
+			 * don't set dest_is_priv (that is for link-local
+			 * addresses).
+			 */
+#if !defined(__Userspace__)
+ struct ifnet *ifp;
+#endif
+ struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
+ int prev_scope = 0;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ struct sockaddr_in6 lsa6_storage;
+ int error;
+#endif
+ u_short prev_port = 0;
+ int len;
+
+ if (net) {
+ flowlabel = net->flowlabel;
+ } else if (stcb) {
+ flowlabel = stcb->asoc.default_flowlabel;
+ } else {
+ flowlabel = inp->sctp_ep.default_flowlabel;
+ }
+ if (flowlabel == 0) {
+ /*
+			 * This means, in particular, that it is not set at the
+			 * SCTP layer, so use the value from the IP layer.
+ */
+#if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+ flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
+#else
+ flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
+#endif
+ }
+ flowlabel &= 0x000fffff;
+ len = SCTP_MIN_OVERHEAD;
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net != NULL) {
+ m->m_pkthdr.flowid = net->flowid;
+ M_HASHTYPE_SET(m, net->flowtype);
+ } else {
+ m->m_pkthdr.flowid = mflowid;
+ M_HASHTYPE_SET(m, mflowtype);
+ }
+#endif
+ packet_length = sctp_calculate_len(m);
+
+ ip6h = mtod(m, struct ip6_hdr *);
+ /* protect *sin6 from overwrite */
+ sin6 = (struct sockaddr_in6 *)to;
+ tmp = *sin6;
+ sin6 = &tmp;
+
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ /* KAME hack: embed scopeid */
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
+#endif
+#elif defined(SCTP_KAME)
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
+#endif
+ {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ sctp_m_freem(m);
+ return (EINVAL);
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ if (net == NULL) {
+ memset(&ip6route, 0, sizeof(ip6route));
+ ro = (sctp_route_t *)&ip6route;
+#ifdef HAVE_SIN6_LEN
+ memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
+#else
+ memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
+#endif
+ } else {
+ ro = (sctp_route_t *)&net->ro;
+ }
+ /*
+ * We assume here that inp_flow is in host byte order within
+ * the TCB!
+ */
+ if (tos_value == 0) {
+ /*
+			 * This means, in particular, that it is not set at the
+			 * SCTP layer, so use the value from the IP layer.
+ */
+#if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+ tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
+#else
+ tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
+#endif
+ }
+ tos_value &= 0xfc;
+ if (ecn_ok) {
+ tos_value |= sctp_get_ect(stcb);
+ }
+ flowinfo = 0x06;
+ flowinfo <<= 8;
+ flowinfo |= tos_value;
+ flowinfo <<= 20;
+ flowinfo |= flowlabel;
+ ip6h->ip6_flow = htonl(flowinfo);
+ if (port) {
+ ip6h->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6h->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
+ ip6h->ip6_dst = sin6->sin6_addr;
+
+ /*
+			 * Source address selection happens here: we can only
+			 * reuse the KAME src-addr-sel to a limited degree, since
+			 * we can try its selection but the result may not be bound.
+ */
+ memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
+ lsa6_tmp.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
+#endif
+ lsa6 = &lsa6_tmp;
+ if (net && out_of_asoc_ok == 0) {
+ if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(ro);
+#else
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+#endif
+ }
+ if (net->src_addr_selected == 0) {
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* KAME hack: embed scopeid */
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
+#endif
+#elif defined(SCTP_KAME)
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
+#endif
+ {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ sctp_m_freem(m);
+ return (EINVAL);
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ /* Cache the source address */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb,
+ ro,
+ net,
+ 0,
+ vrf_id);
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(sin6);
+#else
+ (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif /* SCTP_KAME */
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
+ net->src_addr_selected = 0;
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
+ } else {
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
+ /* KAME hack: embed scopeid */
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
+#endif
+#elif defined(SCTP_KAME)
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
+#endif
+ {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ sctp_m_freem(m);
+ return (EINVAL);
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ if (over_addr == NULL) {
+ struct sctp_ifa *_lsrc;
+
+ _lsrc = sctp_source_address_selection(inp, stcb, ro,
+ net,
+ out_of_asoc_ok,
+ vrf_id);
+ if (_lsrc == NULL) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
+ sctp_free_ifa(_lsrc);
+ } else {
+ lsa6->sin6_addr = over_addr->sin6.sin6_addr;
+ SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
+ }
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(sin6);
+#else
+ (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif /* SCTP_KAME */
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ }
+ lsa6->sin6_port = inp->sctp_lport;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ro->ro_nh == NULL) {
+#else
+ if (ro->ro_rt == NULL) {
+#endif
+ /*
+ * src addr selection failed to find a route (or
+ * valid source addr), so we can't get there from
+ * here!
+ */
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+#ifndef SCOPEDROUTING
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ /*
+ * XXX: sa6 may not have a valid sin6_scope_id in the
+ * non-SCOPEDROUTING case.
+ */
+ memset(&lsa6_storage, 0, sizeof(lsa6_storage));
+ lsa6_storage.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ lsa6_storage.sin6_len = sizeof(lsa6_storage);
+#endif
+#ifdef SCTP_KAME
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
+#else
+ if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
+ NULL)) != 0) {
+#endif /* SCTP_KAME */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
+ sctp_m_freem(m);
+ return (error);
+ }
+ /* XXX */
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ lsa6_storage.sin6_port = inp->sctp_lport;
+ lsa6 = &lsa6_storage;
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif /* SCOPEDROUTING */
+ ip6h->ip6_src = lsa6->sin6_addr;
+
+ if (port) {
+ if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+ sctp_handle_no_route(stcb, net, so_locked);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
+ udp->uh_sum = 0;
+ sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+ }
+
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+
+ /*
+ * We set the hop limit now since there is a good chance
+ * that our ro pointer is now filled
+ */
+ ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
+#if !defined(__Userspace__)
+ ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+#endif
+
+#ifdef SCTP_DEBUG
+ /* Copy to be sure something bad is not happening */
+ sin6->sin6_addr = ip6h->ip6_dst;
+ lsa6->sin6_addr = ip6h->ip6_src;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
+ if (net) {
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* preserve the port and scope for link local send */
+ prev_scope = sin6->sin6_scope_id;
+ prev_port = sin6->sin6_port;
+ }
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* failed to prepend data, give up */
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
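+		/*
+		 * Checksum handling: with UDP encapsulation the CRC32c is
+		 * computed in software (and a UDP checksum filled in on
+		 * non-Windows platforms). Otherwise FreeBSD requests CRC32c
+		 * offload via csum_flags, while other platforms compute a
+		 * software CRC32c unless this is loopback traffic and the
+		 * no-csum-on-loopback sysctl says to skip it.
+		 */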
+ if (port) {
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#if !defined(__Userspace__)
+#if defined(_WIN32)
+ udp->uh_sum = 0;
+#else
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
+#endif
+#endif
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
+ m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+ if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (stcb) && (stcb->asoc.scope.loopback_scope))) {
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+ }
+#endif
+ }
+ /* send it out. table id is taken from stcb */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ so = SCTP_INP_SO(inp);
+ SCTP_SOCKET_UNLOCK(so, 0);
+ }
+#endif
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(o_pak);
+#endif
+#if !defined(__Userspace__)
+#if defined(__FreeBSD__)
+ SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
+#endif
+ SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
+#else
+ SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 0);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+#endif
+ if (net) {
+ /* for link local this must be done */
+ sin6->sin6_scope_id = prev_scope;
+ sin6->sin6_port = prev_port;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (port) {
+ UDPSTAT_INC(udps_opackets);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret) {
+ SCTP_STAT_INCR(sctps_senderrors);
+ }
+ if (net == NULL) {
+		/* Now if we had a temp route, free it */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(ro);
+#else
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+#endif
+ } else {
+ /* PMTU check versus smallest asoc MTU goes here */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ro->ro_nh == NULL) {
+#else
+ if (ro->ro_rt == NULL) {
+#endif
+ /* Route was freed */
+ if (net->ro._s_addr &&
+ net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
+#else
+ if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
+#endif
+ ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
+ uint32_t mtu;
+
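+			/*
+			 * Shrink the path MTU if the route reports a smaller
+			 * one, accounting for UDP encapsulation overhead.
+			 */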
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
+#else
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+#endif
+ if (mtu > 0) {
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+ if (mtu < net->mtu) {
+ if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+ }
+ net->mtu = mtu;
+ }
+ }
+ }
+#if !defined(__Userspace__)
+ else if (ifp) {
+#if defined(_WIN32)
+#define ND_IFINFO(ifp) (ifp)
+#define linkmtu if_mtu
+#endif
+ if (ND_IFINFO(ifp)->linkmtu &&
+ (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
+ sctp_mtu_size_reset(inp,
+ &stcb->asoc,
+ ND_IFINFO(ifp)->linkmtu);
+ }
+ }
+#endif
+ }
+ return (ret);
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ char *buffer;
+ struct sockaddr_conn *sconn;
+ int len;
+
+ sconn = (struct sockaddr_conn *)to;
+ len = sizeof(struct sctphdr);
+ newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+ packet_length = sctp_calculate_len(m);
+ sctphdr = mtod(m, struct sctphdr *);
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+ if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
+ sctphdr->checksum = sctp_calculate_cksum(m, 0);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+ }
+ if (tos_value == 0) {
+ tos_value = inp->ip_inp.inp.inp_ip_tos;
+ }
+ tos_value &= 0xfc;
+ if (ecn_ok) {
+ tos_value |= sctp_get_ect(stcb);
+ }
+			/*
+			 * Flatten the mbuf chain into a contiguous buffer and
+			 * hand it to the registered conn_output callback.
+			 * TODO: don't alloc/free for each packet.
+			 */
+ if ((buffer = malloc(packet_length)) != NULL) {
+ m_copydata(m, 0, packet_length, buffer);
+ ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
+ free(buffer);
+ } else {
+ ret = ENOMEM;
+ }
+ sctp_m_freem(m);
+ return (ret);
+ }
+#endif
+ default:
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
+ ((struct sockaddr *)to)->sa_family);
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ return (EFAULT);
+ }
+}
+
+
+void
+sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
+{
+ struct mbuf *m, *m_last;
+ struct sctp_nets *net;
+ struct sctp_init_chunk *init;
+ struct sctp_supported_addr_param *sup_addr;
+ struct sctp_adaptation_layer_indication *ali;
+ struct sctp_supported_chunk_types_param *pr_supported;
+ struct sctp_paramhdr *ph;
+ int cnt_inits_to = 0;
+ int error;
+ uint16_t num_ext, chunk_len, padding_len, parameter_len;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+	/* INITs always go to the primary (and usually the ONLY) address */
+ net = stcb->asoc.primary_destination;
+ if (net == NULL) {
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ /* TSNH */
+ return;
+ }
+ /* we confirm any address we send an INIT to */
+ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ (void)sctp_set_primary_addr(stcb, NULL, net);
+ } else {
+ /* we confirm any address we send an INIT to */
+ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
+#ifdef INET6
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+		/*
+		 * Special hook: if we are sending to a link-local address, it
+		 * will not show up in our private address count.
+		 */
+ if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
+ cnt_inits_to = 1;
+ }
+#endif
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ /* This case should not happen */
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
+ return;
+ }
+ /* start the INIT timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* No memory, INIT timer will re-attempt. */
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
+ return;
+ }
+ chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
+ padding_len = 0;
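+	/*
+	 * Parameters are appended at mtod(m, caddr_t) + chunk_len. Each
+	 * parameter must start on a 32-bit boundary, so any padding owed by
+	 * the previous parameter (padding_len) is written out just before the
+	 * next one is added.
+	 */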
+	/* Now let's put the chunk header in place */
+ init = mtod(m, struct sctp_init_chunk *);
+ /* now the chunk header */
+ init->ch.chunk_type = SCTP_INITIATION;
+ init->ch.chunk_flags = 0;
+ /* fill in later from mbuf we build */
+ init->ch.chunk_length = 0;
+ /* place in my tag */
+ init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
+ /* set up some of the credits. */
+ init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
+ SCTP_MINIMAL_RWND));
+ init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
+ init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
+ init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
+
+ /* Adaptation layer indication parameter */
+ if (inp->sctp_ep.adaptation_layer_indicator_provided) {
+ parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
+ ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(parameter_len);
+ ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
+ chunk_len += parameter_len;
+ }
+
+ /* ECN parameter */
+ if (stcb->asoc.ecn_supported == 1) {
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+ ph->param_type = htons(SCTP_ECN_CAPABLE);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+
+ /* PR-SCTP supported parameter */
+ if (stcb->asoc.prsctp_supported == 1) {
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+ ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+
+ /* Add NAT friendly parameter. */
+ if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+ ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+
+ /* And now tell the peer which extensions we support */
+ num_ext = 0;
+ pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
+ if (stcb->asoc.prsctp_supported == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ if (stcb->asoc.idata_supported) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
+ }
+ }
+ if (stcb->asoc.auth_supported == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ }
+ if (stcb->asoc.asconf_supported == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+ }
+ if (stcb->asoc.reconfig_supported == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+ }
+ if (stcb->asoc.idata_supported) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
+ }
+ if (stcb->asoc.nrsack_supported == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
+ }
+ if (stcb->asoc.pktdrop_supported == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+ }
+ if (num_ext > 0) {
+ parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
+ pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+ pr_supported->ph.param_length = htons(parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+ }
+ /* add authentication parameters */
+ if (stcb->asoc.auth_supported) {
+ /* attach RANDOM parameter, if available */
+ if (stcb->asoc.authinfo.random != NULL) {
+ struct sctp_auth_random *randp;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
+ parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
+ /* random key already contains the header */
+ memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+ }
+ /* add HMAC_ALGO parameter */
+ if (stcb->asoc.local_hmacs != NULL) {
+ struct sctp_auth_hmac_algo *hmacs;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
+ parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
+ stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
+ hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+ hmacs->ph.param_length = htons(parameter_len);
+ sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+ }
+ /* add CHUNKS parameter */
+ if (stcb->asoc.local_auth_chunks != NULL) {
+ struct sctp_auth_chunk_list *chunks;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
+ parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
+ sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
+ chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+ chunks->ph.param_length = htons(parameter_len);
+ sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+ }
+ }
+
+ /* now any cookie time extensions */
+ if (stcb->asoc.cookie_preserve_req) {
+ struct sctp_cookie_perserve_param *cookie_preserve;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
+ cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
+ cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
+ cookie_preserve->ph.param_length = htons(parameter_len);
+ cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
+ stcb->asoc.cookie_preserve_req = 0;
+ chunk_len += parameter_len;
+ }
+
+ if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
+ uint8_t i;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ parameter_len += (uint16_t)sizeof(uint16_t);
+ }
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ parameter_len += (uint16_t)sizeof(uint16_t);
+ }
+ sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
+ sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
+ sup_addr->ph.param_length = htons(parameter_len);
+ i = 0;
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
+ }
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
+ }
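+		/*
+		 * With one address type the parameter is 6 bytes long and
+		 * needs 2 bytes of padding; with two it is 8 bytes and needs
+		 * none, hence 4 - 2 * i.
+		 */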
+ padding_len = 4 - 2 * i;
+ chunk_len += parameter_len;
+ }
+
+ SCTP_BUF_LEN(m) = chunk_len;
+ /* now the addresses */
+ /* To optimize this we could put the scoping stuff
+ * into a structure and remove the individual uint8's from
+ * the assoc structure. Then we could just sifa in the
+ * address within the stcb. But for now this is a quick
+ * hack to get the address stuff teased apart.
+ */
+ m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
+ m, cnt_inits_to,
+ &padding_len, &chunk_len);
+
+ init->ch.chunk_length = htons(chunk_len);
+ if (padding_len > 0) {
+ if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
+ sctp_m_freem(m);
+ return;
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m, 0, NULL, 0, 0, 0, 0,
+ inp->sctp_lport, stcb->rport, htonl(0),
+ net->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
+ if (error == ENOBUFS) {
+ stcb->asoc.ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ } else {
+ stcb->asoc.ifp_had_enobuf = 0;
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+}
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
+ int param_offset, int *abort_processing,
+ struct sctp_chunkhdr *cp,
+ int *nat_friendly,
+ int *cookie_found)
+{
+	/*
+	 * Given an mbuf containing an INIT or INIT-ACK with the param_offset
+	 * set to the beginning of the parameters (i.e. iphlen +
+	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
+	 * end of the mbuf, verifying that all parameters are known.
+	 *
+	 * For unknown parameters build and return an mbuf with
+	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
+	 * processing this chunk, stop and set *abort_processing to 1.
+	 *
+	 * By having param_offset be pre-set to where parameters begin, it is
+	 * hoped that this routine may be reused in the future by new
+	 * features.
+	 */
+ struct sctp_paramhdr *phdr, params;
+
+ struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
+ int at, limit, pad_needed;
+ uint16_t ptype, plen, padded_size;
+
+ *abort_processing = 0;
+ if (cookie_found != NULL) {
+ *cookie_found = 0;
+ }
+ mat = in_initpkt;
+ limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
+ at = param_offset;
+ op_err = NULL;
+ op_err_last = NULL;
+ pad_needed = 0;
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
+ phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+ while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
+			/* whacked parameter */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
+ goto invalid_size;
+ }
+ limit -= SCTP_SIZE32(plen);
+		/*-
+		 * All parameters for all chunks that we know/understand are
+		 * listed here. We process them in other places and take the
+		 * appropriate stop action per the upper bits. However, this is
+		 * the generic routine processors can call to get back an
+		 * operr to either incorporate (init-ack) or send.
+		 */
+ padded_size = SCTP_SIZE32(plen);
+ switch (ptype) {
+		/* Params with variable size */
+ case SCTP_HEARTBEAT_INFO:
+ case SCTP_UNRECOG_PARAM:
+ case SCTP_ERROR_CAUSE_IND:
+ /* ok skip fwd */
+ at += padded_size;
+ break;
+ case SCTP_STATE_COOKIE:
+ if (cookie_found != NULL) {
+ *cookie_found = 1;
+ }
+ at += padded_size;
+ break;
+		/* Params with variable size within a range */
+ case SCTP_CHUNK_LIST:
+ case SCTP_SUPPORTED_CHUNK_EXT:
+ if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_SUPPORTED_ADDRTYPE:
+ if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_RANDOM:
+ if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ case SCTP_DEL_IP_ADDRESS:
+ case SCTP_ADD_IP_ADDRESS:
+ if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
+ (padded_size != sizeof(struct sctp_asconf_addr_param))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+		/* Params with a fixed size */
+ case SCTP_IPV4_ADDRESS:
+ if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_IPV6_ADDRESS:
+ if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_COOKIE_PRESERVE:
+ if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_HAS_NAT_SUPPORT:
+ *nat_friendly = 1;
+ /* fall through */
+ case SCTP_PRSCTP_SUPPORTED:
+ if (padded_size != sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_ECN_CAPABLE:
+ if (padded_size != sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_ULP_ADAPTATION:
+ if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_SUCCESS_REPORT:
+ if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_HOSTNAME_ADDRESS:
+ {
+ /* Hostname parameters are deprecated. */
+ struct sctp_gen_error_cause *cause;
+ int l_len;
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
+ *abort_processing = 1;
+ sctp_m_freem(op_err);
+ op_err = NULL;
+ op_err_last = NULL;
+#ifdef INET6
+ l_len = SCTP_MIN_OVERHEAD;
+#else
+ l_len = SCTP_MIN_V4_OVERHEAD;
+#endif
+ l_len += sizeof(struct sctp_chunkhdr);
+ l_len += sizeof(struct sctp_gen_error_cause);
+ op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err != NULL) {
+ /*
+ * Pre-reserve space for IP, SCTP, and
+ * chunk header.
+ */
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
+ cause = mtod(op_err, struct sctp_gen_error_cause *);
+ cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
+ cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
+ SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
+ if (SCTP_BUF_NEXT(op_err) == NULL) {
+ sctp_m_freem(op_err);
+ op_err = NULL;
+ op_err_last = NULL;
+ }
+ }
+ return (op_err);
+ }
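+		/*
+		 * Unknown parameter: the top two bits of the parameter type
+		 * encode the required action (RFC 4960, Section 3.2.1):
+		 * 0x4000 set means report the parameter in an operational
+		 * error, 0x8000 set means continue processing after it.
+		 */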
+ default:
+			/*
+			 * We do not recognize the parameter; figure out what
+			 * to do.
+			 */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
+ if ((ptype & 0x4000) == 0x4000) {
+ /* Report bit is set?? */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
+ if (op_err == NULL) {
+ int l_len;
+ /* Ok need to try to get an mbuf */
+#ifdef INET6
+ l_len = SCTP_MIN_OVERHEAD;
+#else
+ l_len = SCTP_MIN_V4_OVERHEAD;
+#endif
+ l_len += sizeof(struct sctp_chunkhdr);
+ l_len += sizeof(struct sctp_paramhdr);
+ op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err) {
+ SCTP_BUF_LEN(op_err) = 0;
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ op_err_last = op_err;
+ }
+ }
+ if (op_err != NULL) {
+ /* If we have space */
+ struct sctp_paramhdr *param;
+
+ if (pad_needed > 0) {
+ op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
+ }
+ if (op_err_last == NULL) {
+ sctp_m_freem(op_err);
+ op_err = NULL;
+ op_err_last = NULL;
+ goto more_processing;
+ }
+ if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
+ m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_tmp == NULL) {
+ sctp_m_freem(op_err);
+ op_err = NULL;
+ op_err_last = NULL;
+ goto more_processing;
+ }
+ SCTP_BUF_LEN(m_tmp) = 0;
+ SCTP_BUF_NEXT(m_tmp) = NULL;
+ SCTP_BUF_NEXT(op_err_last) = m_tmp;
+ op_err_last = m_tmp;
+ }
+ param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last));
+ param->param_type = htons(SCTP_UNRECOG_PARAM);
+ param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
+ SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
+ SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
+ if (SCTP_BUF_NEXT(op_err_last) == NULL) {
+ sctp_m_freem(op_err);
+ op_err = NULL;
+ op_err_last = NULL;
+ goto more_processing;
+ } else {
+ while (SCTP_BUF_NEXT(op_err_last) != NULL) {
+ op_err_last = SCTP_BUF_NEXT(op_err_last);
+ }
+ }
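+				/*
+				 * Note how much padding the parameter just
+				 * copied still owes; it is added before the
+				 * next error cause is appended.
+				 */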
+ if (plen % 4 != 0) {
+ pad_needed = 4 - (plen % 4);
+ } else {
+ pad_needed = 0;
+ }
+ }
+ }
+ more_processing:
+ if ((ptype & 0x8000) == 0x0000) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
+ return (op_err);
+ } else {
+ /* skip this chunk and continue processing */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
+ at += SCTP_SIZE32(plen);
+ }
+ break;
+
+ }
+ phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+ }
+ return (op_err);
+ invalid_size:
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
+ *abort_processing = 1;
+ sctp_m_freem(op_err);
+ op_err = NULL;
+ op_err_last = NULL;
+ if (phdr != NULL) {
+ struct sctp_paramhdr *param;
+ int l_len;
+#ifdef INET6
+ l_len = SCTP_MIN_OVERHEAD;
+#else
+ l_len = SCTP_MIN_V4_OVERHEAD;
+#endif
+ l_len += sizeof(struct sctp_chunkhdr);
+ l_len += (2 * sizeof(struct sctp_paramhdr));
+ op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (op_err) {
+ SCTP_BUF_LEN(op_err) = 0;
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
+ param = mtod(op_err, struct sctp_paramhdr *);
+ param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
+ param++;
+ param->param_type = htons(ptype);
+ param->param_length = htons(plen);
+ }
+ }
+ return (op_err);
+}
+
+static int
+sctp_are_there_new_addresses(struct sctp_association *asoc,
+ struct mbuf *in_initpkt, int offset, struct sockaddr *src)
+{
+	/*
+	 * Given an INIT packet, look through the packet to verify that there
+	 * are NO new addresses. As we go through the parameters, add reports
+	 * of any un-understood parameters that require an error. Also, we
+	 * must return (1) to drop the packet if we see an un-understood
+	 * parameter that tells us to drop the chunk.
+	 */
+ struct sockaddr *sa_touse;
+ struct sockaddr *sa;
+ struct sctp_paramhdr *phdr, params;
+ uint16_t ptype, plen;
+ uint8_t fnd;
+ struct sctp_nets *net;
+ int check_src;
+#ifdef INET
+ struct sockaddr_in sin4, *sa4;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6, *sa6;
+#endif
+#if defined(__Userspace__)
+ struct sockaddr_conn *sac;
+#endif
+
+#ifdef INET
+ memset(&sin4, 0, sizeof(sin4));
+ sin4.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin4.sin_len = sizeof(sin4);
+#endif
+#endif
+#ifdef INET6
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6.sin6_len = sizeof(sin6);
+#endif
+#endif
+ /* First what about the src address of the pkt ? */
+ check_src = 0;
+ switch (src->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (asoc->scope.ipv4_addr_legal) {
+ check_src = 1;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (asoc->scope.ipv6_addr_legal) {
+ check_src = 1;
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (asoc->scope.conn_addr_legal) {
+ check_src = 1;
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ if (check_src) {
+ fnd = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sa = (struct sockaddr *)&net->ro._l_addr;
+ if (sa->sa_family == src->sa_family) {
+#ifdef INET
+ if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *src4;
+
+ sa4 = (struct sockaddr_in *)sa;
+ src4 = (struct sockaddr_in *)src;
+ if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src6;
+
+ sa6 = (struct sockaddr_in6 *)sa;
+ src6 = (struct sockaddr_in6 *)src;
+ if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+#if defined(__Userspace__)
+ if (sa->sa_family == AF_CONN) {
+ struct sockaddr_conn *srcc;
+
+ sac = (struct sockaddr_conn *)sa;
+ srcc = (struct sockaddr_conn *)src;
+ if (sac->sconn_addr == srcc->sconn_addr) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+ }
+ }
+ if (fnd == 0) {
+ /* New address added! no need to look further. */
+ return (1);
+ }
+ }
+	/* OK so far; let's munge through the rest of the packet */
+ offset += sizeof(struct sctp_init_chunk);
+ phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
+ while (phdr) {
+ sa_touse = NULL;
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ switch (ptype) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ {
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ if (plen != sizeof(struct sctp_ipv4addr_param)) {
+ return (1);
+ }
+ phdr = sctp_get_next_param(in_initpkt, offset,
+ (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
+ if (phdr == NULL) {
+ return (1);
+ }
+ if (asoc->scope.ipv4_addr_legal) {
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin4.sin_addr.s_addr = p4->addr;
+ sa_touse = (struct sockaddr *)&sin4;
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ {
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ if (plen != sizeof(struct sctp_ipv6addr_param)) {
+ return (1);
+ }
+ phdr = sctp_get_next_param(in_initpkt, offset,
+ (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
+ if (phdr == NULL) {
+ return (1);
+ }
+ if (asoc->scope.ipv6_addr_legal) {
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+ sizeof(p6->addr));
+ sa_touse = (struct sockaddr *)&sin6;
+ }
+ break;
+ }
+#endif
+ default:
+ sa_touse = NULL;
+ break;
+ }
+ if (sa_touse) {
+ /* ok, sa_touse points to one to check */
+ fnd = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sa = (struct sockaddr *)&net->ro._l_addr;
+ if (sa->sa_family != sa_touse->sa_family) {
+ continue;
+ }
+#ifdef INET
+ if (sa->sa_family == AF_INET) {
+ sa4 = (struct sockaddr_in *)sa;
+ if (sa4->sin_addr.s_addr ==
+ sin4.sin_addr.s_addr) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ sa6 = (struct sockaddr_in6 *)sa;
+ if (SCTP6_ARE_ADDR_EQUAL(
+ sa6, &sin6)) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+ }
+ if (!fnd) {
+ /* New addr added! no need to look further */
+ return (1);
+ }
+ }
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
+ }
+ return (0);
+}
+
+/*
+ * Given an MBUF chain that was sent to us containing an INIT, build an
+ * INIT-ACK with a COOKIE and send it back. We assume that in_initpkt has been
+ * pulled up to include the IPv6/IPv4 header, the SCTP header, and the initial
+ * part of the INIT message (i.e. the struct sctp_init_msg).
+ */
+void
+sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *src_net, struct mbuf *init_pkt,
+ int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_init_chunk *init_chk,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_association *asoc;
+ struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
+ struct sctp_init_ack_chunk *initack;
+ struct sctp_adaptation_layer_indication *ali;
+ struct sctp_supported_chunk_types_param *pr_supported;
+ struct sctp_paramhdr *ph;
+ union sctp_sockstore *over_addr;
+ struct sctp_scoping scp;
+ struct timeval now;
+#ifdef INET
+ struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
+ struct sockaddr_in *src4 = (struct sockaddr_in *)src;
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
+ struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
+ struct sockaddr_in6 *sin6;
+#endif
+#if defined(__Userspace__)
+ struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
+ struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
+ struct sockaddr_conn *sconn;
+#endif
+ struct sockaddr *to;
+ struct sctp_state_cookie stc;
+ struct sctp_nets *net = NULL;
+ uint8_t *signature = NULL;
+ int cnt_inits_to = 0;
+ uint16_t his_limit, i_want;
+ int abort_flag;
+ int nat_friendly = 0;
+ int error;
+ struct socket *so;
+ uint16_t num_ext, chunk_len, padding_len, parameter_len;
+
+ if (stcb) {
+ asoc = &stcb->asoc;
+ } else {
+ asoc = NULL;
+ }
+ if ((asoc != NULL) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
+ if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
+ /*
+ * new addresses, out of here in non-cookie-wait states
+ *
+ * Send an ABORT, without the new address error cause.
+ * This looks no different than if no listener
+ * was present.
+ */
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Address added");
+ sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ return;
+ }
+ if (src_net != NULL && (src_net->port != port)) {
+ /*
+ * change of remote encapsulation port, out of here in
+ * non-cookie-wait states
+ *
+			 * Send an ABORT, without a specific error cause.
+ * This looks no different than if no listener
+ * was present.
+ */
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Remote encapsulation port changed");
+ sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ return;
+ }
+ }
+ abort_flag = 0;
+ op_err = sctp_arethere_unrecognized_parameters(init_pkt,
+ (offset + sizeof(struct sctp_init_chunk)),
+ &abort_flag,
+ (struct sctp_chunkhdr *)init_chk,
+ &nat_friendly, NULL);
+ if (abort_flag) {
+ do_a_abort:
+ if (op_err == NULL) {
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ }
+ sctp_send_abort(init_pkt, iphlen, src, dst, sh,
+ init_chk->init.initiate_tag, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ return;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* No memory, INIT timer will re-attempt. */
+ sctp_m_freem(op_err);
+ return;
+ }
+ chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
+ padding_len = 0;
+
+ /*
+ * We might not overwrite the identification[] completely and on
+ * some platforms time_entered will contain some padding.
+ * Therefore zero out the cookie to avoid putting
+ * uninitialized memory on the wire.
+ */
+ memset(&stc, 0, sizeof(struct sctp_state_cookie));
+
+ /* the time I built cookie */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ stc.time_entered.tv_sec = now.tv_sec;
+ stc.time_entered.tv_usec = now.tv_usec;
+
+ /* populate any tie tags */
+ if (asoc != NULL) {
+ /* unlock before tag selections */
+ stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
+ stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
+ stc.cookie_life = asoc->cookie_life;
+ net = asoc->primary_destination;
+ } else {
+ stc.tie_tag_my_vtag = 0;
+ stc.tie_tag_peer_vtag = 0;
+ /* life I will award this cookie */
+ stc.cookie_life = inp->sctp_ep.def_cookie_life;
+ }
+
+ /* copy in the ports for later check */
+ stc.myport = sh->dest_port;
+ stc.peerport = sh->src_port;
+
+ /*
+ * If we wanted to honor cookie life extensions, we would add to
+ * stc.cookie_life. For now we should NOT honor any extension
+ */
+ stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ stc.ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ stc.ipv4_addr_legal = 0;
+ } else {
+ stc.ipv4_addr_legal = 1;
+ }
+#if defined(__Userspace__)
+ stc.conn_addr_legal = 0;
+#endif
+ } else {
+ stc.ipv6_addr_legal = 0;
+#if defined(__Userspace__)
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+ stc.conn_addr_legal = 1;
+ stc.ipv4_addr_legal = 0;
+ } else {
+ stc.conn_addr_legal = 0;
+ stc.ipv4_addr_legal = 1;
+ }
+#else
+ stc.ipv4_addr_legal = 1;
+#endif
+ }
+ stc.ipv4_scope = 0;
+ if (net == NULL) {
+ to = src;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ /* lookup address */
+ stc.address[0] = src4->sin_addr.s_addr;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ stc.addr_type = SCTP_IPV4_ADDRESS;
+ /* local from address */
+ stc.laddress[0] = dst4->sin_addr.s_addr;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ stc.laddr_type = SCTP_IPV4_ADDRESS;
+ /* scope_id is only for v6 */
+ stc.scope_id = 0;
+ if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
+ (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
+ stc.ipv4_scope = 1;
+ }
+ /* Must use the address in this case */
+ if (sctp_is_address_on_local_host(src, vrf_id)) {
+ stc.loopback_scope = 1;
+ stc.ipv4_scope = 1;
+ stc.site_scope = 1;
+ stc.local_scope = 0;
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ stc.addr_type = SCTP_IPV6_ADDRESS;
+ memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
+#else
+ stc.scope_id = 0;
+#endif
+ if (sctp_is_address_on_local_host(src, vrf_id)) {
+ stc.loopback_scope = 1;
+ stc.local_scope = 0;
+ stc.site_scope = 1;
+ stc.ipv4_scope = 1;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
+ IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
+					/*
+					 * If the new destination or source is
+					 * LINK_LOCAL, we must have both site
+					 * and local scope in common. Don't set
+					 * local scope though, since we must
+					 * depend on the source to be added
+					 * implicitly. We cannot assume that
+					 * all links are common just because
+					 * we share one link.
+					 */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ /* Mac OS X currently doesn't have in6_getscope() */
+ stc.scope_id = src6->sin6_addr.s6_addr16[1];
+#endif
+ stc.local_scope = 0;
+ stc.site_scope = 1;
+ stc.ipv4_scope = 1;
+					/*
+					 * We start counting for the private
+					 * address stuff at 1, since the
+					 * link-local address we source from
+					 * won't show up in our scoped count.
+					 */
+ cnt_inits_to = 1;
+ /* pull out the scope_id from incoming pkt */
+ } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
+ IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
+ /*
+ * If the new destination or source is
+ * SITE_LOCAL then we must have site scope in
+ * common.
+ */
+ stc.site_scope = 1;
+ }
+ memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
+ stc.laddr_type = SCTP_IPV6_ADDRESS;
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ /* lookup address */
+ stc.address[0] = 0;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
+ stc.addr_type = SCTP_CONN_ADDRESS;
+ /* local from address */
+ stc.laddress[0] = 0;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
+ stc.laddr_type = SCTP_CONN_ADDRESS;
+ /* scope_id is only for v6 */
+ stc.scope_id = 0;
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ goto do_a_abort;
+ break;
+ }
+ } else {
+ /* set the scope per the existing tcb */
+
+#ifdef INET6
+ struct sctp_nets *lnet;
+#endif
+
+ stc.loopback_scope = asoc->scope.loopback_scope;
+ stc.ipv4_scope = asoc->scope.ipv4_local_scope;
+ stc.site_scope = asoc->scope.site_scope;
+ stc.local_scope = asoc->scope.local_scope;
+#ifdef INET6
+ /* Why do we not consider IPv4 LL addresses? */
+ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+ if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
+ /*
+ * if we have a LL address, start
+ * counting at 1.
+ */
+ cnt_inits_to = 1;
+ }
+ }
+ }
+#endif
+ /* use the net pointer */
+ to = (struct sockaddr *)&net->ro._l_addr;
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)to;
+ stc.address[0] = sin->sin_addr.s_addr;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ stc.addr_type = SCTP_IPV4_ADDRESS;
+ if (net->src_addr_selected == 0) {
+				/*
+				 * strange case here, the INIT should have
+				 * done the selection.
+				 */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb, (sctp_route_t *)&net->ro,
+ net, 0, vrf_id);
+ if (net->ro._s_addr == NULL) {
+ sctp_m_freem(op_err);
+ sctp_m_freem(m);
+ return;
+ }
+
+ net->src_addr_selected = 1;
+
+ }
+ stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ stc.laddr_type = SCTP_IPV4_ADDRESS;
+ /* scope_id is only for v6 */
+ stc.scope_id = 0;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)to;
+ memcpy(&stc.address, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ stc.addr_type = SCTP_IPV6_ADDRESS;
+ stc.scope_id = sin6->sin6_scope_id;
+ if (net->src_addr_selected == 0) {
+ /*
+ * strange case here, the INIT should have
+ * done the selection.
+ */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb, (sctp_route_t *)&net->ro,
+ net, 0, vrf_id);
+ if (net->ro._s_addr == NULL) {
+ sctp_m_freem(op_err);
+ sctp_m_freem(m);
+ return;
+ }
+
+ net->src_addr_selected = 1;
+ }
+ memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+ stc.laddr_type = SCTP_IPV6_ADDRESS;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ sconn = (struct sockaddr_conn *)to;
+ stc.address[0] = 0;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
+ stc.addr_type = SCTP_CONN_ADDRESS;
+ stc.laddress[0] = 0;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
+ stc.laddr_type = SCTP_CONN_ADDRESS;
+ stc.scope_id = 0;
+ break;
+#endif
+ }
+ }
+	/* Now let's put the SCTP header in place */
+ initack = mtod(m, struct sctp_init_ack_chunk *);
+ /* Save it off for quick ref */
+ stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
+ /* who are we */
+ memcpy(stc.identification, SCTP_VERSION_STRING,
+ min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
+ memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
+ /* now the chunk header */
+ initack->ch.chunk_type = SCTP_INITIATION_ACK;
+ initack->ch.chunk_flags = 0;
+ /* fill in later from mbuf we build */
+ initack->ch.chunk_length = 0;
+ /* place in my tag */
+ if ((asoc != NULL) &&
+ ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
+ /* re-use the v-tags and init-seq here */
+ initack->init.initiate_tag = htonl(asoc->my_vtag);
+ initack->init.initial_tsn = htonl(asoc->init_seq_number);
+ } else {
+ uint32_t vtag, itsn;
+
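+		/*
+		 * The TCB lock is dropped across tag selection; the extra
+		 * refcount keeps the association from being freed meanwhile.
+		 */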
+ if (asoc) {
+ atomic_add_int(&asoc->refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ new_tag:
+ vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+ if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
+					/* Got a duplicate vtag from some guy behind a
+					 * NAT; make sure we don't use it.
+					 */
+ goto new_tag;
+ }
+ initack->init.initiate_tag = htonl(vtag);
+ /* get a TSN to use too */
+ itsn = sctp_select_initial_TSN(&inp->sctp_ep);
+ initack->init.initial_tsn = htonl(itsn);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&asoc->refcnt, -1);
+ } else {
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+ initack->init.initiate_tag = htonl(vtag);
+ /* get a TSN to use too */
+ initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
+ SCTP_INP_RLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ /* save away my tag to */
+ stc.my_vtag = initack->init.initiate_tag;
+
+ /* set up some of the credits. */
+ so = inp->sctp_socket;
+ if (so == NULL) {
+ /* memory problem */
+ sctp_m_freem(op_err);
+ sctp_m_freem(m);
+ return;
+ } else {
+ initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
+ }
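+	/*
+	 * Stream count negotiation: our outbound stream count is capped by
+	 * the peer's advertised inbound limit, and we tell the peer our own
+	 * inbound limit.
+	 */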
+ /* set what I want */
+ his_limit = ntohs(init_chk->init.num_inbound_streams);
+ /* choose what I want */
+ if (asoc != NULL) {
+ if (asoc->streamoutcnt > asoc->pre_open_streams) {
+ i_want = asoc->streamoutcnt;
+ } else {
+ i_want = asoc->pre_open_streams;
+ }
+ } else {
+ i_want = inp->sctp_ep.pre_open_stream_count;
+ }
+ if (his_limit < i_want) {
+ /* I Want more :< */
+ initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
+ } else {
+ /* I can have what I want :> */
+ initack->init.num_outbound_streams = htons(i_want);
+ }
+ /* tell him his limit. */
+ initack->init.num_inbound_streams =
+ htons(inp->sctp_ep.max_open_streams_intome);
+
+ /* adaptation layer indication parameter */
+ if (inp->sctp_ep.adaptation_layer_indicator_provided) {
+ parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
+ ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(parameter_len);
+ ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
+ chunk_len += parameter_len;
+ }
+
+ /* ECN parameter */
+ if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
+ ((asoc == NULL) && (inp->ecn_supported == 1))) {
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+ ph->param_type = htons(SCTP_ECN_CAPABLE);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+
+ /* PR-SCTP supported parameter */
+ if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
+ ((asoc == NULL) && (inp->prsctp_supported == 1))) {
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+ ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+
+ /* Add NAT friendly parameter */
+ if (nat_friendly) {
+ parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+ ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+ ph->param_length = htons(parameter_len);
+ chunk_len += parameter_len;
+ }
+
+ /* And now tell the peer which extensions we support */
+ num_ext = 0;
+ pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
+ if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
+ ((asoc == NULL) && (inp->prsctp_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
+ ((asoc == NULL) && (inp->idata_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
+ }
+ }
+ if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
+ ((asoc == NULL) && (inp->auth_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ }
+ if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
+ ((asoc == NULL) && (inp->asconf_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+ }
+ if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
+ ((asoc == NULL) && (inp->reconfig_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+ }
+ if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
+ ((asoc == NULL) && (inp->idata_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
+ }
+ if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
+ ((asoc == NULL) && (inp->nrsack_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
+ }
+ if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
+ ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+ }
+ if (num_ext > 0) {
+ parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
+ pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+ pr_supported->ph.param_length = htons(parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+ }
+
+ /* add authentication parameters */
+ if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
+ ((asoc == NULL) && (inp->auth_supported == 1))) {
+ struct sctp_auth_random *randp;
+ struct sctp_auth_hmac_algo *hmacs;
+ struct sctp_auth_chunk_list *chunks;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ /* generate and add RANDOM parameter */
+ randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
+ parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
+ SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+ randp->ph.param_type = htons(SCTP_RANDOM);
+ randp->ph.param_length = htons(parameter_len);
+ SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ /* add HMAC_ALGO parameter */
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
+ parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
+ sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
+ (uint8_t *)hmacs->hmac_ids);
+ hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+ hmacs->ph.param_length = htons(parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ /* add CHUNKS parameter */
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
+ parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
+ sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
+ chunks->chunk_types);
+ chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+ chunks->ph.param_length = htons(parameter_len);
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+ }
+ SCTP_BUF_LEN(m) = chunk_len;
+ m_last = m;
+ /* now the addresses */
+ /* To optimize this we could put the scoping stuff
+ * into a structure and remove the individual uint8's from
+ * the stc structure. Then we could just sifa in the
+ * address within the stc.. but for now this is a quick
+ * hack to get the address stuff teased apart.
+ */
+ scp.ipv4_addr_legal = stc.ipv4_addr_legal;
+ scp.ipv6_addr_legal = stc.ipv6_addr_legal;
+#if defined(__Userspace__)
+ scp.conn_addr_legal = stc.conn_addr_legal;
+#endif
+ scp.loopback_scope = stc.loopback_scope;
+ scp.ipv4_local_scope = stc.ipv4_scope;
+ scp.local_scope = stc.local_scope;
+ scp.site_scope = stc.site_scope;
+ m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
+ cnt_inits_to,
+ &padding_len, &chunk_len);
+	/* padding_len can only be positive if no addresses have been added */
+ if (padding_len > 0) {
+ memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+ chunk_len += padding_len;
+ SCTP_BUF_LEN(m) += padding_len;
+ padding_len = 0;
+ }
+
+ /* tack on the operational error if present */
+ if (op_err) {
+ parameter_len = 0;
+ for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
+ parameter_len += SCTP_BUF_LEN(m_tmp);
+ }
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ SCTP_BUF_NEXT(m_last) = op_err;
+ while (SCTP_BUF_NEXT(m_last) != NULL) {
+ m_last = SCTP_BUF_NEXT(m_last);
+ }
+ chunk_len += parameter_len;
+ }
+ if (padding_len > 0) {
+ m_last = sctp_add_pad_tombuf(m_last, padding_len);
+ if (m_last == NULL) {
+ /* Houston we have a problem, no space */
+ sctp_m_freem(m);
+ return;
+ }
+ chunk_len += padding_len;
+ padding_len = 0;
+ }
+ /* Now we must build a cookie */
+ m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
+ if (m_cookie == NULL) {
+ /* memory problem */
+ sctp_m_freem(m);
+ return;
+ }
+ /* Now append the cookie to the end and update the space/size */
+ SCTP_BUF_NEXT(m_last) = m_cookie;
+ parameter_len = 0;
+ for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
+ parameter_len += SCTP_BUF_LEN(m_tmp);
+ if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+ m_last = m_tmp;
+ }
+ }
+ padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+ chunk_len += parameter_len;
+
+ /* Place in the size, but we don't include
+ * the last pad (if any) in the INIT-ACK.
+ */
+ initack->ch.chunk_length = htons(chunk_len);
+
+ /* Time to sign the cookie, we don't sign over the cookie
+ * signature though thus we set trailer.
+ */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
+ SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
+ (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
+#if defined(__Userspace__)
+ /*
+ * Don't put AF_CONN addresses on the wire, in case this is critical
+ * for the application. However, they are protected by the HMAC and
+ * need to be reconstructed before checking the HMAC.
+ * Clearing is only done in the mbuf chain, since the local stc is
+ * not used anymore.
+ */
+ if (stc.addr_type == SCTP_CONN_ADDRESS) {
+ const void *p = NULL;
+
+ m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address),
+ (int)sizeof(void *), (caddr_t)&p);
+ }
+ if (stc.laddr_type == SCTP_CONN_ADDRESS) {
+ const void *p = NULL;
+
+ m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress),
+ (int)sizeof(void *), (caddr_t)&p);
+ }
+#endif
+ /*
+ * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
+ * here since the timer will drive a retransmission.
+ */
+ if (padding_len > 0) {
+ if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
+ sctp_m_freem(m);
+ return;
+ }
+ }
+ if (stc.loopback_scope) {
+ over_addr = (union sctp_sockstore *)dst;
+ } else {
+ over_addr = NULL;
+ }
+
+ if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
+ 0, 0,
+ inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
+ port, over_addr,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid,
+#endif
+ SCTP_SO_NOT_LOCKED))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
+ if (error == ENOBUFS) {
+ if (asoc != NULL) {
+ asoc->ifp_had_enobuf = 1;
+ }
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ } else {
+ if (asoc != NULL) {
+ asoc->ifp_had_enobuf = 0;
+ }
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
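+/*
+ * Padding bookkeeping in the INIT-ACK builder above (a worked example):
+ * SCTP_SIZE32() rounds a length up to a 4-byte boundary, so a parameter
+ * of length 22 gives SCTP_SIZE32(22) = 24 and padding_len = 2.  The pad
+ * bytes are not written immediately; padding_len is carried forward and
+ * zero-filled only when the next parameter (or the chunk end) needs it,
+ * which is why the last parameter may go out unpadded.
+ */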
+
+static void
+sctp_prune_prsctp(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *srcv,
+ int dataout)
+{
+ int freed_spc = 0;
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if ((asoc->prsctp_supported) &&
+ (asoc->sent_queue_cnt_removeable > 0)) {
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ /*
+ * Look for chunks marked with the PR_SCTP flag AND
+ * the buffer space flag. If the one being sent is of
+ * equal or greater priority, then purge the old one
+ * and free some space.
+ */
+ if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+ /*
+ * This one is PR-SCTP AND buffer space
+ * limited type
+ */
+ if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
+ /*
+ * Lower numbers equate to higher
+ * priority. So if the one we are
+ * looking at has a larger number
+ * (lower priority), we want to drop
+ * the data and NOT retransmit it.
+ */
+ if (chk->data) {
+ /*
+ * We release the book_size
+ * if the mbuf is here
+ */
+ int ret_spc;
+ uint8_t sent;
+
+ if (chk->sent > SCTP_DATAGRAM_UNSENT)
+ sent = 1;
+ else
+ sent = 0;
+ ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+ sent,
+ SCTP_SO_LOCKED);
+ freed_spc += ret_spc;
+ if (freed_spc >= dataout) {
+ return;
+ }
+ } /* if chunk was present */
+ } /* if of sufficient priority */
+ } /* if chunk has enabled */
+ } /* tailqforeach */
+
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+ /* Here we must move to the sent queue and mark */
+ if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+ if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
+ if (chk->data) {
+ /*
+ * We release the book_size
+ * if the mbuf is here
+ */
+ int ret_spc;
+
+ ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+ 0, SCTP_SO_LOCKED);
+
+ freed_spc += ret_spc;
+ if (freed_spc >= dataout) {
+ return;
+ }
+ } /* end if chk->data */
+ } /* end if right class */
+ } /* end if chk pr-sctp */
+ } /* tailqforeachsafe (chk) */
+ } /* if enabled in asoc */
+}
+
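+/*
+ * Note on the buffer-drop policy used by sctp_prune_prsctp() above:
+ * for PR-SCTP buffer mode, sinfo_timetolive carries a priority rather
+ * than a time, and lower values mean higher priority.  Queued chunks
+ * whose stored value (rec.data.timetodrop.tv_sec) is larger, i.e. of
+ * lower priority than the new message, are released until at least
+ * 'dataout' bytes have been freed.
+ */
+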
+int
+sctp_get_frag_point(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ int siz, ovh;
+
+ /*
+ * For endpoints that have both v6 and v4 addresses we must reserve
+ * room for the ipv6 header; for those that are only dealing with V4
+ * we use a larger frag point.
+ */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+#if defined(__Userspace__)
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+ ovh = sizeof(struct sctphdr);
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+#else
+ ovh = SCTP_MIN_V4_OVERHEAD;
+#endif
+ }
+ ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
+ if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
+ siz = asoc->smallest_mtu - ovh;
+ else
+ siz = (stcb->asoc.sctp_frag_point - ovh);
+ /*
+ * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
+ */
+ /* A data chunk MUST fit in a cluster */
+ /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
+ /* } */
+
+ /* adjust for an AUTH chunk if DATA requires auth */
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
+ siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+
+ if (siz % 4) {
+ /* make it an even word boundary please */
+ siz -= (siz % 4);
+ }
+ return (siz);
+}
+
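+/*
+ * Worked example for sctp_get_frag_point() (a sketch, assuming the
+ * usual 20-byte IPv4 header and 12-byte SCTP common header): a v4-only
+ * endpoint with a smallest path MTU of 1500 using plain DATA chunks
+ * (16 bytes of chunk overhead) ends up with 1500 - 32 - 16 = 1452,
+ * already a multiple of 4.  If DATA must be authenticated, the length
+ * of the AUTH chunk for the negotiated HMAC is subtracted as well.
+ */
+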
+static void
+sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
+{
+ /*
+ * We assume that the user wants PR_SCTP_TTL if the user
+ * provides a positive lifetime but does not specify any
+ * PR_SCTP policy.
+ */
+ if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
+ sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
+ } else if (sp->timetolive > 0) {
+ sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
+ sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
+ } else {
+ return;
+ }
+ switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
+ case CHUNK_FLAGS_PR_SCTP_BUF:
+ /*
+ * Time to live is a priority stored in tv_sec when
+ * doing the buffer drop thing.
+ */
+ sp->ts.tv_sec = sp->timetolive;
+ sp->ts.tv_usec = 0;
+ break;
+ case CHUNK_FLAGS_PR_SCTP_TTL:
+ {
+ struct timeval tv;
+ (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+ tv.tv_sec = sp->timetolive / 1000;
+ tv.tv_usec = (sp->timetolive * 1000) % 1000000;
+ /* TODO sctp_constants.h needs alternative time macros when
+ * _KERNEL is undefined.
+ */
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ timeradd(&sp->ts, &tv, &sp->ts);
+#else
+ timevaladd(&sp->ts, &tv);
+#endif
+ }
+ break;
+ case CHUNK_FLAGS_PR_SCTP_RTX:
+ /*
+ * Time to live is the number of retransmissions
+ * stored in tv_sec.
+ */
+ sp->ts.tv_sec = sp->timetolive;
+ sp->ts.tv_usec = 0;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_USRREQ1,
+ "Unknown PR_SCTP policy %u.\n",
+ PR_SCTP_POLICY(sp->sinfo_flags));
+ break;
+ }
+}
+
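+/*
+ * Example of the TTL conversion in sctp_set_prsctp_policy() above:
+ * sinfo_timetolive is in milliseconds, so timetolive = 2500 yields
+ * tv_sec = 2 and tv_usec = 500000 ((2500 * 1000) % 1000000), which is
+ * added to the current time to form the drop deadline in sp->ts.
+ */
+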
+static int
+sctp_msg_append(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct mbuf *m,
+ struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
+{
+ int error = 0;
+ struct mbuf *at;
+ struct sctp_stream_queue_pending *sp = NULL;
+ struct sctp_stream_out *strm;
+
+ /* Given an mbuf chain, put it
+ * into the association send queue and
+ * place it on the wheel
+ */
+ if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
+ /* Invalid stream number */
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((stcb->asoc.stream_locked) &&
+ (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+ /* Now can we send this? */
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ /* got data while shutting down */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ goto out_now;
+ }
+ sctp_alloc_a_strmoq(stcb, sp);
+ if (sp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ error = ENOMEM;
+ goto out_now;
+ }
+ sp->sinfo_flags = srcv->sinfo_flags;
+ sp->timetolive = srcv->sinfo_timetolive;
+ sp->ppid = srcv->sinfo_ppid;
+ sp->context = srcv->sinfo_context;
+ sp->fsn = 0;
+ if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+ sp->net = net;
+ atomic_add_int(&sp->net->ref_count, 1);
+ } else {
+ sp->net = NULL;
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+ sp->sid = srcv->sinfo_stream;
+ sp->msg_is_complete = 1;
+ sp->sender_all_done = 1;
+ sp->some_taken = 0;
+ sp->data = m;
+ sp->tail_mbuf = NULL;
+ sctp_set_prsctp_policy(sp);
+ /* We could in theory (for sendall) pass the length
+ * in, but we would still have to hunt through the
+ * chain since we need to set up the tail_mbuf
+ */
+ sp->length = 0;
+ for (at = m; at; at = SCTP_BUF_NEXT(at)) {
+ if (SCTP_BUF_NEXT(at) == NULL)
+ sp->tail_mbuf = at;
+ sp->length += SCTP_BUF_LEN(at);
+ }
+ if (srcv->sinfo_keynumber_valid) {
+ sp->auth_keyid = srcv->sinfo_keynumber;
+ } else {
+ sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ }
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ sctp_auth_key_acquire(stcb, sp->auth_keyid);
+ sp->holds_key_ref = 1;
+ }
+ if (hold_stcb_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ sctp_snd_sb_alloc(stcb, sp->length);
+ atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
+ TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
+ m = NULL;
+ if (hold_stcb_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+out_now:
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return (error);
+}
+
+
+static struct mbuf *
+sctp_copy_mbufchain(struct mbuf *clonechain,
+ struct mbuf *outchain,
+ struct mbuf **endofchain,
+ int can_take_mbuf,
+ int sizeofcpy,
+ uint8_t copy_by_ref)
+{
+ struct mbuf *m;
+ struct mbuf *appendchain;
+ caddr_t cp;
+ int len;
+
+ if (endofchain == NULL) {
+ /* error */
+ error_out:
+ if (outchain)
+ sctp_m_freem(outchain);
+ return (NULL);
+ }
+ if (can_take_mbuf) {
+ appendchain = clonechain;
+ } else {
+ if (!copy_by_ref &&
+ (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
+ /* It's not in a cluster */
+ if (*endofchain == NULL) {
+ /* let's get an mbuf cluster */
+ if (outchain == NULL) {
+ /* This is the general case */
+ new_mbuf:
+ outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
+ if (outchain == NULL) {
+ goto error_out;
+ }
+ SCTP_BUF_LEN(outchain) = 0;
+ *endofchain = outchain;
+ /* get the prepend space */
+ SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
+ } else {
+ /* We really should not get a NULL in endofchain */
+ /* find end */
+ m = outchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ /* sanity */
+ if (*endofchain == NULL) {
+ /* huh, TSNH XXX maybe we should panic */
+ sctp_m_freem(outchain);
+ goto new_mbuf;
+ }
+ }
+ /* get the space left at the new end */
+ len = (int)M_TRAILINGSPACE(*endofchain);
+ } else {
+ /* how much is left at the end? */
+ len = (int)M_TRAILINGSPACE(*endofchain);
+ }
+ /* Find the end of the data, for appending */
+ cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
+
+ /* Now let's copy it out */
+ if (len >= sizeofcpy) {
+ /* It all fits, copy it in */
+ m_copydata(clonechain, 0, sizeofcpy, cp);
+ SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
+ } else {
+ /* fill up the end of the chain */
+ if (len > 0) {
+ m_copydata(clonechain, 0, len, cp);
+ SCTP_BUF_LEN((*endofchain)) += len;
+ /* now we need another one */
+ sizeofcpy -= len;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
+ if (m == NULL) {
+ /* We failed */
+ goto error_out;
+ }
+ SCTP_BUF_NEXT((*endofchain)) = m;
+ *endofchain = m;
+ cp = mtod((*endofchain), caddr_t);
+ m_copydata(clonechain, len, sizeofcpy, cp);
+ SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
+ }
+ return (outchain);
+ } else {
+ /* copy the old-fashioned way */
+ appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
+ }
+#endif
+ }
+ }
+ if (appendchain == NULL) {
+ /* error */
+ if (outchain)
+ sctp_m_freem(outchain);
+ return (NULL);
+ }
+ if (outchain) {
+ /* tack on to the end */
+ if (*endofchain != NULL) {
+ SCTP_BUF_NEXT(((*endofchain))) = appendchain;
+ } else {
+ m = outchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ SCTP_BUF_NEXT(m) = appendchain;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ }
+ /*
+ * save off the end and update the end-chain
+ * position
+ */
+ m = appendchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ return (outchain);
+ } else {
+ /* save off the end and update the end-chain position */
+ m = appendchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ return (appendchain);
+ }
+}
+
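+/*
+ * The copy strategy in sctp_copy_mbufchain() above: small payloads
+ * (those that would fit in about sctp_mbuf_threshold_count plain
+ * mbufs) that are not marked copy_by_ref get byte-copied into the
+ * trailing space of the existing chain with m_copydata; anything else
+ * is duplicated with SCTP_M_COPYM, which can share cluster storage by
+ * reference instead of copying the bytes.
+ */
+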
+static int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *num_out,
+ int *reason_code,
+ int control_only, int from_where,
+ struct timeval *now, int *now_filled, int frag_point, int so_locked);
+
+static void
+sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+ uint32_t val SCTP_UNUSED)
+{
+ struct sctp_copy_all *ca;
+ struct mbuf *m;
+ int ret = 0;
+ int added_control = 0;
+ int un_sent, do_chunk_output = 1;
+ struct sctp_association *asoc;
+ struct sctp_nets *net;
+
+ ca = (struct sctp_copy_all *)ptr;
+ if (ca->m == NULL) {
+ return;
+ }
+ if (ca->inp != inp) {
+ /* TSNH */
+ return;
+ }
+ if (ca->sndlen > 0) {
+ m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
+ if (m == NULL) {
+ /* can't copy so we are done */
+ ca->cnt_failed++;
+ return;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(m, SCTP_MBUF_ICOPY);
+ }
+#endif
+ } else {
+ m = NULL;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
+ /* Abort this assoc with m as the user defined reason */
+ if (m != NULL) {
+ SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
+ } else {
+ m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 0, M_NOWAIT, 1, MT_DATA);
+ SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
+ }
+ if (m != NULL) {
+ struct sctp_paramhdr *ph;
+
+ ph = mtod(m, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
+ }
+ /* We add one here to keep the assoc from
+ * disappearing on us.
+ */
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
+ /* sctp_abort_an_association calls sctp_free_asoc();
+ * free_asoc will NOT free it since we incremented
+ * the refcnt. We do this to prevent it being freed
+ * and things getting tricky, since we could end up
+ * (from free_asoc) calling inpcb_free, which would
+ * take a recursive lock call on the iterator lock.
+ * But as a consequence the stcb is returned to us
+ * unlocked; since free_asoc returns with either no
+ * TCB or the TCB unlocked, we must relock to unlock
+ * in the iterator timer :-0
+ */
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ goto no_chunk_output;
+ } else {
+ if (m) {
+ ret = sctp_msg_append(stcb, net, m,
+ &ca->sndrcv, 1);
+ }
+ asoc = &stcb->asoc;
+ if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
+ /* shutdown this assoc */
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN the first time through */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ NULL);
+ added_control = 1;
+ do_chunk_output = 0;
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF should be
+ * sent with no data. currently, we will allow user
+ * data to be sent first and move to
+ * SHUTDOWN-PENDING
+ */
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ abort_anyway:
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "%s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ op_err, SCTP_SO_NOT_LOCKED);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ goto no_chunk_output;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ NULL);
+ }
+ }
+
+ }
+ }
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
+
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+ do_chunk_output = 0;
+ }
+ if (do_chunk_output)
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
+ else if (added_control) {
+ int num_out, reason, now_filled = 0;
+ struct timeval now;
+ int frag_point;
+
+ frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+ &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
+ }
+ no_chunk_output:
+ if (ret) {
+ ca->cnt_failed++;
+ } else {
+ ca->cnt_sent++;
+ }
+}
+
+static void
+sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
+{
+ struct sctp_copy_all *ca;
+
+ ca = (struct sctp_copy_all *)ptr;
+ /*
+ * Do a notify here? Kacheong suggests that the notify be done at
+ * the send time.. so you would push up a notification if any send
+ * failed. Don't know if this is feasible since the only failures we
+ * have are "memory" related, and if you cannot get an mbuf to send
+ * the data you surely can't get an mbuf to send up a notification
+ * that you can't send the data :->
+ */
+
+ /* now free everything */
+ if (ca->inp) {
+ /* Let's clear the flag to allow others to run. */
+ ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
+ }
+ sctp_m_freem(ca->m);
+ SCTP_FREE(ca, SCTP_M_COPYAL);
+}
+
+static struct mbuf *
+sctp_copy_out_all(struct uio *uio, ssize_t len)
+{
+ struct mbuf *ret, *at;
+ ssize_t left, willcpy, cancpy, error;
+
+ ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
+ if (ret == NULL) {
+ /* TSNH */
+ return (NULL);
+ }
+ left = len;
+ SCTP_BUF_LEN(ret) = 0;
+ /* save space for the data chunk header */
+ cancpy = (int)M_TRAILINGSPACE(ret);
+ willcpy = min(cancpy, left);
+ at = ret;
+ while (left > 0) {
+ /* Align data to the end */
+ error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
+ if (error) {
+ err_out_now:
+ sctp_m_freem(at);
+ return (NULL);
+ }
+ SCTP_BUF_LEN(at) = (int)willcpy;
+ SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
+ left -= willcpy;
+ if (left > 0) {
+ SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
+ if (SCTP_BUF_NEXT(at) == NULL) {
+ goto err_out_now;
+ }
+ at = SCTP_BUF_NEXT(at);
+ SCTP_BUF_LEN(at) = 0;
+ cancpy = (int)M_TRAILINGSPACE(at);
+ willcpy = min(cancpy, left);
+ }
+ }
+ return (ret);
+}
+
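+/*
+ * sctp_copy_out_all() above pulls 'len' bytes out of the uio into a
+ * chain of cluster-backed mbufs, growing the chain as each mbuf's
+ * trailing space is consumed; any uiomove() failure makes it give up
+ * and return NULL.
+ */
+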
+static int
+sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
+ struct sctp_sndrcvinfo *srcv)
+{
+ int ret;
+ struct sctp_copy_all *ca;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
+ /* There is another sendall in progress. */
+ return (EBUSY);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
+#else
+ if (uio_resid(uio) > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
+#endif
+#else
+ if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
+#endif
+ /* You must not be larger than the limit! */
+ return (EMSGSIZE);
+ }
+ SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
+ SCTP_M_COPYAL);
+ if (ca == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(ca, 0, sizeof(struct sctp_copy_all));
+
+ ca->inp = inp;
+ if (srcv) {
+ memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
+ }
+ /*
+ * take off the sendall flag, it would be bad if we failed to do
+ * this :-0
+ */
+ ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
+ /* get length and mbuf chain */
+ if (uio) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ ca->sndlen = uio->uio_resid;
+#else
+ ca->sndlen = uio_resid(uio);
+#endif
+#else
+ ca->sndlen = uio->uio_resid;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
+#endif
+ ca->m = sctp_copy_out_all(uio, ca->sndlen);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
+#endif
+ if (ca->m == NULL) {
+ SCTP_FREE(ca, SCTP_M_COPYAL);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ } else {
+ /* Gather the length of the send */
+ struct mbuf *mat;
+
+ ca->sndlen = 0;
+ for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
+ ca->sndlen += SCTP_BUF_LEN(mat);
+ }
+ }
+ inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
+ ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
+ SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
+ SCTP_ASOC_ANY_STATE,
+ (void *)ca, 0,
+ sctp_sendall_completes, inp, 1);
+ if (ret) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
+ SCTP_FREE(ca, SCTP_M_COPYAL);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ return (EFAULT);
+ }
+ return (0);
+}
+
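+/*
+ * Flow of the SCTP_SENDALL path above: the user data is copied once
+ * into ca->m, SCTP_PCB_FLAGS_SND_ITERATOR_UP serializes sendall
+ * requests on the endpoint, and sctp_initiate_iterator() replays the
+ * message to every association via sctp_sendall_iterator(), with
+ * sctp_sendall_completes() clearing the flag and freeing the copy
+ * when the walk finishes.
+ */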
+
+void
+sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ }
+}
+
+void
+sctp_toss_old_asconf(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_asconf_chunk *acp;
+
+ asoc = &stcb->asoc;
+ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
+ /* find SCTP_ASCONF chunk in queue */
+ if (chk->rec.chunk_id.id == SCTP_ASCONF) {
+ if (chk->data) {
+ acp = mtod(chk->data, struct sctp_asconf_chunk *);
+ if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
+ /* Not Acked yet */
+ break;
+ }
+ }
+ TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ }
+}
+
+
+static void
+sctp_clean_up_datalist(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_tmit_chunk **data_list,
+ int bundle_at,
+ struct sctp_nets *net)
+{
+ int i;
+ struct sctp_tmit_chunk *tp1;
+
+ for (i = 0; i < bundle_at; i++) {
+ /* off of the send queue */
+ TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
+ asoc->send_queue_cnt--;
+ if (i > 0) {
+ /*
+ * For any chunk other than 0, zap the time; chunk 0
+ * gets zapped or set based on whether an RTO
+ * measurement is needed.
+ */
+ data_list[i]->do_rtt = 0;
+ }
+ /* record time */
+ data_list[i]->sent_rcv_time = net->last_sent_time;
+ data_list[i]->rec.data.cwnd_at_send = net->cwnd;
+ data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
+ if (data_list[i]->whoTo == NULL) {
+ data_list[i]->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ }
+ /* on to the sent queue */
+ tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
+ if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
+ struct sctp_tmit_chunk *tpp;
+
+ /* need to move back */
+ back_up_more:
+ tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
+ if (tpp == NULL) {
+ TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
+ goto all_done;
+ }
+ tp1 = tpp;
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
+ goto back_up_more;
+ }
+ TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
+ } else {
+ TAILQ_INSERT_TAIL(&asoc->sent_queue,
+ data_list[i],
+ sctp_next);
+ }
+ all_done:
+ /* This does not lower until the cum-ack passes it */
+ asoc->sent_queue_cnt++;
+ if ((asoc->peers_rwnd <= 0) &&
+ (asoc->total_flight == 0) &&
+ (bundle_at == 1)) {
+ /* Mark the chunk as being a window probe */
+ SCTP_STAT_INCR(sctps_windowprobed);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC2, 3);
+#endif
+ data_list[i]->sent = SCTP_DATAGRAM_SENT;
+ data_list[i]->snd_count = 1;
+ data_list[i]->rec.data.chunk_was_revoked = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
+ data_list[i]->whoTo->flight_size,
+ data_list[i]->book_size,
+ (uint32_t)(uintptr_t)data_list[i]->whoTo,
+ data_list[i]->rec.data.tsn);
+ }
+ sctp_flight_size_increase(data_list[i]);
+ sctp_total_flight_increase(stcb, data_list[i]);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+ asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+ (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ }
+ if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
+ (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
+ }
+}
+
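+/*
+ * Rwnd bookkeeping in sctp_clean_up_datalist() above: each chunk moved
+ * to the sent queue charges send_size plus the sctp_peer_chunk_oh
+ * sysctl against the peer's advertised window, and once the remaining
+ * window drops below the sender-side SWS threshold (sctp_sws_sender)
+ * it is clamped to zero.  A single chunk sent against a closed window
+ * with nothing in flight is counted as a window probe.
+ */
+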
+static void
+sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+ (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+ (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+ /* Stray chunks must be cleaned up */
+ clean_up_anyway:
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ asoc->fwd_tsn_cnt--;
+ }
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+ /* special handling, we must look into the param */
+ if (chk != asoc->str_reset) {
+ goto clean_up_anyway;
+ }
+ }
+ }
+}
+
+static uint32_t
+sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
+ uint32_t space_left, uint32_t frag_point, int eeor_on)
+{
+ /* Make a decision on whether I should split a
+ * msg into multiple parts. This is only asked of
+ * incomplete messages.
+ */
+ if (eeor_on) {
+ /* If we are doing EEOR we need to always send
+ * it if it's the entire thing, since it might
+ * be all the guy is putting in the hopper.
+ */
+ if (space_left >= length) {
+ /*-
+ * If we have data outstanding,
+ * we get another chance when the sack
+ * arrives to transmit - wait for more data
+ */
+ if (stcb->asoc.total_flight == 0) {
+ /* If nothing is in flight, we zero
+ * the packet counter.
+ */
+ return (length);
+ }
+ return (0);
+
+ } else {
+ /* You can fill the rest */
+ return (space_left);
+ }
+ }
+ /*-
+ * For those strange folk that make the send buffer
+ * smaller than our fragmentation point, we can't
+ * get a full msg in so we have to allow splitting.
+ */
+ if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
+ return (length);
+ }
+ if ((length <= space_left) ||
+ ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
+ /* Sub-optimal residual, don't split in non-eeor mode. */
+ return (0);
+ }
+ /* If we reach here, length is larger
+ * than the space_left. Do we wish to split
+ * it for the sake of putting a packet together?
+ */
+ if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
+ /* It's ok to split it */
+ return (min(space_left, frag_point));
+ }
+ /* Nope, can't split */
+ return (0);
+}
+
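+/*
+ * Illustration of the non-EEOR split rule above: with space_left =
+ * 1000 and an incomplete message of length = 1100, the residual of
+ * 100 bytes falls below the (much larger) default sctp_min_residual,
+ * so 0 is returned and the message waits for a fuller packet.  Only
+ * when space_left is at least min(sctp_min_split_point, frag_point)
+ * does the function split off min(space_left, frag_point) bytes.
+ */
+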
+static uint32_t
+sctp_move_to_outqueue(struct sctp_tcb *stcb,
+ struct sctp_stream_out *strq,
+ uint32_t space_left,
+ uint32_t frag_point,
+ int *giveup,
+ int eeor_mode,
+ int *bail,
+ int so_locked)
+{
+ /* Move from the stream to the send_queue keeping track of the total */
+ struct sctp_association *asoc;
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_data_chunk *dchkh=NULL;
+ struct sctp_idata_chunk *ndchkh=NULL;
+ uint32_t to_move, length;
+ int leading;
+ uint8_t rcv_flags = 0;
+ uint8_t some_taken;
+ uint8_t send_lock_up = 0;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+one_more_time:
+ /*sa_ignore FREED_MEMORY*/
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp == NULL) {
+ if (send_lock_up == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp) {
+ goto one_more_time;
+ }
+ if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
+ (stcb->asoc.idata_supported == 0) &&
+ (strq->last_msg_incomplete)) {
+ SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
+ strq->sid,
+ strq->last_msg_incomplete);
+ strq->last_msg_incomplete = 0;
+ }
+ to_move = 0;
+ if (send_lock_up) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ send_lock_up = 0;
+ }
+ goto out_of;
+ }
+ if ((sp->msg_is_complete) && (sp->length == 0)) {
+ if (sp->sender_all_done) {
+ /* We are doing deferred cleanup. Last
+ * time through when we took all the data
+ * the sender_all_done was not set.
+ */
+ if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
+ SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
+ SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
+ sp->sender_all_done,
+ sp->length,
+ sp->msg_is_complete,
+ sp->put_last_out,
+ send_lock_up);
+ }
+ if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+ TAILQ_REMOVE(&strq->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
+ if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
+ (strq->chunks_on_queues == 0) &&
+ TAILQ_EMPTY(&strq->outqueue)) {
+ stcb->asoc.trigger_reset = 1;
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp, so_locked);
+ /* we can't be locked to it */
+ if (send_lock_up) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ send_lock_up = 0;
+ }
+ /* back to get the next msg */
+ goto one_more_time;
+ } else {
+ /* sender just finished this but
+ * still holds a reference
+ */
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ } else {
+ /* is there some to get */
+ if (sp->length == 0) {
+ /* no */
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ } else if (sp->discard_rest) {
+ if (send_lock_up == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ /* Whack down the size */
+ atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
+ if ((stcb->sctp_socket != NULL) &&
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+ atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ }
+ sp->length = 0;
+ sp->some_taken = 1;
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ }
+ some_taken = sp->some_taken;
+re_look:
+ length = sp->length;
+ if (sp->msg_is_complete) {
+ /* The message is complete */
+ to_move = min(length, frag_point);
+ if (to_move == length) {
+ /* All of it fits in the MTU */
+ if (sp->some_taken) {
+ rcv_flags |= SCTP_DATA_LAST_FRAG;
+ } else {
+ rcv_flags |= SCTP_DATA_NOT_FRAG;
+ }
+ sp->put_last_out = 1;
+ if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
+ rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+ }
+ } else {
+ /* Not all of it fits, we fragment */
+ if (sp->some_taken == 0) {
+ rcv_flags |= SCTP_DATA_FIRST_FRAG;
+ }
+ sp->some_taken = 1;
+ }
+ } else {
+ to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
+ if (to_move) {
+ /*-
+ * We use a snapshot of length in case it
+ * is expanding during the compare.
+ */
+ uint32_t llen;
+
+ llen = length;
+ if (to_move >= llen) {
+ to_move = llen;
+ if (send_lock_up == 0) {
+ /*-
+ * We are taking all of an incomplete msg
+ * thus we need a send lock.
+ */
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ if (sp->msg_is_complete) {
+ /* the sender finished the msg */
+ goto re_look;
+ }
+ }
+ }
+ if (sp->some_taken == 0) {
+ rcv_flags |= SCTP_DATA_FIRST_FRAG;
+ sp->some_taken = 1;
+ }
+ } else {
+ /* Nothing to take. */
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ }
+
+ /* If we reach here, we can copy out a chunk */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* No chunk memory */
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ /* Set up for unordered if needed by looking
+ * at the user-sent info flags.
+ */
+ if (sp->sinfo_flags & SCTP_UNORDERED) {
+ rcv_flags |= SCTP_DATA_UNORDERED;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
+ (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
+ rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+ }
+ /* clear out the chunk before setting up */
+ memset(chk, 0, sizeof(*chk));
+ chk->rec.data.rcv_flags = rcv_flags;
+
+ if (to_move >= length) {
+ /* we think we can steal the whole thing */
+ if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ if (to_move < sp->length) {
+ /* bail, it changed */
+ goto dont_do_it;
+ }
+ chk->data = sp->data;
+ chk->last_mbuf = sp->tail_mbuf;
+ /* register the stealing */
+ sp->data = sp->tail_mbuf = NULL;
+ } else {
+ struct mbuf *m;
+ dont_do_it:
+ chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
+ chk->last_mbuf = NULL;
+ if (chk->data == NULL) {
+ sp->some_taken = some_taken;
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ *bail = 1;
+ to_move = 0;
+ goto out_of;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
+ }
+#endif
+ /* Pull off the data */
+ m_adj(sp->data, to_move);
+ /* Now let's work our way down and compact it */
+ m = sp->data;
+ while (m && (SCTP_BUF_LEN(m) == 0)) {
+ sp->data = SCTP_BUF_NEXT(m);
+ SCTP_BUF_NEXT(m) = NULL;
+ if (sp->tail_mbuf == m) {
+ /*-
+ * Freeing tail? TSNH since
+ * we supposedly were taking less
+ * than the sp->length.
+ */
+#ifdef INVARIANTS
+ panic("Huh, freing tail? - TSNH");
+#else
+ SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
+ sp->tail_mbuf = sp->data = NULL;
+ sp->length = 0;
+#endif
+
+ }
+ sctp_m_free(m);
+ m = sp->data;
+ }
+ }
+ if (SCTP_BUF_IS_EXTENDED(chk->data)) {
+ chk->copy_by_ref = 1;
+ } else {
+ chk->copy_by_ref = 0;
+ }
+ /* Get last_mbuf and counts of mb usage.
+ * This is ugly but hopefully it's only one mbuf.
+ */
+ if (chk->last_mbuf == NULL) {
+ chk->last_mbuf = chk->data;
+ while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
+ chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
+ }
+ }
+
+ if (to_move > length) {
+ /*- This should not happen either,
+ * since we always lower to_move to the size
+ * of sp->length if it's larger.
+ */
+#ifdef INVARIANTS
+ panic("Huh, how can to_move be larger?");
+#else
+ SCTP_PRINTF("Huh, how can to_move be larger?\n");
+ sp->length = 0;
+#endif
+ } else {
+ atomic_subtract_int(&sp->length, to_move);
+ }
+ leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
+ if (M_LEADINGSPACE(chk->data) < leading) {
+ /* Not enough room for a chunk header, get some */
+ struct mbuf *m;
+
+ m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /*
+ * we're in trouble here. _PREPEND below will free
+ * all the data if there is no leading space, so we
+ * must put the data back and restore.
+ */
+ if (send_lock_up == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ if (sp->data == NULL) {
+ /* unsteal the data */
+ sp->data = chk->data;
+ sp->tail_mbuf = chk->last_mbuf;
+ } else {
+ struct mbuf *m_tmp;
+ /* reassemble the data */
+ m_tmp = sp->data;
+ sp->data = chk->data;
+ SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
+ }
+ sp->some_taken = some_taken;
+ atomic_add_int(&sp->length, to_move);
+ chk->data = NULL;
+ *bail = 1;
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ to_move = 0;
+ goto out_of;
+ } else {
+ SCTP_BUF_LEN(m) = 0;
+ SCTP_BUF_NEXT(m) = chk->data;
+ chk->data = m;
+ M_ALIGN(chk->data, 4);
+ }
+ }
+ SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
+ if (chk->data == NULL) {
+ /* HELP, TSNH since we assured it would not above? */
+#ifdef INVARIANTS
+ panic("prepend failes HELP?");
+#else
+ SCTP_PRINTF("prepend fails HELP?\n");
+ sctp_free_a_chunk(stcb, chk, so_locked);
+#endif
+ *bail = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ chk->book_size_scale = 0;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->pad_inplace = 0;
+ chk->no_fr_allowed = 0;
+ if (stcb->asoc.idata_supported == 0) {
+ if (rcv_flags & SCTP_DATA_UNORDERED) {
+ /* Just use 0. The receiver ignores the values. */
+ chk->rec.data.mid = 0;
+ } else {
+ chk->rec.data.mid = strq->next_mid_ordered;
+ if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+ strq->next_mid_ordered++;
+ }
+ }
+ } else {
+ if (rcv_flags & SCTP_DATA_UNORDERED) {
+ chk->rec.data.mid = strq->next_mid_unordered;
+ if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+ strq->next_mid_unordered++;
+ }
+ } else {
+ chk->rec.data.mid = strq->next_mid_ordered;
+ if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+ strq->next_mid_ordered++;
+ }
+ }
+ }
+ chk->rec.data.sid = sp->sid;
+ chk->rec.data.ppid = sp->ppid;
+ chk->rec.data.context = sp->context;
+ chk->rec.data.doing_fast_retransmit = 0;
+
+ chk->rec.data.timetodrop = sp->ts;
+ chk->flags = sp->act_flags;
+
+ if (sp->net) {
+ chk->whoTo = sp->net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ } else
+ chk->whoTo = NULL;
+
+ if (sp->holds_key_ref) {
+ chk->auth_keyid = sp->auth_keyid;
+ sctp_auth_key_acquire(stcb, chk->auth_keyid);
+ chk->holds_key_ref = 1;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
+#else
+ chk->rec.data.tsn = asoc->sending_seq++;
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
+ sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
+ (uint32_t)(uintptr_t)stcb, sp->length,
+ (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
+ chk->rec.data.tsn);
+ }
+ if (stcb->asoc.idata_supported == 0) {
+ dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ } else {
+ ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
+ }
+ /*
+ * Put the rest of the things in place now. Size was done
+ * earlier in previous loop prior to padding.
+ */
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
+ asoc->tsn_out_at = 0;
+ asoc->tsn_out_wrapped = 1;
+ }
+ asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
+ asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
+ asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
+ asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
+ asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
+ asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
+ asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
+ asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
+ asoc->tsn_out_at++;
+#endif
+ if (stcb->asoc.idata_supported == 0) {
+ dchkh->ch.chunk_type = SCTP_DATA;
+ dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+ dchkh->dp.tsn = htonl(chk->rec.data.tsn);
+ dchkh->dp.sid = htons(strq->sid);
+ dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
+ dchkh->dp.ppid = chk->rec.data.ppid;
+ dchkh->ch.chunk_length = htons(chk->send_size);
+ } else {
+ ndchkh->ch.chunk_type = SCTP_IDATA;
+ ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+ ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
+ ndchkh->dp.sid = htons(strq->sid);
+ ndchkh->dp.reserved = htons(0);
+ ndchkh->dp.mid = htonl(chk->rec.data.mid);
+ if (sp->fsn == 0)
+ ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
+ else
+ ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
+ sp->fsn++;
+ ndchkh->ch.chunk_length = htons(chk->send_size);
+ }
+ /* Now advance the chk->send_size by the actual pad needed. */
+ if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
+ /* need a pad */
+ struct mbuf *lm;
+ int pads;
+
+ pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
+ lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
+ if (lm != NULL) {
+ chk->last_mbuf = lm;
+ chk->pad_inplace = 1;
+ }
+ chk->send_size += pads;
+ }
+ if (PR_SCTP_ENABLED(chk->flags)) {
+ asoc->pr_sctp_cnt++;
+ }
+ if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
+ /* All done pull and kill the message */
+ if (sp->put_last_out == 0) {
+ SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
+ SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
+ sp->sender_all_done,
+ sp->length,
+ sp->msg_is_complete,
+ sp->put_last_out,
+ send_lock_up);
+ }
+ if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+ TAILQ_REMOVE(&strq->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
+ if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
+ (strq->chunks_on_queues == 0) &&
+ TAILQ_EMPTY(&strq->outqueue)) {
+ stcb->asoc.trigger_reset = 1;
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp, so_locked);
+ }
+ asoc->chunks_on_out_queue++;
+ strq->chunks_on_queues++;
+ TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt++;
+out_of:
+ if (send_lock_up) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return (to_move);
+}
+
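+/*
+ * sctp_move_to_outqueue() above returns the number of bytes it moved
+ * from the stream queue onto asoc->send_queue as one DATA/I-DATA
+ * chunk.  *giveup is set when the stream has nothing usable right
+ * now, *bail when an allocation failed; in either case the caller
+ * stops filling the current packet.
+ */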
+
+static void
+sctp_fill_outqueue(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *strq;
+ uint32_t space_left, moved, total_moved;
+ int bail, giveup;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+ total_moved = 0;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ space_left = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ space_left = net->mtu - sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ /* TSNH */
+ space_left = net->mtu;
+ break;
+ }
+ /* Need an allowance for the data chunk header too */
+ space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
+
+ /* must make even word boundary */
+ space_left &= 0xfffffffc;
+ strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
+ giveup = 0;
+ bail = 0;
+ while ((space_left > 0) && (strq != NULL)) {
+ moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
+ &giveup, eeor_mode, &bail, so_locked);
+ stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
+ if ((giveup != 0) || (bail != 0)) {
+ break;
+ }
+ strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
+ total_moved += moved;
+ if (space_left >= moved) {
+ space_left -= moved;
+ } else {
+ space_left = 0;
+ }
+ if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
+ space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
+ } else {
+ space_left = 0;
+ }
+ space_left &= 0xfffffffc;
+ }
+ if (bail != 0)
+ *quit_now = 1;
+
+ stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
+
+ if (total_moved == 0) {
+ if ((stcb->asoc.sctp_cmt_on_off == 0) &&
+ (net == stcb->asoc.primary_destination)) {
+ /* ran dry for the primary network */
+ SCTP_STAT_INCR(sctps_primary_randry);
+ } else if (stcb->asoc.sctp_cmt_on_off > 0) {
+ /* ran dry with CMT on */
+ SCTP_STAT_INCR(sctps_cmt_randry);
+ }
+ }
+}
+
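+/*
+ * Example of the space computation in sctp_fill_outqueue() above
+ * (assuming the usual 32 bytes of IPv4 + SCTP header overhead): for an
+ * IPv4 net with mtu = 1500, space_left starts at 1468, loses the
+ * data-chunk overhead to 1452 for plain DATA, and is masked down to a
+ * 4-byte boundary; the loop then keeps pulling from the stream
+ * scheduler until the packet is full or a move gives up or bails.
+ */
+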
+void
+sctp_fix_ecn_echo(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ }
+ }
+}
+
+void
+sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_queue_pending *sp;
+ unsigned int i;
+
+ if (net == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
+ if (sp->net == net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->whoTo == net) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ }
+}
+
+int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *num_out,
+ int *reason_code,
+ int control_only, int from_where,
+ struct timeval *now, int *now_filled, int frag_point, int so_locked)
+{
+ /**
+ * Ok this is the generic chunk service queue. We must do the
+ * following:
+ * - Service the stream queue that is next, moving any
+ * message (note I must get a complete message i.e. FIRST/MIDDLE and
+ * LAST to the out queue in one pass) and assigning TSN's. This
+ * only applies though if the peer does not support NDATA. For NDATA
+ * chunks it's ok to not send the entire message ;-)
+ * - Check to see if the cwnd/rwnd allows any output; if so, we go ahead and
+ * formulate and send the low-level chunks, making sure to combine
+ * any control in the control chunk queue also.
+ */
+ struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
+ struct mbuf *outchain, *endoutchain;
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ /* temp arrays for unlinking */
+ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+ int no_fragmentflg, error;
+ unsigned int max_rwnd_per_dest, max_send_per_dest;
+ int one_chunk, hbflag, skip_data_for_this_net;
+ int asconf, cookie, no_out_cnt;
+ int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
+ unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
+ int tsns_sent = 0;
+ uint32_t auth_offset;
+ struct sctp_auth_chunk *auth;
+ uint16_t auth_keyid;
+ int override_ok = 1;
+ int skip_fill_up = 0;
+ int data_auth_reqd = 0;
+ /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
+ the destination. */
+ int quit_now = 0;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+ *num_out = 0;
+ *reason_code = 0;
+ auth_keyid = stcb->asoc.authinfo.active_keyid;
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
+ eeor_mode = 1;
+ } else {
+ eeor_mode = 0;
+ }
+ ctl_cnt = no_out_cnt = asconf = cookie = 0;
+ /*
+ * First let's prime the pump. For each destination, if there is room
+ * in the flight size, attempt to pull an MTU's worth out of the
+ * stream queues into the general send_queue.
+ */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC2, 2);
+#endif
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ hbflag = 0;
+ if (control_only)
+ no_data_chunks = 1;
+ else
+ no_data_chunks = 0;
+
+ /* Nothing possible to send? */
+ if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
+ (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ sctp_is_there_unsent_data(stcb, so_locked) == 0) {
+ nothing_to_send:
+ *reason_code = 9;
+ return (0);
+ }
+ if (asoc->peers_rwnd == 0) {
+ /* No room in peer's rwnd */
+ *reason_code = 1;
+ if (asoc->total_flight > 0) {
+ /* we are allowed one chunk in flight */
+ no_data_chunks = 1;
+ }
+ }
+ if (stcb->asoc.ecn_echo_cnt_onq) {
+ /* Record where a sack goes, if any */
+ if (no_data_chunks &&
+ (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
+ /* Nothing but ECNe to send - we don't do that */
+ goto nothing_to_send;
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
+ sack_goes_to = chk->whoTo;
+ break;
+ }
+ }
+ }
+ max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
+ if (stcb->sctp_socket)
+ max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
+ else
+ max_send_per_dest = 0;
+ if (no_data_chunks == 0) {
+ /* How many non-directed chunks are there? */
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->whoTo == NULL) {
+ /* We already have non-directed
+ * chunks on the queue, no need
+ * to do a fill-up.
+ */
+ skip_fill_up = 1;
+ break;
+ }
+ }
+
+ }
+ if ((no_data_chunks == 0) &&
+ (skip_fill_up == 0) &&
+ (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /*
+ * This for loop takes in each net;
+ * if it's got space in cwnd and
+ * has data sent to it (when CMT is off), then it
+ * calls sctp_fill_outqueue for the net. This gets
+ * data on the send queue for that network.
+ *
+ * In sctp_fill_outqueue TSN's are assigned and
+ * data is copied out of the stream buffers. Note
+ * mostly copy by reference (we hope).
+ */
+ net->window_probe = 0;
+ if ((net != stcb->asoc.alternate) &&
+ ((net->dest_state & SCTP_ADDR_PF) ||
+ (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
+ (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 1,
+ SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ continue;
+ }
+ if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
+ (net->flight_size == 0)) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
+ }
+ if (net->flight_size >= net->cwnd) {
+ /* skip this network, no room - can't fill */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 3,
+ SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ continue;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
+ if (quit_now) {
+ /* memory alloc failure */
+ no_data_chunks = 1;
+ break;
+ }
+ }
+ }
+ /* now service each destination and send out what we can for it */
+ /* Nothing to send? */
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue)) {
+ *reason_code = 8;
+ return (0);
+ }
+
+ if (asoc->sctp_cmt_on_off > 0) {
+ /* get the last start point */
+ start_at = asoc->last_net_cmt_send_started;
+ if (start_at == NULL) {
+ /* NULL, so start at the beginning */
+ start_at = TAILQ_FIRST(&asoc->nets);
+ } else {
+ start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
+ if (start_at == NULL) {
+ start_at = TAILQ_FIRST(&asoc->nets);
+ }
+ }
+ asoc->last_net_cmt_send_started = start_at;
+ } else {
+ start_at = TAILQ_FIRST(&asoc->nets);
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->whoTo == NULL) {
+ if (asoc->alternate) {
+ chk->whoTo = asoc->alternate;
+ } else {
+ chk->whoTo = asoc->primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ }
+ old_start_at = NULL;
+again_one_more_time:
+ for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
+ /* how much can we send? */
+ /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
+ if (old_start_at && (old_start_at == net)) {
+ /* through the list completely. */
+ break;
+ }
+ tsns_sent = 0xa;
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+ (net->flight_size >= net->cwnd)) {
+ /* Nothing on control or asconf and flight is full; we can skip,
+ * even in the CMT case.
+ */
+ continue;
+ }
+ bundle_at = 0;
+ endoutchain = outchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ no_fragmentflg = 1;
+ one_chunk = 0;
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ skip_data_for_this_net = 1;
+ } else {
+ skip_data_for_this_net = 0;
+ }
+ switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ mtu = net->mtu - sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
+ }
+ mx_mtu = mtu;
+ to_out = 0;
+ if (mtu > asoc->peers_rwnd) {
+ if (asoc->total_flight > 0) {
+ /* We have a packet in flight somewhere */
+ r_mtu = asoc->peers_rwnd;
+ } else {
+ /* We are always allowed to send one MTU out */
+ one_chunk = 1;
+ r_mtu = mtu;
+ }
+ } else {
+ r_mtu = mtu;
+ }
+ error = 0;
+ /************************/
+ /* ASCONF transmission */
+ /************************/
+ /* Now first lets go through the asconf queue */
+ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
+ if (chk->rec.chunk_id.id != SCTP_ASCONF) {
+ continue;
+ }
+ if (chk->whoTo == NULL) {
+ if (asoc->alternate == NULL) {
+ if (asoc->primary_destination != net) {
+ break;
+ }
+ } else {
+ if (asoc->alternate != net) {
+ break;
+ }
+ }
+ } else {
+ if (chk->whoTo != net) {
+ break;
+ }
+ }
+ if (chk->data == NULL) {
+ break;
+ }
+ if (chk->sent != SCTP_DATAGRAM_UNSENT &&
+ chk->sent != SCTP_DATAGRAM_RESEND) {
+ break;
+ }
+ /*
+ * if no AUTH is yet included and this chunk
+ * requires it, make sure to account for it. We
+ * don't apply the size until the AUTH chunk is
+ * actually added below in case there is no room for
+ * this chunk. NOTE: we overload the use of "omtu"
+ * here
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks)) {
+ omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ omtu = 0;
+ /* Here we do NOT factor the r_mtu */
+ if ((chk->send_size < (int)(mtu - omtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /*
+ * We probably should glom the mbuf chain
+ * from the chk->data for control but the
+ * problem is it becomes yet one more level
+ * of tracking to do if for some reason
+ * output fails. Then I have got to
+ * reconstruct the merged control chain.. el
+ * yucko.. for now we take the easy way and
+ * do the copy
+ */
+ /*
+ * Add an AUTH chunk, if chunk requires it
+ * save the offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+ (int)chk->rec.chunk_id.can_take_data,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ *reason_code = 8;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ /* update our MTU size */
+ if (mtu > (chk->send_size + omtu))
+ mtu -= (chk->send_size + omtu);
+ else
+ mtu = 0;
+ to_out += (chk->send_size + omtu);
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ if (chk->rec.chunk_id.can_take_data)
+ chk->data = NULL;
+ /*
+ * set hb flag since we can
+ * use these for RTO
+ */
+ hbflag = 1;
+ asconf = 1;
+ /*
+ * should sysctl this: don't
+ * bundle data with ASCONF
+ * since it requires AUTH
+ */
+ no_data_chunks = 1;
+ chk->sent = SCTP_DATAGRAM_SENT;
+ if (chk->whoTo == NULL) {
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ }
+ chk->snd_count++;
+ if (mtu == 0) {
+ /*
+ * Ok we are out of room but we can
+ * output without affecting the
+ * flight size since this little guy
+ * is a control only packet.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ /*
+ * do NOT clear the asconf
+ * flag as it is used to do
+ * appropriate source address
+ * selection.
+ */
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(now);
+ *now_filled = 1;
+ }
+ net->last_sent_time = *now;
+ hbflag = 0;
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain, auth_offset, auth,
+ stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, asconf,
+ inp->sctp_lport, stcb->rport,
+ htonl(stcb->asoc.peer_vtag),
+ net->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ /* error, we could not output */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ /* error, could not output */
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went
+ * unreachable
+ * during this send
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ *reason_code = 7;
+ break;
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ /*
+ * Increase the number we sent; if a
+ * cookie was sent we don't report
+ * that anything went out.
+ */
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt)
+ *num_out += ctl_cnt;
+ /* recalc a clean slate and setup */
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ mtu = net->mtu - sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
+ }
+ to_out = 0;
+ no_fragmentflg = 1;
+ }
+ }
+ }
+ if (error != 0) {
+ /* try next net */
+ continue;
+ }
+ /************************/
+ /* Control transmission */
+ /************************/
+ /* Now first lets go through the control queue */
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+ if ((sack_goes_to) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
+ (chk->whoTo != sack_goes_to)) {
+ /*
+ * if we have a sack in queue, and we are looking at an
+ * ecn echo that is NOT queued to where the sack is going..
+ */
+ if (chk->whoTo == net) {
+ /* Don't transmit it to where its going (current net) */
+ continue;
+ } else if (sack_goes_to == net) {
+ /* But do transmit it to this address */
+ goto skip_net_check;
+ }
+ }
+ if (chk->whoTo == NULL) {
+ if (asoc->alternate == NULL) {
+ if (asoc->primary_destination != net) {
+ continue;
+ }
+ } else {
+ if (asoc->alternate != net) {
+ continue;
+ }
+ }
+ } else {
+ if (chk->whoTo != net) {
+ continue;
+ }
+ }
+ skip_net_check:
+ if (chk->data == NULL) {
+ continue;
+ }
+ if (chk->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * It must be unsent. Cookies and ASCONFs
+ * hang around, but their timers will force
+ * retransmission when marked for resend.
+ */
+ continue;
+ }
+ /*
+ * if no AUTH is yet included and this chunk
+ * requires it, make sure to account for it. We
+ * don't apply the size until the AUTH chunk is
+ * actually added below in case there is no room for
+ * this chunk. NOTE: we overload the use of "omtu"
+ * here
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks)) {
+ omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ omtu = 0;
+ /* Here we do NOT factor the r_mtu */
+ if ((chk->send_size <= (int)(mtu - omtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /*
+ * We probably should glom the mbuf chain
+ * from the chk->data for control but the
+ * problem is it becomes yet one more level
+ * of tracking to do if for some reason
+ * output fails. Then I have got to
+ * reconstruct the merged control chain.. el
+ * yucko.. for now we take the easy way and
+ * do the copy
+ */
+ /*
+ * Add an AUTH chunk, if chunk requires it
+ * save the offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+ (int)chk->rec.chunk_id.can_take_data,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ *reason_code = 8;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ /* update our MTU size */
+ if (mtu > (chk->send_size + omtu))
+ mtu -= (chk->send_size + omtu);
+ else
+ mtu = 0;
+ to_out += (chk->send_size + omtu);
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ if (chk->rec.chunk_id.can_take_data)
+ chk->data = NULL;
+ /* Mark things to be removed, if needed */
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+ (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+ (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+ if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
+ hbflag = 1;
+ }
+ /* remove these chunks at the end */
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
+ /* turn off the timer */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ inp, stcb, NULL,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
+ }
+ }
+ ctl_cnt++;
+ } else {
+ /*
+ * Other chunks, since they have
+ * timers running (i.e. COOKIE)
+ * we just "trust" that it
+ * gets sent or retransmitted.
+ */
+ ctl_cnt++;
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ cookie = 1;
+ no_out_cnt = 1;
+ } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ /*
+ * Increment the ECNE send count here;
+ * this means we may be over-zealous in
+ * our counting if the send fails, but it's
+ * the best place to do it (we used to do
+ * it in the queue of the chunk, but that did
+ * not tell how many times it was sent).
+ */
+ SCTP_STAT_INCR(sctps_sendecne);
+ }
+ chk->sent = SCTP_DATAGRAM_SENT;
+ if (chk->whoTo == NULL) {
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ }
+ chk->snd_count++;
+ }
+ if (mtu == 0) {
+ /*
+ * Ok we are out of room but we can
+ * output without affecting the
+ * flight size since this little guy
+ * is a control only packet.
+ */
+ if (asconf) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ /*
+ * do NOT clear the asconf
+ * flag as it is used to do
+ * appropriate source address
+ * selection.
+ */
+ }
+ if (cookie) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+ cookie = 0;
+ }
+ /* Only HB or ASCONF advances time */
+ if (hbflag) {
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(now);
+ *now_filled = 1;
+ }
+ net->last_sent_time = *now;
+ hbflag = 0;
+ }
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain,
+ auth_offset, auth,
+ stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, asconf,
+ inp->sctp_lport, stcb->rport,
+ htonl(stcb->asoc.peer_vtag),
+ net->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ /* error, we could not output */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went
+ * unreachable
+ * during this send
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ *reason_code = 7;
+ break;
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ /*
+ * Increase the number we sent; if a
+ * cookie was sent we don't report
+ * that anything went out.
+ */
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt)
+ *num_out += ctl_cnt;
+ /* recalc a clean slate and setup */
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ mtu = net->mtu - sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
+ }
+ to_out = 0;
+ no_fragmentflg = 1;
+ }
+ }
+ }
+ if (error != 0) {
+ /* try next net */
+ continue;
+ }
+ /* JRI: if dest is in PF state, do not send data to it */
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ (net != stcb->asoc.alternate) &&
+ (net->dest_state & SCTP_ADDR_PF)) {
+ goto no_data_fill;
+ }
+ if (net->flight_size >= net->cwnd) {
+ goto no_data_fill;
+ }
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
+ (net->flight_size > max_rwnd_per_dest)) {
+ goto no_data_fill;
+ }
+ /*
+ * We need a specific accounting for the usage of the
+ * send buffer. We also need to check the number of messages
+ * per net. For now, this is better than nothing and it
+ * is disabled by default...
+ */
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
+ (max_send_per_dest > 0) &&
+ (net->flight_size > max_send_per_dest)) {
+ goto no_data_fill;
+ }
+ /*********************/
+ /* Data transmission */
+ /*********************/
+ /*
+ * if AUTH for DATA is required and no AUTH has been added
+ * yet, account for this in the mtu now... if no data can be
+ * bundled, this adjustment won't matter anyways since the
+ * packet will be going out...
+ */
+ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks);
+ if (data_auth_reqd && (auth == NULL)) {
+ mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ }
+ /* now lets add any data within the MTU constraints */
+ switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (net->mtu > SCTP_MIN_V4_OVERHEAD)
+ omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ else
+ omtu = 0;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (net->mtu > SCTP_MIN_OVERHEAD)
+ omtu = net->mtu - SCTP_MIN_OVERHEAD;
+ else
+ omtu = 0;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (net->mtu > sizeof(struct sctphdr)) {
+ omtu = net->mtu - sizeof(struct sctphdr);
+ } else {
+ omtu = 0;
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ omtu = 0;
+ break;
+ }
+ if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (skip_data_for_this_net == 0)) ||
+ (cookie)) {
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+ if (no_data_chunks) {
+ /* let only control go out */
+ *reason_code = 1;
+ break;
+ }
+ if (net->flight_size >= net->cwnd) {
+ /* skip this net, no room for data */
+ *reason_code = 2;
+ break;
+ }
+ if ((chk->whoTo != NULL) &&
+ (chk->whoTo != net)) {
+ /* Don't send the chunk on this net */
+ continue;
+ }
+
+ if (asoc->sctp_cmt_on_off == 0) {
+ if ((asoc->alternate) &&
+ (asoc->alternate != net) &&
+ (chk->whoTo == NULL)) {
+ continue;
+ } else if ((net != asoc->primary_destination) &&
+ (asoc->alternate == NULL) &&
+ (chk->whoTo == NULL)) {
+ continue;
+ }
+ }
+ if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
+ /*-
+ * strange, we have a chunk that is
+ * too big for its destination and
+ * yet no fragment ok flag.
+ * Something went wrong when the
+ * PMTU changed...we did not mark
+ * this chunk for some reason?? I
+ * will fix it here by letting IP
+ * fragment it for now and printing
+ * a warning. This really should not
+ * happen ...
+ */
+ SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
+ chk->send_size, mtu);
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
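+ /*
+ * With shutdown pending, set the I bit (SACK-IMMEDIATELY,
+ * RFC 7053) on outgoing DATA so the peer acks at once and
+ * the association can drain faster.
+ */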
+ if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ struct sctp_data_chunk *dchkh;
+
+ dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+ }
+ if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
+ ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
+ /* ok we will add this one */
+
+ /*
+ * Add an AUTH chunk, if chunk
+ * requires it, save the offset into
+ * the chain for AUTH
+ */
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else if (override_ok) {
+ /* use this data's keyid */
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ } else if (auth_keyid != chk->auth_keyid) {
+ /* different keyid, so done bundling */
+ break;
+ }
+ }
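+ /*
+ * A packet carries at most one AUTH chunk, so only
+ * DATA chunks sharing the same keyid can be bundled
+ * together; a keyid change ends the bundle via the
+ * break above.
+ */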
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ *reason_code = 3;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ /* update our MTU size */
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* unsigned subtraction of mtu */
+ if (mtu > chk->send_size)
+ mtu -= chk->send_size;
+ else
+ mtu = 0;
+ /* unsigned subtraction of r_mtu */
+ if (r_mtu > chk->send_size)
+ r_mtu -= chk->send_size;
+ else
+ r_mtu = 0;
+
+ to_out += chk->send_size;
+ if ((to_out > mx_mtu) && no_fragmentflg) {
+#ifdef INVARIANTS
+ panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
+#else
+ SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
+ mx_mtu, to_out);
+#endif
+ }
+ chk->window_probe = 0;
+ data_list[bundle_at++] = chk;
+ if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+ break;
+ }
+ if (chk->sent == SCTP_DATAGRAM_UNSENT) {
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
+ } else {
+ SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
+ }
+ if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
+ ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
+ /* Count the number of user messages that were
+ * fragmented; we do this by counting only when
+ * we see a LAST fragment.
+ */
+ SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
+ }
+ if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
+ if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
+ data_list[0]->window_probe = 1;
+ net->window_probe = 1;
+ }
+ break;
+ }
+ } else {
+ /*
+ * Must be sent in order of the
+ * TSN's (on a network)
+ */
+ break;
+ }
+ } /* for (chunk gather loop for this net) */
+ } /* if asoc.state OPEN */
+ no_data_fill:
+ /* Is there something to send for this destination? */
+ if (outchain) {
+ /* We may need to start a control timer or two */
+ if (asconf) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+ stcb, net);
+ /*
+ * do NOT clear the asconf flag as it is used
+ * to do appropriate source address selection.
+ */
+ }
+ if (cookie) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+ cookie = 0;
+ }
+ /* must start a send timer if data is being sent */
+ if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
+ /*
+ * no timer running on this destination
+ * restart it.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ if (bundle_at || hbflag) {
+ /* For data/asconf and hb set time */
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(now);
+ *now_filled = 1;
+ }
+ net->last_sent_time = *now;
+ }
+ /* Now send it, if there is anything to send :> */
+ if ((error = sctp_lowlevel_chunk_output(inp,
+ stcb,
+ net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain,
+ auth_offset,
+ auth,
+ auth_keyid,
+ no_fragmentflg,
+ bundle_at,
+ asconf,
+ inp->sctp_lport, stcb->rport,
+ htonl(stcb->asoc.peer_vtag),
+ net->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ /* error, we could not output */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went unreachable
+ * during this send
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ *reason_code = 6;
+ /*-
+ * I add this line to be paranoid. As far as
+ * I can tell the continue takes us back to
+ * the top of the for, but just to make sure
+ * I will reset these again here.
+ */
+ ctl_cnt = bundle_at = 0;
+ continue; /* This takes us back to the for() for the nets. */
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt) {
+ *num_out += (ctl_cnt + bundle_at);
+ }
+ if (bundle_at) {
+ /* setup for a RTO measurement */
+ tsns_sent = data_list[0]->rec.data.tsn;
+ /* fill time if not already filled */
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+ *now_filled = 1;
+ *now = asoc->time_last_sent;
+ } else {
+ asoc->time_last_sent = *now;
+ }
+ if (net->rto_needed) {
+ data_list[0]->do_rtt = 1;
+ net->rto_needed = 0;
+ }
+ SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
+ sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
+ }
+ if (one_chunk) {
+ break;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
+ }
+ }
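+ /*
+ * Two-pass walk: the first pass ran from start_at to the end of the
+ * list; restarting from the head and stopping when old_start_at is
+ * reached (checked at the loop top) visits every net exactly once.
+ */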
+ if (old_start_at == NULL) {
+ old_start_at = start_at;
+ start_at = TAILQ_FIRST(&asoc->nets);
+ if (old_start_at)
+ goto again_one_more_time;
+ }
+
+ /*
+ * At the end there should be no NON timed chunks hanging on this
+ * queue.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
+ }
+ if ((*num_out == 0) && (*reason_code == 0)) {
+ *reason_code = 4;
+ } else {
+ *reason_code = 5;
+ }
+ sctp_clean_up_ctl(stcb, asoc, so_locked);
+ return (0);
+}
+
+void
+sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
+{
+ /*-
+ * Prepend an OPERATION_ERROR chunk header and put it on the end of
+ * the control chunk queue.
+ */
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *mat, *last_mbuf;
+ uint32_t chunk_length;
+ uint16_t padding_length;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
+ if (op_err == NULL) {
+ return;
+ }
+ last_mbuf = NULL;
+ chunk_length = 0;
+ for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
+ chunk_length += SCTP_BUF_LEN(mat);
+ if (SCTP_BUF_NEXT(mat) == NULL) {
+ last_mbuf = mat;
+ }
+ }
+ if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
+ sctp_m_freem(op_err);
+ return;
+ }
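+ /*
+ * All chunks must end on a 4-byte boundary (RFC 4960, Section 3.2);
+ * e.g. a chunk_length of 13 needs padding_length = 3.
+ */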
+ padding_length = chunk_length % 4;
+ if (padding_length != 0) {
+ padding_length = 4 - padding_length;
+ }
+ if (padding_length != 0) {
+ if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
+ sctp_m_freem(op_err);
+ return;
+ }
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(op_err);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->send_size = (uint16_t)chunk_length;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = op_err;
+ chk->whoTo = NULL;
+ hdr = mtod(op_err, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_OPERATION_ERROR;
+ hdr->chunk_flags = 0;
+ hdr->chunk_length = htons(chk->send_size);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+}
+
+int
+sctp_send_cookie_echo(struct mbuf *m,
+ int offset, int limit,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /*-
+ * pull out the cookie and put it at the front of the control chunk
+ * queue.
+ */
+ int at;
+ struct mbuf *cookie;
+ struct sctp_paramhdr param, *phdr;
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+ uint16_t ptype, plen;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* First find the cookie in the param area */
+ cookie = NULL;
+ at = offset + sizeof(struct sctp_init_chunk);
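+ /*
+ * The parameter area is a sequence of TLVs, each padded to a 4-byte
+ * boundary, so advancing by SCTP_SIZE32(plen) steps over the value
+ * and its padding; e.g. plen == 10 advances 12 bytes.
+ */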
+ for (;;) {
+ phdr = sctp_get_next_param(m, at, &param, sizeof(param));
+ if (phdr == NULL) {
+ return (-3);
+ }
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (plen < sizeof(struct sctp_paramhdr)) {
+ return (-6);
+ }
+ if (ptype == SCTP_STATE_COOKIE) {
+ int pad;
+
+ /* found the cookie */
+ if (at + plen > limit) {
+ return (-7);
+ }
+ cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
+ if (cookie == NULL) {
+ /* No memory */
+ return (-2);
+ }
+ if ((pad = (plen % 4)) > 0) {
+ pad = 4 - pad;
+ }
+ if (pad > 0) {
+ if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
+ return (-8);
+ }
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
+ }
+#endif
+ break;
+ }
+ at += SCTP_SIZE32(plen);
+ }
+ /* ok, we got the cookie lets change it into a cookie echo chunk */
+ /* first the change from param to cookie */
+ hdr = mtod(cookie, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_COOKIE_ECHO;
+ hdr->chunk_flags = 0;
+ /* get the chunk stuff now and place it in the FRONT of the queue */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(cookie);
+ return (-5);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+ chk->send_size = SCTP_SIZE32(plen);
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = cookie;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
+ struct mbuf *m,
+ int offset,
+ int chk_length,
+ struct sctp_nets *net)
+{
+ /*
+ * take a HB request and make it into a HB ack and send it.
+ */
+ struct mbuf *outchain;
+ struct sctp_chunkhdr *chdr;
+ struct sctp_tmit_chunk *chk;
+
+ if (net == NULL)
+ /* must have a net pointer */
+ return;
+
+ outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
+ if (outchain == NULL) {
+ /* gak out of memory */
+ return;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
+ }
+#endif
+ chdr = mtod(outchain, struct sctp_chunkhdr *);
+ chdr->chunk_type = SCTP_HEARTBEAT_ACK;
+ chdr->chunk_flags = 0;
+ if (chk_length % 4 != 0) {
+ sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(outchain);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ chk->send_size = chk_length;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = outchain;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_cookie_ack(struct sctp_tcb *stcb)
+{
+ /* formulate and queue a cookie-ack back to sender */
+ struct mbuf *cookie_ack;
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
+ if (cookie_ack == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(cookie_ack);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = cookie_ack;
+ if (chk->asoc->last_control_chunk_from != NULL) {
+ chk->whoTo = chk->asoc->last_control_chunk_from;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ } else {
+ chk->whoTo = NULL;
+ }
+ hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_COOKIE_ACK;
+ hdr->chunk_flags = 0;
+ hdr->chunk_length = htons(chk->send_size);
+ SCTP_BUF_LEN(cookie_ack) = chk->send_size;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+
+void
+sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* formulate and queue a SHUTDOWN-ACK back to the sender */
+ struct mbuf *m_shutdown_ack;
+ struct sctp_shutdown_ack_chunk *ack_cp;
+ struct sctp_tmit_chunk *chk;
+
+ m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
+ if (m_shutdown_ack == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_shutdown_ack);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = m_shutdown_ack;
+ chk->whoTo = net;
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
+ ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
+ ack_cp->ch.chunk_flags = 0;
+ ack_cp->ch.chunk_length = htons(chk->send_size);
+ SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+void
+sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* formulate and queue a SHUTDOWN to the sender */
+ struct mbuf *m_shutdown;
+ struct sctp_shutdown_chunk *shutdown_cp;
+ struct sctp_tmit_chunk *chk;
+
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
+ /* We already have a SHUTDOWN queued. Reuse it. */
+ if (chk->whoTo) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ break;
+ }
+ }
+ if (chk == NULL) {
+ m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
+ if (m_shutdown == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_shutdown);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_SHUTDOWN;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ chk->send_size = sizeof(struct sctp_shutdown_chunk);
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = m_shutdown;
+ chk->whoTo = net;
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
+ shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
+ shutdown_cp->ch.chunk_flags = 0;
+ shutdown_cp->ch.chunk_length = htons(chk->send_size);
+ shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
+ SCTP_BUF_LEN(m_shutdown) = chk->send_size;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ } else {
+ TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
+ chk->whoTo = net;
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
+ shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ }
+ return;
+}
+
+void
+sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
+{
+ /*
+ * formulate and queue an ASCONF to the peer.
+ * ASCONF parameters should be queued on the assoc queue.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *m_asconf;
+ int len;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
+ (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
+ /* can't send a new one if there is one in flight already */
+ return;
+ }
+
+ /* compose an ASCONF chunk, maximum length is PMTU */
+ m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
+ if (m_asconf == NULL) {
+ return;
+ }
+
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_asconf);
+ return;
+ }
+
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_ASCONF;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+ chk->data = m_asconf;
+ chk->send_size = len;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+ chk->whoTo = net;
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+void
+sctp_send_asconf_ack(struct sctp_tcb *stcb)
+{
+ /*
+ * formulate and queue an asconf-ack back to the sender.
+ * the asconf-ack must be stored in the tcb.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct sctp_asconf_ack *ack, *latest_ack;
+ struct mbuf *m_ack;
+ struct sctp_nets *net = NULL;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* Get the latest ASCONF-ACK */
+ latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
+ if (latest_ack == NULL) {
+ return;
+ }
+ if (latest_ack->last_sent_to != NULL &&
+ latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
+ /* we're doing a retransmission */
+ net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
+ if (net == NULL) {
+ /* no alternate */
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ } else {
+ net = stcb->asoc.last_control_chunk_from;
+ }
+ }
+ } else {
+ /* normal case */
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ } else {
+ net = stcb->asoc.last_control_chunk_from;
+ }
+ }
+ latest_ack->last_sent_to = net;
+
+ TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
+ if (ack->data == NULL) {
+ continue;
+ }
+
+ /* copy the asconf_ack */
+ m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
+ if (m_ack == NULL) {
+ /* couldn't copy it */
+ return;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
+ }
+#endif
+
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ if (m_ack)
+ sctp_m_freem(m_ack);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+ chk->whoTo = net;
+ if (chk->whoTo) {
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ chk->data = m_ack;
+ chk->send_size = ack->len;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->asoc = &stcb->asoc;
+
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ }
+ return;
+}
+
+
+static int
+sctp_chunk_retransmission(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
+{
+ /*-
+ * send out one MTU of retransmission. If fast_retransmit is
+ * happening we ignore the cwnd. Otherwise we obey the cwnd and
+ * rwnd. For a Cookie or Asconf in the control chunk queue we
+ * retransmit them by themselves.
+ *
+ * For data chunks we will pick out the lowest TSN's in the sent_queue
+ * marked for resend and bundle them all together (up to a MTU of
+ * destination). The address to send to should have been
+ * selected/changed where the retransmission was marked (i.e. in FR
+ * or t3-timeout routines).
+ */
+ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+ struct sctp_tmit_chunk *chk, *fwd;
+ struct mbuf *m, *endofchain;
+ struct sctp_nets *net = NULL;
+ uint32_t tsns_sent = 0;
+ int no_fragmentflg, bundle_at, cnt_thru;
+ unsigned int mtu;
+ int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
+ struct sctp_auth_chunk *auth = NULL;
+ uint32_t auth_offset = 0;
+ uint16_t auth_keyid;
+ int override_ok = 1;
+ int data_auth_reqd = 0;
+ uint32_t dmtu = 0;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ tmr_started = ctl_cnt = bundle_at = error = 0;
+ no_fragmentflg = 1;
+ fwd_tsn = 0;
+ *cnt_out = 0;
+ fwd = NULL;
+ endofchain = m = NULL;
+ auth_keyid = stcb->asoc.authinfo.active_keyid;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC3, 1);
+#endif
+ if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (TAILQ_EMPTY(&asoc->control_send_queue))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
+ asoc->sent_queue_retran_cnt);
+ asoc->sent_queue_cnt = 0;
+ asoc->sent_queue_cnt_removeable = 0;
+ /* send back 0/0 so we enter normal transmission */
+ *cnt_out = 0;
+ return (0);
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
+ (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
+ (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ continue;
+ }
+ if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+ if (chk != asoc->str_reset) {
+ /*
+ * not eligible for retran if it's
+ * not ours
+ */
+ continue;
+ }
+ }
+ ctl_cnt++;
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ fwd_tsn = 1;
+ }
+ /*
+ * Add an AUTH chunk, if chunk requires it save the
+ * offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ m = sctp_add_auth_chunk(m, &endofchain,
+ &auth, &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+ break;
+ }
+ }
+ one_chunk = 0;
+ cnt_thru = 0;
+ /* do we have control chunks to retransmit? */
+ if (m != NULL) {
+ /* Start a timer no matter if we succeed or fail */
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
+ } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
+ chk->snd_count++; /* update our count */
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
+ (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
+ auth_offset, auth, stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, 0,
+ inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+ chk->whoTo->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ return (error);
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ endofchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ /*
+ * We don't want to mark the net->sent time here, since
+ * we use this for HB and retrans cannot measure RTT.
+ */
+ /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
+ *cnt_out += 1;
+ chk->sent = SCTP_DATAGRAM_SENT;
+ sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
+ if (fwd_tsn == 0) {
+ return (0);
+ } else {
+ /* Clean up the fwd-tsn list */
+ sctp_clean_up_ctl(stcb, asoc, so_locked);
+ return (0);
+ }
+ }
+ /*
+ * Ok, it is just data retransmission we need to do or that and a
+ * fwd-tsn with it all.
+ */
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ return (SCTP_RETRAN_DONE);
+ }
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
+ /* not yet open, resend the cookie and that is it */
+ return (1);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(20, inp, stcb, NULL);
+#endif
+ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ /* No, not sent to this net or not ready for rtx */
+ continue;
+ }
+ if (chk->data == NULL) {
+ SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
+ chk->rec.data.tsn, chk->snd_count, chk->sent);
+ continue;
+ }
+ if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
+ (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
+ chk->rec.data.tsn, chk->snd_count);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
+ so_locked);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (SCTP_RETRAN_EXIT);
+ }
+ /* pick up the net */
+ net = chk->whoTo;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ mtu = net->mtu - sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
+ }
+
+ if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
+ /* No room in peers rwnd */
+ uint32_t tsn;
+
+ tsn = asoc->last_acked_seq + 1;
+ if (tsn == chk->rec.data.tsn) {
+ /*
+ * we make a special exception for this
+ * case. The peer has no rwnd but is missing
+ * the lowest chunk, which is probably what
+ * is holding up the rwnd.
+ */
+ goto one_chunk_around;
+ }
+ return (1);
+ }
+ one_chunk_around:
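+ /*
+ * With no usable rwnd we may still retransmit a single chunk as a
+ * window probe; marking both the chunk and the net lets the SACK
+ * code treat the probe specially once the window reopens.
+ */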
+ if (asoc->peers_rwnd < mtu) {
+ one_chunk = 1;
+ if ((asoc->peers_rwnd == 0) &&
+ (asoc->total_flight == 0)) {
+ chk->window_probe = 1;
+ chk->whoTo->window_probe = 1;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC3, 2);
+#endif
+ bundle_at = 0;
+ m = NULL;
+ net->fast_retran_ip = 0;
+ if (chk->rec.data.doing_fast_retransmit == 0) {
+ /*
+ * if no FR in progress skip destination that have
+ * flight_size > cwnd.
+ */
+ if (net->flight_size >= net->cwnd) {
+ continue;
+ }
+ } else {
+ /*
+ * Mark the destination net to have FR recovery
+ * limits put on it.
+ */
+ *fr_done = 1;
+ net->fast_retran_ip = 1;
+ }
+
+ /*
+ * if no AUTH is yet included and this chunk requires it,
+ * make sure to account for it. We don't apply the size
+ * until the AUTH chunk is actually added below in case
+ * there is no room for this chunk.
+ */
+ if (data_auth_reqd && (auth == NULL)) {
+ dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ dmtu = 0;
+
+ if ((chk->send_size <= (mtu - dmtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /* ok we will add this one */
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else if (override_ok) {
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ } else if (chk->auth_keyid != auth_keyid) {
+ /* different keyid, so done bundling */
+ break;
+ }
+ }
+ m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* update our MTU size */
+ if (mtu > (chk->send_size + dmtu))
+ mtu -= (chk->send_size + dmtu);
+ else
+ mtu = 0;
+ data_list[bundle_at++] = chk;
+ if (one_chunk && (asoc->total_flight <= 0)) {
+ SCTP_STAT_INCR(sctps_windowprobed);
+ }
+ }
+ if (one_chunk == 0) {
+ /*
+ * now, are there any more chunks forward of chk
+ * to pick up?
+ */
+ for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
+ if (fwd->sent != SCTP_DATAGRAM_RESEND) {
+ /* Nope, not for retran */
+ continue;
+ }
+ if (fwd->whoTo != net) {
+ /* Nope, not the net in question */
+ continue;
+ }
+ if (data_auth_reqd && (auth == NULL)) {
+ dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ dmtu = 0;
+ if (fwd->send_size <= (mtu - dmtu)) {
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ auth_keyid = fwd->auth_keyid;
+ override_ok = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else if (override_ok) {
+ auth_keyid = fwd->auth_keyid;
+ override_ok = 0;
+ } else if (fwd->auth_keyid != auth_keyid) {
+ /* different keyid, so done bundling */
+ break;
+ }
+ }
+ m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ /* Do clear IP_DF ? */
+ if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* update our MTU size */
+ if (mtu > (fwd->send_size + dmtu))
+ mtu -= (fwd->send_size + dmtu);
+ else
+ mtu = 0;
+ data_list[bundle_at++] = fwd;
+ if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+ break;
+ }
+ } else {
+ /* can't fit so we are done */
+ break;
+ }
+ }
+ }
+ /* Is there something to send for this destination? */
+ if (m) {
+ /*
+ * No matter if we fail/or succeed we should start a
+ * timer. A failure is like a lost IP packet :-)
+ */
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ /*
+ * no timer running on this destination
+ * restart it.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ tmr_started = 1;
+ }
+ /* Now lets send it, if there is anything to send :> */
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr, m,
+ auth_offset, auth, auth_keyid,
+ no_fragmentflg, 0, 0,
+ inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+ net->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ /* error, we could not output */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ return (error);
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ endofchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ /* For HB's */
+ /*
+ * We don't want to mark the net->sent time here
+ * since we use this for HB and retrans cannot
+ * measure RTT
+ */
+ /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
+
+ /* For auto-close */
+ cnt_thru++;
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+ *now = asoc->time_last_sent;
+ *now_filled = 1;
+ } else {
+ asoc->time_last_sent = *now;
+ }
+ *cnt_out += bundle_at;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC4, bundle_at);
+#endif
+ if (bundle_at) {
+ tsns_sent = data_list[0]->rec.data.tsn;
+ }
+ for (i = 0; i < bundle_at; i++) {
+ SCTP_STAT_INCR(sctps_sendretransdata);
+ data_list[i]->sent = SCTP_DATAGRAM_SENT;
+ /*
+ * When we have a revoked data, and we
+ * retransmit it, then we clear the revoked
+ * flag since this flag dictates if we
+ * subtracted from the fs
+ */
+ if (data_list[i]->rec.data.chunk_was_revoked) {
+ /* Deflate the cwnd */
+ data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
+ data_list[i]->rec.data.chunk_was_revoked = 0;
+ }
+ data_list[i]->snd_count++;
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ /* record the time */
+ data_list[i]->sent_rcv_time = asoc->time_last_sent;
+ if (data_list[i]->book_size_scale) {
+ /*
+ * need to double the book size on
+ * this one
+ */
+ data_list[i]->book_size_scale = 0;
+ /* Since we double the booksize, we must
+ * also double the output queue size, since this
+ * gets shrunk when we free by this amount.
+ */
+ atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
+ data_list[i]->book_size *= 2;
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+ asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+ (uint32_t) (data_list[i]->send_size +
+ SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
+ data_list[i]->whoTo->flight_size,
+ data_list[i]->book_size,
+ (uint32_t)(uintptr_t)data_list[i]->whoTo,
+ data_list[i]->rec.data.tsn);
+ }
+ sctp_flight_size_increase(data_list[i]);
+ sctp_total_flight_increase(stcb, data_list[i]);
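+ /*
+ * Sender-side silly window avoidance: once what remains of the
+ * peer's window drops below the sws threshold, treat it as zero
+ * rather than dribbling out tiny packets.
+ */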
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if ((i == 0) &&
+ (data_list[i]->rec.data.doing_fast_retransmit)) {
+ SCTP_STAT_INCR(sctps_sendfastretrans);
+ if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
+ (tmr_started == 0)) {
+ /*-
+ * ok we just fast-retrans'd
+ * the lowest TSN, i.e the
+ * first on the list. In
+ * this case we want to give
+ * some more time to get a
+ * SACK back without a
+ * t3-expiring.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(21, inp, stcb, NULL);
+#endif
+ } else {
+ /* None will fit */
+ return (1);
+ }
+ if (asoc->sent_queue_retran_cnt <= 0) {
+ /* all done we have no more to retran */
+ asoc->sent_queue_retran_cnt = 0;
+ break;
+ }
+ if (one_chunk) {
+ /* No more room in rwnd */
+ return (1);
+ }
+ /* stop the for loop here. we sent out a packet */
+ break;
+ }
+ return (0);
+}
+
+static void
+sctp_timer_validation(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+
+ /* Validate that a timer is running somewhere */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ /* Here is a timer */
+ return;
+ }
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* Gak, we did not have a timer somewhere */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
+ if (asoc->alternate) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
+ } else {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
+ }
+ return;
+}
+
+void
+sctp_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ int from_where,
+ int so_locked)
+{
+ /*-
+ * Ok this is the generic chunk service queue. we must do the
+ * following:
+ * - See if there are retransmits pending, if so we must
+ * do these first.
+ * - Service the stream queue that is next, moving any
+ * message (note I must get a complete message i.e.
+ * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
+ * TSN's
+ * - Check to see if the cwnd/rwnd allows any output, if so we
+ * go ahead and formulate and send the low level chunks, making sure
+ * to combine any control in the control chunk queue also.
+ */
+ struct sctp_association *asoc;
+ struct sctp_nets *net;
+ int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
+ unsigned int burst_cnt = 0;
+ struct timeval now;
+ int now_filled = 0;
+ int nagle_on;
+ int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ int un_sent = 0;
+ int fr_done;
+ unsigned int tot_frs = 0;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+ asoc = &stcb->asoc;
+do_it_again:
+ /* The Nagle algorithm is only applied when handling a send call. */
+ if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
+ nagle_on = 0;
+ } else {
+ nagle_on = 1;
+ }
+ } else {
+ nagle_on = 0;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+
+ if ((un_sent <= 0) &&
+ (TAILQ_EMPTY(&asoc->control_send_queue)) &&
+ (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (asoc->trigger_reset == 0)) {
+ /* Nothing to do unless there is something to be sent left */
+ return;
+ }
+ /* Do we have something to send, data or control AND
+ * a sack timer running, if so piggy-back the sack.
+ */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_send_sack(stcb, so_locked);
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
+ }
+ while (asoc->sent_queue_retran_cnt) {
+ /*-
+ * Ok, it is retransmission time only, we send out only ONE
+ * packet with a single call off to the retran code.
+ */
+ if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
+ /*-
+ * Special hook for handling cookies discarded
+ * by the peer that carried data. Send cookie-ack only
+ * and then the next call will get the retrans.
+ */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+ from_where,
+ &now, &now_filled, frag_point, so_locked);
+ return;
+ } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
+ /* if its not from a HB then do it */
+ fr_done = 0;
+ ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
+ if (fr_done) {
+ tot_frs++;
+ }
+ } else {
+ /*
+ * its from any other place, we don't allow retran
+ * output (only control)
+ */
+ ret = 1;
+ }
+ if (ret > 0) {
+ /* Can't send any more */
+ /*-
+ * now lets push out control by calling med-level
+ * output once. this assures that we WILL send HB's
+ * if queued too.
+ */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+ from_where,
+ &now, &now_filled, frag_point, so_locked);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(8, inp, stcb, NULL);
+#endif
+ sctp_timer_validation(inp, stcb, asoc);
+ return;
+ }
+ if (ret < 0) {
+ /*-
+ * The count was off.. retran is not happening so do
+ * the normal retransmission.
+ */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(9, inp, stcb, NULL);
+#endif
+ if (ret == SCTP_RETRAN_EXIT) {
+ return;
+ }
+ break;
+ }
+ if (from_where == SCTP_OUTPUT_FROM_T3) {
+ /* Only one transmission allowed out of a timeout */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(10, inp, stcb, NULL);
+#endif
+ /* Push out any control */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
+ &now, &now_filled, frag_point, so_locked);
+ return;
+ }
+ if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
+ /* Hit FR burst limit */
+ return;
+ }
+ if ((num_out == 0) && (ret == 0)) {
+ /* No more retrans to send */
+ break;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(12, inp, stcb, NULL);
+#endif
+ /* Check for bad destinations, if they exist move chunks around. */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+ /*-
+ * if possible move things off of this address we
+ * still may send below due to the dormant state but
+ * we try to find an alternate address to send to
+ * and if we have one we move all queued data on the
+ * out wheel to this alternate address.
+ */
+ if (net->ref_count > 1)
+ sctp_move_chunks_from_net(stcb, net);
+ } else {
+ /*-
+ * if ((asoc->sat_network) || (net->addr_is_local))
+ * { burst_limit = asoc->max_burst *
+ * SCTP_SAT_NETWORK_BURST_INCR; }
+ */
+ if (asoc->max_burst > 0) {
+ if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
+ if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
+ /* JRS - Use the congestion control given in the congestion control module */
+ asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ }
+ net->fast_retran_ip = 0;
+ } else {
+ if (net->flight_size == 0) {
+ /* Should be decaying the cwnd here */
+ ;
+ }
+ }
+ }
+ }
+
+ }
+ burst_cnt = 0;
+ do {
+ error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
+ &reason_code, 0, from_where,
+ &now, &now_filled, frag_point, so_locked);
+ if (error) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
+ sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
+ }
+ break;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
+
+ tot_out += num_out;
+ burst_cnt++;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
+ if (num_out == 0) {
+ sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
+ }
+ }
+ if (nagle_on) {
+ /*
+ * When the Nagle algorithm is used, look at how much
+ * is unsent; if it's smaller than an MTU and we
+ * have data in flight, we stop, except if we are
+ * handling a fragmented user message.
+ */
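+ /*
+ * For example, assuming SCTP_MIN_OVERHEAD of 52 bytes (the
+ * common header plus an IPv6 header), a smallest_mtu of 1500
+ * stops this loop once fewer than 1448 bytes remain unsent
+ * while data is still in flight.
+ */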
+ un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
+ if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
+ (stcb->asoc.total_flight > 0)) {
+/* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
+ break;
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ sctp_is_there_unsent_data(stcb, so_locked) == 0) {
+ /* Nothing left to send */
+ break;
+ }
+ if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
+ /* Nothing left to send */
+ break;
+ }
+ } while (num_out &&
+ ((asoc->max_burst == 0) ||
+ SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
+ (burst_cnt < asoc->max_burst)));
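+ /*
+ * The loop above keeps calling the mid-level output until nothing
+ * more was sent or, unless bursting is disabled or handled through
+ * the cwnd adjustment above, max_burst packets have gone out.
+ */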
+
+ if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
+ if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ asoc->burst_limit_applied = 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
+ }
+ } else {
+ asoc->burst_limit_applied = 0;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
+ tot_out);
+
+ /*-
+ * Now we need to clean up the control chunk chain if an ECNE is on
+ * it. It must be marked as UNSENT again so the next call will continue
+ * to send it until we get a CWR, which removes it.
+ */
+ if (stcb->asoc.ecn_echo_cnt_onq)
+ sctp_fix_ecn_echo(asoc);
+
+ if (stcb->asoc.trigger_reset) {
+ if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
+ goto do_it_again;
+ }
+ }
+ return;
+}
+
+
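+/*
+ * sctp_output() is the raw send entry point: it validates the endpoint
+ * and its socket, then hands the mbuf chain off to sctp_sosend().
+ */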
+int
+sctp_output(
+ struct sctp_inpcb *inp,
+ struct mbuf *m,
+ struct sockaddr *addr,
+ struct mbuf *control,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct thread *p,
+#elif defined(_WIN32) && !defined(__Userspace__)
+ PKTHREAD p,
+#else
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct proc *p SCTP_UNUSED,
+#else
+ struct proc *p,
+#endif
+#endif
+ int flags)
+{
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+
+ if (inp->sctp_socket == NULL) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ return (sctp_sosend(inp->sctp_socket,
+ addr,
+ (struct uio *)NULL,
+ m,
+ control,
+#if defined(__APPLE__) && !defined(__Userspace__)
+ flags
+#else
+ flags, p
+#endif
+ ));
+}
+
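+/*
+ * Queue a FORWARD-TSN (or I-FORWARD-TSN when I-DATA is in use) telling
+ * the peer to advance its cumulative TSN past chunks we have abandoned
+ * (PR-SCTP). An already queued FORWARD-TSN chunk is reused if present.
+ */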
+void
+send_forward_tsn(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk, *at, *tp1, *last;
+ struct sctp_forward_tsn_chunk *fwdtsn;
+ struct sctp_strseq *strseq;
+ struct sctp_strseq_mid *strseq_m;
+ uint32_t advance_peer_ack_point;
+ unsigned int cnt_of_space, i, ovh;
+ unsigned int space_needed;
+ unsigned int cnt_of_skipped = 0;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ /* mark it as unsent */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ /* Do we correct its output location? */
+ if (chk->whoTo) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ goto sctp_fill_in_rest;
+ }
+ }
+ /* Ok if we reach here we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ asoc->fwd_tsn_cnt++;
+ chk->copy_by_ref = 0;
+ /*
+ * We don't do the old thing here, since
+ * this id is used not for the wire but to
+ * tell the stack during output that we are
+ * sending a fwd-tsn. Whether it's an
+ * IFORWARD or a FORWARD, it is a fwd-tsn.
+ */
+ chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->asoc = asoc;
+ chk->whoTo = NULL;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+sctp_fill_in_rest:
+ /*-
+ * Here we go through and fill out the part that deals with
+ * stream/seq of the ones we skip.
+ */
+ SCTP_BUF_LEN(chk->data) = 0;
+ TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
+ if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
+ (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
+ /* no more to look at */
+ break;
+ }
+ if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
+ /* We don't report these */
+ continue;
+ }
+ cnt_of_skipped++;
+ }
+ if (asoc->idata_supported) {
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
+ } else {
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
+ }
+ cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
+ /* trim to an MTU size */
+ cnt_of_space = asoc->smallest_mtu - ovh;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xff, 0, cnt_of_skipped,
+ asoc->advanced_peer_ack_point);
+ }
+ advance_peer_ack_point = asoc->advanced_peer_ack_point;
+ if (cnt_of_space < space_needed) {
+ /*-
+ * ok we must trim down the chunk by lowering the
+ * advance peer ack point.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xff, 0xff, cnt_of_space,
+ space_needed);
+ }
+ cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+ if (asoc->idata_supported) {
+ cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
+ } else {
+ cnt_of_skipped /= sizeof(struct sctp_strseq);
+ }
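+ /*
+ * For illustration: with cnt_of_space of 1432 bytes and the
+ * 8-byte FORWARD-TSN header, 1424 bytes remain, which is room
+ * for 356 four-byte sctp_strseq entries, or 178 eight-byte
+ * sctp_strseq_mid entries when I-DATA is in use.
+ */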
+ /*-
+ * Go through and find the TSN that will be the one
+ * we report.
+ */
+ at = TAILQ_FIRST(&asoc->sent_queue);
+ if (at != NULL) {
+ for (i = 0; i < cnt_of_skipped; i++) {
+ tp1 = TAILQ_NEXT(at, sctp_next);
+ if (tp1 == NULL) {
+ break;
+ }
+ at = tp1;
+ }
+ }
+ if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xff, cnt_of_skipped, at->rec.data.tsn,
+ asoc->advanced_peer_ack_point);
+ }
+ last = at;
+ /*-
+ * last now points to the last one I can report; update
+ * the peer ack point
+ */
+ if (last) {
+ advance_peer_ack_point = last->rec.data.tsn;
+ }
+ if (asoc->idata_supported) {
+ space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+ cnt_of_skipped * sizeof(struct sctp_strseq_mid);
+ } else {
+ space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+ cnt_of_skipped * sizeof(struct sctp_strseq);
+ }
+ }
+ chk->send_size = space_needed;
+ /* Setup the chunk */
+ fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
+ fwdtsn->ch.chunk_length = htons(chk->send_size);
+ fwdtsn->ch.chunk_flags = 0;
+ if (asoc->idata_supported) {
+ fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
+ } else {
+ fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ }
+ fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ fwdtsn++;
+ /*-
+ * Move pointer to after the fwdtsn and transfer to the
+ * strseq pointer.
+ */
+ if (asoc->idata_supported) {
+ strseq_m = (struct sctp_strseq_mid *)fwdtsn;
+ strseq = NULL;
+ } else {
+ strseq = (struct sctp_strseq *)fwdtsn;
+ strseq_m = NULL;
+ }
+ /*-
+ * Now populate the strseq list. This is done blindly
+ * without pulling out duplicate stream info. This is
+ * inefficient but won't harm the process, since the peer will
+ * look at these in sequence and will thus release anything.
+ * It could mean we exceed the PMTU and chop off some that
+ * we could have included.. but this is unlikely (aka 1432/4
+ * would mean 300+ stream seq's would have to be reported in
+ * one FWD-TSN). With a bit of work we can later FIX this to
+ * optimize and pull out duplicates.. but it does add more
+ * overhead. So for now... not!
+ */
+ i = 0;
+ TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
+ if (i >= cnt_of_skipped) {
+ break;
+ }
+ if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
+ /* We don't report these */
+ continue;
+ }
+ if (at->rec.data.tsn == advance_peer_ack_point) {
+ at->rec.data.fwd_tsn_cnt = 0;
+ }
+ if (asoc->idata_supported) {
+ strseq_m->sid = htons(at->rec.data.sid);
+ if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
+ strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
+ } else {
+ strseq_m->flags = 0;
+ }
+ strseq_m->mid = htonl(at->rec.data.mid);
+ strseq_m++;
+ } else {
+ strseq->sid = htons(at->rec.data.sid);
+ strseq->ssn = htons((uint16_t)at->rec.data.mid);
+ strseq++;
+ }
+ i++;
+ }
+ return;
+}
+
+void
+sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
+{
+ /*-
+ * Queue up a SACK or NR-SACK in the control queue.
+ * We must first check to see if a SACK or NR-SACK is
+ * somehow on the control queue.
+ * If so, we will take it and remove the old one.
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *a_chk;
+ struct sctp_sack_chunk *sack;
+ struct sctp_nr_sack_chunk *nr_sack;
+ struct sctp_gap_ack_block *gap_descriptor;
+ const struct sack_track *selector;
+ int mergeable = 0;
+ int offset;
+ caddr_t limit;
+ uint32_t *dup;
+ int limit_reached = 0;
+ unsigned int i, siz, j;
+ unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
+ int num_dups = 0;
+ int space_req;
+ uint32_t highest_tsn;
+ uint8_t flags;
+ uint8_t type;
+ uint8_t tsn_map;
+
+ if (stcb->asoc.nrsack_supported == 1) {
+ type = SCTP_NR_SELECTIVE_ACK;
+ } else {
+ type = SCTP_SELECTIVE_ACK;
+ }
+ a_chk = NULL;
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->last_data_chunk_from == NULL) {
+ /* Hmm we never received anything */
+ return;
+ }
+ sctp_slide_mapping_arrays(stcb);
+ sctp_set_rwnd(stcb, asoc);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == type) {
+ /* Hmm, found a sack already on queue, remove it */
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ a_chk = chk;
+ if (a_chk->data) {
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ if (a_chk->whoTo) {
+ sctp_free_remote_addr(a_chk->whoTo);
+ a_chk->whoTo = NULL;
+ }
+ break;
+ }
+ }
+ if (a_chk == NULL) {
+ sctp_alloc_a_chunk(stcb, a_chk);
+ if (a_chk == NULL) {
+ /* No memory so we drop the idea, and set a timer */
+ if (stcb->asoc.delayed_ack) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ stcb->asoc.send_sack = 1;
+ }
+ return;
+ }
+ a_chk->copy_by_ref = 0;
+ a_chk->rec.chunk_id.id = type;
+ a_chk->rec.chunk_id.can_take_data = 1;
+ }
+ /* Clear our pkt counts */
+ asoc->data_pkts_seen = 0;
+
+ a_chk->flags = 0;
+ a_chk->asoc = asoc;
+ a_chk->snd_count = 0;
+ a_chk->send_size = 0; /* fill in later */
+ a_chk->sent = SCTP_DATAGRAM_UNSENT;
+ a_chk->whoTo = NULL;
+
+ if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
+ /*-
+ * Ok, the destination for the SACK is unreachable; let's see if
+ * we can select an alternate to asoc->last_data_chunk_from
+ */
+ a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
+ if (a_chk->whoTo == NULL) {
+ /* Nope, no alternate */
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ }
+ } else {
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ }
+ if (a_chk->whoTo) {
+ atomic_add_int(&a_chk->whoTo->ref_count, 1);
+ }
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ }
+ if (highest_tsn == asoc->cumulative_tsn) {
+ /* no gaps */
+ if (type == SCTP_SELECTIVE_ACK) {
+ space_req = sizeof(struct sctp_sack_chunk);
+ } else {
+ space_req = sizeof(struct sctp_nr_sack_chunk);
+ }
+ } else {
+ /* gaps get a cluster */
+ space_req = MCLBYTES;
+ }
+ /* Ok, now let's formulate an mbuf with our SACK */
+ a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
+ if ((a_chk->data == NULL) ||
+ (a_chk->whoTo == NULL)) {
+ /* rats, no mbuf memory */
+ if (a_chk->data) {
+ /* was a problem with the destination */
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, a_chk, so_locked);
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->asoc.delayed_ack) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ stcb->asoc.send_sack = 1;
+ }
+ return;
+ }
+ /* ok, let's go through and fill it in */
+ SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
+ space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
+ if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
+ space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
+ }
+ limit = mtod(a_chk->data, caddr_t);
+ limit += space;
+
+ flags = 0;
+
+ if ((asoc->sctp_cmt_on_off > 0) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*-
+ * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
+ * received, then set the high bit to 1, else 0. Reset
+ * pkts_rcvd.
+ */
+ flags |= (asoc->cmt_dac_pkts_rcvd << 6);
+ asoc->cmt_dac_pkts_rcvd = 0;
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
+ stcb->asoc.cumack_log_atsnt++;
+ if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_atsnt = 0;
+ }
+#endif
+ /* reset the reader's interpretation */
+ stcb->freed_by_sorcv_sincelast = 0;
+
+ if (type == SCTP_SELECTIVE_ACK) {
+ sack = mtod(a_chk->data, struct sctp_sack_chunk *);
+ nr_sack = NULL;
+ gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
+ if (highest_tsn > asoc->mapping_array_base_tsn) {
+ siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ } else {
+ siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
+ }
+ } else {
+ sack = NULL;
+ nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
+ gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
+ if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
+ siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ } else {
+ siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
+ }
+ }
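+ /*
+ * siz is the number of mapping array bytes covering the TSNs of
+ * interest: e.g. a base_tsn of 100 and a highest_tsn of 131 span
+ * 32 TSNs and need (32 + 7) / 8 = 4 bytes. The second branch of
+ * each computation handles TSN wraparound.
+ */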
+
+ if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
+ offset = 1;
+ } else {
+ offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+ }
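+ /*
+ * offset maps bit positions in the mapping array onto gap offsets
+ * relative to the cumulative TSN: e.g. a base_tsn of 101 with a
+ * cumulative_tsn of 100 gives offset 1, so bit 0 (TSN 101) is
+ * reported as gap start 1, as RFC 4960 specifies.
+ */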
+ if (((type == SCTP_SELECTIVE_ACK) &&
+ SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
+ ((type == SCTP_NR_SELECTIVE_ACK) &&
+ SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ tsn_map = asoc->mapping_array[i];
+ if (type == SCTP_SELECTIVE_ACK) {
+ tsn_map |= asoc->nr_mapping_array[i];
+ }
+ if (i == 0) {
+ /*
+ * Clear all bits corresponding to TSNs
+ * smaller than or equal to the cumulative TSN.
+ */
+ tsn_map &= (~0U << (1 - offset));
+ }
+ selector = &sack_array[tsn_map];
+ if (mergeable && selector->right_edge) {
+ /*
+ * Back up; the left and right edges were ok to
+ * merge.
+ */
+ num_gap_blocks--;
+ gap_descriptor--;
+ }
+ if (selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = 0; j < selector->num_entries; j++) {
+ if (mergeable && selector->right_edge) {
+ /*
+ * do a merge by NOT setting
+ * the left side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the left
+ * side
+ */
+ mergeable = 0;
+ gap_descriptor->start = htons((selector->gaps[j].start + offset));
+ }
+ gap_descriptor->end = htons((selector->gaps[j].end + offset));
+ num_gap_blocks++;
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ if (limit_reached) {
+ /* Reached the limit, stop */
+ break;
+ }
+ offset += 8;
+ }
+ }
+ if ((type == SCTP_NR_SELECTIVE_ACK) &&
+ (limit_reached == 0)) {
+
+ mergeable = 0;
+
+ if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
+ siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ } else {
+ siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
+ }
+
+ if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
+ offset = 1;
+ } else {
+ offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+ }
+ if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ tsn_map = asoc->nr_mapping_array[i];
+ if (i == 0) {
+ /*
+ * Clear all bits corresponding to TSNs
+ * smaller than or equal to the cumulative TSN.
+ */
+ tsn_map &= (~0U << (1 - offset));
+ }
+ selector = &sack_array[tsn_map];
+ if (mergeable && selector->right_edge) {
+ /*
+ * Back up; the left and right edges were ok to
+ * merge.
+ */
+ num_nr_gap_blocks--;
+ gap_descriptor--;
+ }
+ if (selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = 0; j < selector->num_entries; j++) {
+ if (mergeable && selector->right_edge) {
+ /*
+ * do a merge by NOT setting
+ * the left side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the left
+ * side
+ */
+ mergeable = 0;
+ gap_descriptor->start = htons((selector->gaps[j].start + offset));
+ }
+ gap_descriptor->end = htons((selector->gaps[j].end + offset));
+ num_nr_gap_blocks++;
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ if (limit_reached) {
+ /* Reached the limit, stop */
+ break;
+ }
+ offset += 8;
+ }
+ }
+ }
+ /* now we must add any dups we are going to report. */
+ if ((limit_reached == 0) && (asoc->numduptsns)) {
+ dup = (uint32_t *) gap_descriptor;
+ for (i = 0; i < asoc->numduptsns; i++) {
+ *dup = htonl(asoc->dup_tsns[i]);
+ dup++;
+ num_dups++;
+ if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
+ /* no more room */
+ break;
+ }
+ }
+ asoc->numduptsns = 0;
+ }
+ /*
+ * now that the chunk is prepared, queue it to the control chunk
+ * queue.
+ */
+ if (type == SCTP_SELECTIVE_ACK) {
+ a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
+ (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+ num_dups * sizeof(int32_t));
+ SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+ sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ sack->sack.a_rwnd = htonl(asoc->my_rwnd);
+ sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
+ sack->sack.num_dup_tsns = htons(num_dups);
+ sack->ch.chunk_type = type;
+ sack->ch.chunk_flags = flags;
+ sack->ch.chunk_length = htons(a_chk->send_size);
+ } else {
+ a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
+ (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+ num_dups * sizeof(int32_t));
+ SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+ nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
+ nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
+ nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
+ nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
+ nr_sack->nr_sack.reserved = 0;
+ nr_sack->ch.chunk_type = type;
+ nr_sack->ch.chunk_flags = flags;
+ nr_sack->ch.chunk_length = htons(a_chk->send_size);
+ }
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
+ asoc->my_last_reported_rwnd = asoc->my_rwnd;
+ asoc->ctrl_queue_cnt++;
+ asoc->send_sack = 0;
+ SCTP_STAT_INCR(sctps_sendsacks);
+ return;
+}
+
+void
+sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
+{
+ struct mbuf *m_abort, *m, *m_last;
+ struct mbuf *m_out, *m_end = NULL;
+ struct sctp_abort_chunk *abort;
+ struct sctp_auth_chunk *auth = NULL;
+ struct sctp_nets *net;
+ uint32_t vtag;
+ uint32_t auth_offset = 0;
+ int error;
+ uint16_t cause_len, chunk_len, padding_len;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ }
+#endif
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /*-
+ * Add an AUTH chunk if the chunk type requires it, and save the
+ * offset into the chain for AUTH
+ */
+ if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
+ stcb->asoc.peer_auth_chunks)) {
+ m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
+ stcb, SCTP_ABORT_ASSOCIATION);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else {
+ m_out = NULL;
+ }
+ m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
+ if (m_abort == NULL) {
+ if (m_out) {
+ sctp_m_freem(m_out);
+ }
+ if (operr) {
+ sctp_m_freem(operr);
+ }
+ return;
+ }
+ /* link in any error */
+ SCTP_BUF_NEXT(m_abort) = operr;
+ cause_len = 0;
+ m_last = NULL;
+ for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
+ cause_len += (uint16_t)SCTP_BUF_LEN(m);
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ m_last = m;
+ }
+ }
+ SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
+ chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
+ padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
+ if (m_out == NULL) {
+ /* NO Auth chunk prepended, so reserve space in front */
+ SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
+ m_out = m_abort;
+ } else {
+ /* Put AUTH chunk at the front of the chain */
+ SCTP_BUF_NEXT(m_end) = m_abort;
+ }
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ /* Fill in the ABORT chunk header. */
+ abort = mtod(m_abort, struct sctp_abort_chunk *);
+ abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
+ if (stcb->asoc.peer_vtag == 0) {
+ /* This happens iff the assoc is in COOKIE-WAIT state. */
+ vtag = stcb->asoc.my_vtag;
+ abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
+ } else {
+ vtag = stcb->asoc.peer_vtag;
+ abort->ch.chunk_flags = 0;
+ }
+ abort->ch.chunk_length = htons(chunk_len);
+ /* Add padding, if necessary. */
+ if (padding_len > 0) {
+ if ((m_last == NULL) ||
+ (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
+ sctp_m_freem(m_out);
+ return;
+ }
+ }
+ if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
+ stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
+ stcb->asoc.primary_destination->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ so_locked))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (error == ENOBUFS) {
+ stcb->asoc.ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ } else {
+ stcb->asoc.ifp_had_enobuf = 0;
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+void
+sctp_send_shutdown_complete(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int reflect_vtag)
+{
+ /* formulate and SEND a SHUTDOWN-COMPLETE */
+ struct mbuf *m_shutdown_comp;
+ struct sctp_shutdown_complete_chunk *shutdown_complete;
+ uint32_t vtag;
+ int error;
+ uint8_t flags;
+
+ m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
+ if (m_shutdown_comp == NULL) {
+ /* no mbufs */
+ return;
+ }
+ if (reflect_vtag) {
+ flags = SCTP_HAD_NO_TCB;
+ vtag = stcb->asoc.my_vtag;
+ } else {
+ flags = 0;
+ vtag = stcb->asoc.peer_vtag;
+ }
+ shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
+ shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
+ shutdown_complete->ch.chunk_flags = flags;
+ shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
+ SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
+ if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
+ stcb->sctp_ep->sctp_lport, stcb->rport,
+ htonl(vtag),
+ net->port, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ 0, 0,
+#endif
+ SCTP_SO_NOT_LOCKED))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (error == ENOBUFS) {
+ stcb->asoc.ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ } else {
+ stcb->asoc.ifp_had_enobuf = 0;
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ return;
+}
+
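+/*
+ * Build and send a minimal response packet (e.g. an ABORT or
+ * SHUTDOWN-COMPLETE) back to the source of a received packet: the
+ * addresses and ports are swapped, the verification tag is either the
+ * given vtag or reflected from the inbound header, and the packet is
+ * UDP-encapsulated when a tunneling port is given.
+ */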
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static void
+sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag,
+ uint8_t type, struct mbuf *cause,
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+ uint32_t vrf_id, uint16_t port)
+#else
+static void
+sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag,
+ uint8_t type, struct mbuf *cause,
+ uint32_t vrf_id SCTP_UNUSED, uint16_t port)
+#endif
+{
+ struct mbuf *o_pak;
+ struct mbuf *mout;
+ struct sctphdr *shout;
+ struct sctp_chunkhdr *ch;
+#if defined(INET) || defined(INET6)
+ struct udphdr *udp;
+#endif
+ int ret, len, cause_len, padding_len;
+#ifdef INET
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sctp_route_t ro;
+#endif
+ struct sockaddr_in *src_sin, *dst_sin;
+ struct ip *ip;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *src_sin6, *dst_sin6;
+ struct ip6_hdr *ip6;
+#endif
+
+ /* Compute the length of the cause and add final padding. */
+ cause_len = 0;
+ if (cause != NULL) {
+ struct mbuf *m_at, *m_last = NULL;
+
+ for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ if (SCTP_BUF_NEXT(m_at) == NULL)
+ m_last = m_at;
+ cause_len += SCTP_BUF_LEN(m_at);
+ }
+ padding_len = cause_len % 4;
+ if (padding_len != 0) {
+ padding_len = 4 - padding_len;
+ }
+ if (padding_len != 0) {
+ if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
+ sctp_m_freem(cause);
+ return;
+ }
+ }
+ } else {
+ padding_len = 0;
+ }
+ /* Get an mbuf for the header. */
+ len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ len += sizeof(struct ip);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ len += sizeof(struct ip6_hdr);
+ break;
+#endif
+ default:
+ break;
+ }
+#if defined(INET) || defined(INET6)
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
+#else
+ mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
+#endif
+#else
+ mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
+#endif
+ if (mout == NULL) {
+ if (cause) {
+ sctp_m_freem(cause);
+ }
+ return;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ SCTP_BUF_RESV_UF(mout, max_linkhdr);
+#else
+ SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
+#endif
+#else
+ SCTP_BUF_RESV_UF(mout, max_linkhdr);
+#endif
+ SCTP_BUF_LEN(mout) = len;
+ SCTP_BUF_NEXT(mout) = cause;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ M_SETFIB(mout, fibnum);
+ mout->m_pkthdr.flowid = mflowid;
+ M_HASHTYPE_SET(mout, mflowtype);
+#endif
+#ifdef INET
+ ip = NULL;
+#endif
+#ifdef INET6
+ ip6 = NULL;
+#endif
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ src_sin = (struct sockaddr_in *)src;
+ dst_sin = (struct sockaddr_in *)dst;
+ ip = mtod(mout, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = (sizeof(struct ip) >> 2);
+ ip->ip_tos = 0;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ip->ip_off = htons(IP_DF);
+#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
+ ip->ip_off = IP_DF;
+#else
+ ip->ip_off = htons(IP_DF);
+#endif
+#if defined(__Userspace__)
+ ip->ip_id = htons(ip_id++);
+#elif defined(__FreeBSD__)
+ ip_fillid(ip);
+#elif defined(__APPLE__)
+#if RANDOM_IP_ID
+ ip->ip_id = ip_randomid();
+#else
+ ip->ip_id = htons(ip_id++);
+#endif
+#else
+ ip->ip_id = ip_id++;
+#endif
+ ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
+ if (port) {
+ ip->ip_p = IPPROTO_UDP;
+ } else {
+ ip->ip_p = IPPROTO_SCTP;
+ }
+ ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
+ ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
+ ip->ip_sum = 0;
+ len = sizeof(struct ip);
+ shout = (struct sctphdr *)((caddr_t)ip + len);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ src_sin6 = (struct sockaddr_in6 *)src;
+ dst_sin6 = (struct sockaddr_in6 *)dst;
+ ip6 = mtod(mout, struct ip6_hdr *);
+ ip6->ip6_flow = htonl(0x60000000);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (V_ip6_auto_flowlabel) {
+ ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
+ }
+#endif
+#if defined(__Userspace__)
+ ip6->ip6_hlim = IPv6_HOP_LIMIT;
+#else
+ ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
+#endif
+ if (port) {
+ ip6->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6->ip6_src = dst_sin6->sin6_addr;
+ ip6->ip6_dst = src_sin6->sin6_addr;
+ len = sizeof(struct ip6_hdr);
+ shout = (struct sctphdr *)((caddr_t)ip6 + len);
+ break;
+#endif
+ default:
+ len = 0;
+ shout = mtod(mout, struct sctphdr *);
+ break;
+ }
+#if defined(INET) || defined(INET6)
+ if (port) {
+ if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+ sctp_m_freem(mout);
+ return;
+ }
+ udp = (struct udphdr *)shout;
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_sum = 0;
+ udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
+ sizeof(struct sctphdr) +
+ sizeof(struct sctp_chunkhdr) +
+ cause_len + padding_len));
+ len += sizeof(struct udphdr);
+ shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
+ } else {
+ udp = NULL;
+ }
+#endif
+ shout->src_port = sh->dest_port;
+ shout->dest_port = sh->src_port;
+ shout->checksum = 0;
+ if (vtag) {
+ shout->v_tag = htonl(vtag);
+ } else {
+ shout->v_tag = sh->v_tag;
+ }
+ len += sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
+ ch->chunk_type = type;
+ if (vtag) {
+ ch->chunk_flags = 0;
+ } else {
+ ch->chunk_flags = SCTP_HAD_NO_TCB;
+ }
+ ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
+ len += sizeof(struct sctp_chunkhdr);
+ len += cause_len + padding_len;
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ sctp_m_freem(mout);
+ return;
+ }
+ SCTP_ATTACH_CHAIN(o_pak, mout, len);
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+#if defined(__APPLE__) && !defined(__Userspace__)
+ /* zap the stack pointer to the route */
+ memset(&ro, 0, sizeof(sctp_route_t));
+#endif
+ if (port) {
+#if !defined(_WIN32) && !defined(__Userspace__)
+#if defined(__FreeBSD__)
+ if (V_udp_cksum) {
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ } else {
+ udp->uh_sum = 0;
+ }
+#else
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+#endif
+#else
+ udp->uh_sum = 0;
+#endif
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ip->ip_len = htons(len);
+#elif defined(__APPLE__) || defined(__Userspace__)
+ ip->ip_len = len;
+#else
+ ip->ip_len = htons(len);
+#endif
+ if (port) {
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#if !defined(_WIN32) && !defined(__Userspace__)
+#if defined(__FreeBSD__)
+ if (V_udp_cksum) {
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ }
+#else
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+#endif
+#endif
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(o_pak);
+ }
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
+ /* Free the route if we got one back */
+ if (ro.ro_rt) {
+ RTFREE(ro.ro_rt);
+ ro.ro_rt = NULL;
+ }
+#else
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
+#endif
+ SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
+ if (port) {
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#if !defined(__Userspace__)
+#if defined(_WIN32)
+ udp->uh_sum = 0;
+#else
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
+#endif
+#endif
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
+ mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+ shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(o_pak);
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
+#endif
+ SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ char *buffer;
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)src;
+ if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
+ shout->checksum = sctp_calculate_cksum(mout, 0);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+ }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(mout);
+ }
+#endif
+ /* XXX: ideally we would not alloc/free a buffer for each packet */
+ if ((buffer = malloc(len)) != NULL) {
+ m_copydata(mout, 0, len, buffer);
+ ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
+ free(buffer);
+ } else {
+ ret = ENOMEM;
+ }
+ sctp_m_freem(mout);
+ break;
+ }
+#endif
+ default:
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
+ dst->sa_family);
+ sctp_m_freem(mout);
+ SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ return;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (port) {
+ UDPSTAT_INC(udps_opackets);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ if (ret) {
+ SCTP_STAT_INCR(sctps_senderrors);
+ }
+ return;
+}
+
+void
+sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+}
+
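+/*
+ * Queue a HEARTBEAT chunk to the given net. The chunk carries the send
+ * time, echoed back in the HEARTBEAT-ACK for RTT measurement, and, for
+ * unconfirmed addresses, two random nonces used to validate the ACK.
+ */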
+void
+sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
+{
+ struct sctp_tmit_chunk *chk;
+ struct sctp_heartbeat_chunk *hb;
+ struct timeval now;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (net == NULL) {
+ return;
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ break;
+#endif
+ default:
+ return;
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
+ return;
+ }
+
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_heartbeat_chunk);
+
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ /* Now we have a mbuf that we can fill in with the details */
+ hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
+ memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
+ /* fill out chunk header */
+ hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
+ hb->ch.chunk_flags = 0;
+ hb->ch.chunk_length = htons(chk->send_size);
+ /* Fill out hb parameter */
+ hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
+ hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
+ hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
+ hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
+ /* Did our user request this one? Put it in */
+ hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
+#ifdef HAVE_SA_LEN
+ hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
+#else
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
+ break;
+#endif
+ default:
+ hb->heartbeat.hb_info.addr_len = 0;
+ break;
+ }
+#endif
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /*
+ * we only take from the entropy pool if the address is not
+ * confirmed.
+ */
+ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ } else {
+ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
+ net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
+ }
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ memcpy(hb->heartbeat.hb_info.address,
+ &net->ro._l_addr.sin.sin_addr,
+ sizeof(net->ro._l_addr.sin.sin_addr));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ memcpy(hb->heartbeat.hb_info.address,
+ &net->ro._l_addr.sin6.sin6_addr,
+ sizeof(net->ro._l_addr.sin6.sin6_addr));
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(hb->heartbeat.hb_info.address,
+ &net->ro._l_addr.sconn.sconn_addr,
+ sizeof(net->ro._l_addr.sconn.sconn_addr));
+ break;
+#endif
+ default:
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ return;
+ }
+ net->hb_responded = 0;
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ stcb->asoc.ctrl_queue_cnt++;
+ SCTP_STAT_INCR(sctps_sendheartbeat);
+ return;
+}
+
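+/*
+ * Queue an ECN-Echo reporting congestion up to high_tsn. If an ECNE for
+ * the same net is already queued, it is updated in place and its packet
+ * counter bumped instead of queueing a second chunk.
+ */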
+void
+sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint32_t high_tsn)
+{
+ struct sctp_association *asoc;
+ struct sctp_ecne_chunk *ecne;
+ struct sctp_tmit_chunk *chk;
+
+ if (net == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
+ /* found a previous ECN_ECHO; update it if needed */
+ uint32_t cnt, ctsn;
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ ctsn = ntohl(ecne->tsn);
+ if (SCTP_TSN_GT(high_tsn, ctsn)) {
+ ecne->tsn = htonl(high_tsn);
+ SCTP_STAT_INCR(sctps_queue_upd_ecne);
+ }
+ cnt = ntohl(ecne->num_pkts_since_cwr);
+ cnt++;
+ ecne->num_pkts_since_cwr = htonl(cnt);
+ return;
+ }
+ }
+ /* nope could not find one to update so we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ SCTP_STAT_INCR(sctps_queue_upd_ecne);
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_ECN_ECHO;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_ecne_chunk);
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ stcb->asoc.ecn_echo_cnt_onq++;
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ ecne->ch.chunk_type = SCTP_ECN_ECHO;
+ ecne->ch.chunk_flags = 0;
+ ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
+ ecne->tsn = htonl(high_tsn);
+ ecne->num_pkts_since_cwr = htonl(1);
+ TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
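+/*
+ * Queue a PKT-DROP chunk echoing (a possibly truncated copy of) a
+ * dropped packet back to the peer; this is only done when the peer has
+ * declared pktdrop support. The bottle_bw and current_onq fields report
+ * our receive buffer limit and the amount of data queued on it.
+ */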
+void
+sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct mbuf *m, int len, int iphlen, int bad_crc)
+{
+ struct sctp_association *asoc;
+ struct sctp_pktdrop_chunk *drp;
+ struct sctp_tmit_chunk *chk;
+ uint8_t *datap;
+ int was_trunc = 0;
+ int fullsz = 0;
+ long spc;
+ int offset;
+ struct sctp_chunkhdr *ch, chunk_buf;
+ unsigned int chk_length;
+
+ if (!stcb) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->pktdrop_supported == 0) {
+ /*-
+ * peer must declare support before I send one.
+ */
+ return;
+ }
+ if (stcb->sctp_socket == NULL) {
+ return;
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ len -= iphlen;
+ chk->send_size = len;
+ /* Validate that we do not have an ABORT in here. */
+ offset = iphlen + sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* break to abort land */
+ break;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_PACKET_DROPPED:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_INITIATION_ACK:
+ /**
+ * We don't respond with a PKT-DROP to an ABORT
+ * or PKT-DROP. We also do not respond to an
+ * INIT-ACK, because we can't know if the initiation
+ * tag is correct or not.
+ */
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return;
+ default:
+ break;
+ }
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+
+ if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
+ min(stcb->asoc.smallest_mtu, MCLBYTES)) {
+ /* only send one MTU's worth; trim off the
+ * excess at the end.
+ */
+ fullsz = len;
+ len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
+ was_trunc = 1;
+ }
+ chk->asoc = &stcb->asoc;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+jump_out:
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
+ if (drp == NULL) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ goto jump_out;
+ }
+ chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
+ sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
+ chk->book_size_scale = 0;
+ if (was_trunc) {
+ drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
+ drp->trunc_len = htons(fullsz);
+ /* Len was already adjusted above to the size minus overhead;
+ * take out the pkt_drop chunk itself from it.
+ */
+ chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
+ len = chk->send_size;
+ } else {
+ /* no truncation needed */
+ drp->ch.chunk_flags = 0;
+ drp->trunc_len = htons(0);
+ }
+ if (bad_crc) {
+ drp->ch.chunk_flags |= SCTP_BADCRC;
+ }
+ chk->send_size += sizeof(struct sctp_pktdrop_chunk);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ if (net) {
+ /* we should hit here */
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ } else {
+ chk->whoTo = NULL;
+ }
+ drp->ch.chunk_type = SCTP_PACKET_DROPPED;
+ drp->ch.chunk_length = htons(chk->send_size);
+ spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
+ if (spc < 0) {
+ spc = 0;
+ }
+ drp->bottle_bw = htonl(spc);
+ if (asoc->my_rwnd) {
+ drp->current_onq = htonl(asoc->size_on_reasm_queue +
+ asoc->size_on_all_streams +
+ asoc->my_rwnd_control_len +
+ stcb->sctp_socket->so_rcv.sb_cc);
+ } else {
+ /*-
+ * If my rwnd is 0, possibly from mbuf depletion as well as
+ * space used, tell the peer there is NO space aka onq == bw
+ */
+ drp->current_onq = htonl(spc);
+ }
+ drp->reserved = 0;
+ datap = drp->data;
+ m_copydata(m, iphlen, len, (caddr_t)datap);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
+{
+ struct sctp_association *asoc;
+ struct sctp_cwr_chunk *cwr;
+ struct sctp_tmit_chunk *chk;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (net == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
+ /* found a previous CWR queued to the same destination; update it if needed */
+ uint32_t ctsn;
+ cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+ ctsn = ntohl(cwr->tsn);
+ if (SCTP_TSN_GT(high_tsn, ctsn)) {
+ cwr->tsn = htonl(high_tsn);
+ }
+ if (override & SCTP_CWR_REDUCE_OVERRIDE) {
+ /* Make sure override is carried */
+ cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
+ }
+ return;
+ }
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_ECN_CWR;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_cwr_chunk);
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+ cwr->ch.chunk_type = SCTP_ECN_CWR;
+ cwr->ch.chunk_flags = override;
+ cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
+ cwr->tsn = htonl(high_tsn);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+static int
+sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+ uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
+{
+ uint16_t len, old_len, i;
+ struct sctp_stream_reset_out_request *req_out;
+ struct sctp_chunkhdr *ch;
+ int at;
+ int number_entries = 0;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+ /* get to new offset for the param. */
+ req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
+ (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
+ TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ number_entries++;
+ }
+ }
+ if (number_entries == 0) {
+ return (0);
+ }
+ if (number_entries == stcb->asoc.streamoutcnt) {
+ number_entries = 0;
+ }
+ if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
+ number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
+ }
+ len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
+ req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
+ req_out->ph.param_length = htons(len);
+ req_out->request_seq = htonl(seq);
+ req_out->response_seq = htonl(resp_seq);
+ req_out->send_reset_at_tsn = htonl(last_sent);
+ at = 0;
+ if (number_entries) {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
+ (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
+ TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ req_out->list_of_streams[at] = htons(i);
+ at++;
+ stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
+ if (at >= number_entries) {
+ break;
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
+ }
+ }
+ if (SCTP_SIZE32(len) > len) {
+ /*-
+ * Need to worry about the pad we may end up adding to the
+ * end. This is easy since the struct is either aligned to 4
+ * bytes or 2 bytes off.
+ */
+ req_out->list_of_streams[number_entries] = 0;
+ }
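+ /*
+ * e.g. with three streams: the 16-byte request header plus 6
+ * bytes of stream numbers gives a len of 22, so the zero entry
+ * above is written and the parameter is padded to 24 bytes.
+ */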
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->book_size_scale = 0;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return (1);
+}
+
+static void
+sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t *list,
+ uint32_t seq)
+{
+ uint16_t len, old_len, i;
+ struct sctp_stream_reset_in_request *req_in;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
+ req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
+ req_in->ph.param_length = htons(len);
+ req_in->request_seq = htonl(seq);
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ req_in->list_of_streams[i] = htons(list[i]);
+ }
+ }
+ if (SCTP_SIZE32(len) > len) {
+ /*-
+ * Need to worry about the pad we may end up adding to the
+ * end. This is easy since the struct is either aligned to 4
+ * bytes or 2 bytes off.
+ */
+ req_in->list_of_streams[number_entries] = 0;
+ }
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->book_size_scale = 0;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+}
+
+static void
+sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t seq)
+{
+ uint16_t len, old_len;
+ struct sctp_stream_reset_tsn_request *req_tsn;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_tsn_request);
+ req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
+ req_tsn->ph.param_length = htons(len);
+ req_tsn->request_seq = htonl(seq);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result)
+{
+ uint16_t len, old_len;
+ struct sctp_stream_reset_response *resp;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_response);
+ resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+ resp->ph.param_length = htons(len);
+ resp->response_seq = htonl(resp_seq);
+ resp->result = htonl(result);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->book_size_scale = 0;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+}
+
+void
+sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
+ struct sctp_stream_reset_list *ent,
+ int response)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+
+ asoc = &stcb->asoc;
+
+ /*
+ * Reset our last reset action to the new response
+ * (probably PERFORMED). This ensures that if we fail to send, a
+ * retransmission from the peer will get the new response.
+ */
+ asoc->last_reset_action[0] = response;
+ if (asoc->stream_reset_outstanding) {
+ return;
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->book_size = sizeof(struct sctp_chunkhdr);
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ chk->book_size_scale = 0;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ if (stcb->asoc.alternate) {
+ chk->whoTo = stcb->asoc.alternate;
+ } else {
+ chk->whoTo = stcb->asoc.primary_destination;
+ }
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->book_size);
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ sctp_add_stream_reset_result(chk, ent->seq, response);
+ /* insert the chunk for sending */
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result,
+ uint32_t send_una, uint32_t recv_next)
+{
+ uint16_t len, old_len;
+ struct sctp_stream_reset_response_tsn *resp;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_response_tsn);
+ resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+ resp->ph.param_length = htons(len);
+ resp->response_seq = htonl(resp_seq);
+ resp->result = htonl(result);
+ resp->senders_next_tsn = htonl(send_una);
+ resp->receivers_next_tsn = htonl(recv_next);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+}
+
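+/*
+ * Append an "Add Outgoing Streams" request parameter asking the peer to
+ * accept 'adding' additional streams, then fix up the chunk and mbuf
+ * lengths.
+ */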
+static void
+sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
+ uint32_t seq,
+ uint16_t adding)
+{
+ uint16_t len, old_len;
+ struct sctp_chunkhdr *ch;
+ struct sctp_stream_reset_add_strm *addstr;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_add_strm);
+
+ /* Fill it out. */
+ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
+ addstr->ph.param_length = htons(len);
+ addstr->request_seq = htonl(seq);
+ addstr->number_of_streams = htons(adding);
+ addstr->reserved = 0;
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
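+/*
+ * Same as above, but appends an "Add Incoming Streams" request
+ * parameter.
+ */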
+static void
+sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
+ uint32_t seq,
+ uint16_t adding)
+{
+ uint16_t len, old_len;
+ struct sctp_chunkhdr *ch;
+ struct sctp_stream_reset_add_strm *addstr;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_add_strm);
+ /* Fill it out. */
+ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
+ addstr->ph.param_length = htons(len);
+ addstr->request_seq = htonl(seq);
+ addstr->number_of_streams = htons(adding);
+ addstr->reserved = 0;
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
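+/*
+ * Build and queue a STREAM RESET chunk with an outgoing SSN reset
+ * request for every stream currently marked for reset. Returns 0 on
+ * success, EALREADY if a previous reset is still outstanding, ENOMEM on
+ * allocation failure, or ENOENT if no stream actually needed resetting.
+ */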
+int
+sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+ uint32_t seq;
+
+ asoc = &stcb->asoc;
+ asoc->trigger_reset = 0;
+ if (asoc->stream_reset_outstanding) {
+ return (EALREADY);
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->book_size = sizeof(struct sctp_chunkhdr);
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ chk->book_size_scale = 0;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ if (stcb->asoc.alternate) {
+ chk->whoTo = stcb->asoc.alternate;
+ } else {
+ chk->whoTo = stcb->asoc.primary_destination;
+ }
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->book_size);
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ seq = stcb->asoc.str_reset_seq_out;
+ if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
+ seq++;
+ asoc->stream_reset_outstanding++;
+ } else {
+ m_freem(chk->data);
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ return (ENOENT);
+ }
+ asoc->str_reset = chk;
+ /* insert the chunk for sending */
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ asoc->ctrl_queue_cnt++;
+
+ if (stcb->asoc.send_sack) {
+ sctp_send_sack(stcb, so_locked);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ return (0);
+}
+
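+/*
+ * Build a STREAM RESET chunk combining whatever the caller asked for:
+ * an outgoing SSN reset (paired with an incoming reset request), an
+ * SSN/TSN reset, and/or add-stream requests. Only one such chunk may be
+ * outstanding at a time (EBUSY otherwise). When outgoing streams are
+ * added, the strmout array is reallocated here and the old per-stream
+ * queues are migrated. Typically reached via the SCTP_RESET_STREAMS,
+ * SCTP_RESET_ASSOC and SCTP_ADD_STREAMS socket options.
+ */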
+int
+sctp_send_str_reset_req(struct sctp_tcb *stcb,
+ uint16_t number_entries, uint16_t *list,
+ uint8_t send_in_req,
+ uint8_t send_tsn_req,
+ uint8_t add_stream,
+ uint16_t adding_o,
+ uint16_t adding_i, uint8_t peer_asked)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+ int can_send_out_req = 0;
+ uint32_t seq;
+
+ asoc = &stcb->asoc;
+ if (asoc->stream_reset_outstanding) {
+ /*-
+ * Already one pending, must get ACK back to clear the flag.
+ */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
+ return (EBUSY);
+ }
+ if ((send_in_req == 0) && (send_tsn_req == 0) &&
+ (add_stream == 0)) {
+ /* nothing to do */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ if (send_tsn_req && send_in_req) {
+ /* error, can't do that */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ } else if (send_in_req) {
+ can_send_out_req = 1;
+ }
+ if (number_entries > (MCLBYTES -
+ SCTP_MIN_OVERHEAD -
+ sizeof(struct sctp_chunkhdr) -
+ sizeof(struct sctp_stream_reset_out_request)) /
+ sizeof(uint16_t)) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->book_size = sizeof(struct sctp_chunkhdr);
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ chk->book_size_scale = 0;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ if (stcb->asoc.alternate) {
+ chk->whoTo = stcb->asoc.alternate;
+ } else {
+ chk->whoTo = stcb->asoc.primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+
+ seq = stcb->asoc.str_reset_seq_out;
+ if (can_send_out_req) {
+ int ret;
+ ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
+ if (ret) {
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ }
+ if ((add_stream & 1) &&
+ ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
+ /* Need to allocate more */
+ struct sctp_stream_out *oldstream;
+ struct sctp_stream_queue_pending *sp, *nsp;
+ int i;
+#if defined(SCTP_DETAILED_STR_STATS)
+ int j;
+#endif
+
+ oldstream = stcb->asoc.strmout;
+ /* get some more */
+ SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
+ (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
+ SCTP_M_STRMO);
+ if (stcb->asoc.strmout == NULL) {
+ uint8_t x;
+ stcb->asoc.strmout = oldstream;
+ /* Turn off the bit */
+ x = add_stream & 0xfe;
+ add_stream = x;
+ goto skip_stuff;
+ }
+ /* Ok now we proceed with copying the old out stuff and
+ * initializing the new stuff.
+ */
+ SCTP_TCB_SEND_LOCK(stcb);
+ stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
+ stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
+ stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
+ stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
+ stcb->asoc.strmout[i].sid = i;
+ stcb->asoc.strmout[i].state = oldstream[i].state;
+ /* FIX ME FIX ME */
+ /* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
+ stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
+ /* now anything on those queues? */
+ TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
+ TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
+ }
+
+ }
+ /* now the new streams */
+ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+ for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].chunks_on_queues = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+ for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+ stcb->asoc.strmout[i].abandoned_sent[j] = 0;
+ stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
+ }
+#else
+ stcb->asoc.strmout[i].abandoned_sent[0] = 0;
+ stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
+#endif
+ stcb->asoc.strmout[i].next_mid_ordered = 0;
+ stcb->asoc.strmout[i].next_mid_unordered = 0;
+ stcb->asoc.strmout[i].sid = i;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
+ stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
+ }
+ stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
+ SCTP_FREE(oldstream, SCTP_M_STRMO);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+skip_stuff:
+ if ((add_stream & 1) && (adding_o > 0)) {
+ asoc->strm_pending_add_size = adding_o;
+ asoc->peer_req_out = peer_asked;
+ sctp_add_an_out_stream(chk, seq, adding_o);
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if ((add_stream & 2) && (adding_i > 0)) {
+ sctp_add_an_in_stream(chk, seq, adding_i);
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if (send_in_req) {
+ sctp_add_stream_reset_in(chk, number_entries, list, seq);
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if (send_tsn_req) {
+ sctp_add_stream_reset_tsn(chk, seq);
+ asoc->stream_reset_outstanding++;
+ }
+ asoc->str_reset = chk;
+ /* insert the chunk for sending */
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ asoc->ctrl_queue_cnt++;
+ if (stcb->asoc.send_sack) {
+ sctp_send_sack(stcb, SCTP_SO_LOCKED);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ return (0);
+}
+
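+/*
+ * Send an ABORT in response to the given packet, unless that packet
+ * itself contains an ABORT (never answer an ABORT with an ABORT).
+ * The cause chain, if any, is consumed in either case.
+ */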
+void
+sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ /* Don't respond to an ABORT with an ABORT. */
+ if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
+ if (cause)
+ sctp_m_freem(cause);
+ return;
+ }
+ sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ return;
+}
+
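+/*
+ * Send an OPERATION ERROR chunk to the given address, carrying the
+ * supplied cause chain.
+ */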
+void
+sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ return;
+}
+
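+/*
+ * Copy the next piece of user data (up to max_send_len bytes) out of
+ * the uio into a fresh mbuf chain. On FreeBSD and userspace builds this
+ * is a thin wrapper around m_uiotombuf(); the fallback path fills mbufs
+ * one at a time with uiomove(). Returns the head of the chain, with
+ * *new_tail pointing at its last mbuf and *sndout set to the number of
+ * bytes copied.
+ */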
+static struct mbuf *
+sctp_copy_resume(struct uio *uio,
+ int max_send_len,
+#if defined(__FreeBSD__) || defined(__Userspace__)
+ int user_marks_eor,
+#endif
+ int *error,
+ uint32_t *sndout,
+ struct mbuf **new_tail)
+{
+#if defined(__FreeBSD__) || defined(__Userspace__)
+ struct mbuf *m;
+
+ m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
+ (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ *error = ENOBUFS;
+ } else {
+ *sndout = m_length(m, NULL);
+ *new_tail = m_last(m);
+ }
+ return (m);
+#else
+ int left, cancpy, willcpy;
+ struct mbuf *m, *head;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ left = (int)min(uio->uio_resid, max_send_len);
+#else
+ left = (int)min(uio_resid(uio), max_send_len);
+#endif
+#else
+ left = (int)min(uio->uio_resid, max_send_len);
+#endif
+ /* Always get a header just in case */
+ head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
+ if (head == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ cancpy = (int)M_TRAILINGSPACE(head);
+ willcpy = min(cancpy, left);
+ *error = uiomove(mtod(head, caddr_t), willcpy, uio);
+ if (*error) {
+ sctp_m_freem(head);
+ return (NULL);
+ }
+ *sndout += willcpy;
+ left -= willcpy;
+ SCTP_BUF_LEN(head) = willcpy;
+ m = head;
+ *new_tail = head;
+ while (left > 0) {
+ /* move in user data */
+ SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ sctp_m_freem(head);
+ *new_tail = NULL;
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ m = SCTP_BUF_NEXT(m);
+ cancpy = (int)M_TRAILINGSPACE(m);
+ willcpy = min(cancpy, left);
+ *error = uiomove(mtod(m, caddr_t), willcpy, uio);
+ if (*error) {
+ sctp_m_freem(head);
+ *new_tail = NULL;
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ *error = EFAULT;
+ return (NULL);
+ }
+ SCTP_BUF_LEN(m) = willcpy;
+ left -= willcpy;
+ *sndout += willcpy;
+ *new_tail = m;
+ if (left == 0) {
+ SCTP_BUF_NEXT(m) = NULL;
+ }
+ }
+ return (head);
+#endif
+}
+
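+/*
+ * Copy a whole message (sp->length bytes) from the uio into sp->data,
+ * reserving resv_upfront bytes of header space in the first mbuf.
+ * Returns 0 on success or an errno value.
+ */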
+static int
+sctp_copy_one(struct sctp_stream_queue_pending *sp,
+ struct uio *uio,
+ int resv_upfront)
+{
+#if defined(__FreeBSD__) || defined(__Userspace__)
+ sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
+ resv_upfront, 0);
+ if (sp->data == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ return (ENOBUFS);
+ }
+
+ sp->tail_mbuf = m_last(sp->data);
+ return (0);
+#else
+ int left;
+ int cancpy, willcpy, error;
+ struct mbuf *m, *head;
+ int cpsz = 0;
+
+ /* First one gets a header */
+ left = sp->length;
+ head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ return (ENOBUFS);
+ }
+ /*-
+ * Account for this mbuf now; that way, if a later allocation
+ * fails, we won't be left with a bad count.
+ */
+ SCTP_BUF_RESV_UF(m, resv_upfront);
+ cancpy = (int)M_TRAILINGSPACE(m);
+ willcpy = min(cancpy, left);
+ while (left > 0) {
+ /* move in user data */
+ error = uiomove(mtod(m, caddr_t), willcpy, uio);
+ if (error) {
+ sctp_m_freem(head);
+ return (error);
+ }
+ SCTP_BUF_LEN(m) = willcpy;
+ left -= willcpy;
+ cpsz += willcpy;
+ if (left > 0) {
+ SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ /*
+ * the head goes back to caller, he can free
+ * the rest
+ */
+ sctp_m_freem(head);
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ return (ENOBUFS);
+ }
+ m = SCTP_BUF_NEXT(m);
+ cancpy = (int)M_TRAILINGSPACE(m);
+ willcpy = min(cancpy, left);
+ } else {
+ sp->tail_mbuf = m;
+ SCTP_BUF_NEXT(m) = NULL;
+ }
+ }
+ sp->data = head;
+ sp->length = cpsz;
+ return (0);
+#endif
+}
+
+
+
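+/*
+ * Allocate and fill a stream-queue-pending entry from user data: copy
+ * up to max_send_len bytes, stamp the entry with the snd/rcv info, and
+ * decide whether the message is complete or will be continued by a
+ * later send. Returns NULL with *error set on failure.
+ */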
+static struct sctp_stream_queue_pending *
+sctp_copy_it_in(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *srcv,
+ struct uio *uio,
+ struct sctp_nets *net,
+ ssize_t max_send_len,
+ int user_marks_eor,
+ int *error)
+{
+ /*-
+ * This routine must be very careful in its work. Protocol
+ * processing is up and running, so care must be taken to spl...()
+ * when you need to do something that may affect the stcb/asoc. The
+ * sb is locked, however. While data is being copied, protocol
+ * processing should stay enabled, since this is a slower operation...
+ */
+ struct sctp_stream_queue_pending *sp = NULL;
+ int resv_in_first;
+
+ *error = 0;
+ /* Now can we send this? */
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ /* got data while shutting down */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ *error = ECONNRESET;
+ goto out_now;
+ }
+ sctp_alloc_a_strmoq(stcb, sp);
+ if (sp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ *error = ENOMEM;
+ goto out_now;
+ }
+ sp->act_flags = 0;
+ sp->sender_all_done = 0;
+ sp->sinfo_flags = srcv->sinfo_flags;
+ sp->timetolive = srcv->sinfo_timetolive;
+ sp->ppid = srcv->sinfo_ppid;
+ sp->context = srcv->sinfo_context;
+ sp->fsn = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+
+ sp->sid = srcv->sinfo_stream;
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
+#else
+ sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
+#endif
+#else
+ sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if ((sp->length == (uint32_t)uio->uio_resid) &&
+#else
+ if ((sp->length == (uint32_t)uio_resid(uio)) &&
+#endif
+#else
+ if ((sp->length == (uint32_t)uio->uio_resid) &&
+#endif
+ ((user_marks_eor == 0) ||
+ (srcv->sinfo_flags & SCTP_EOF) ||
+ (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
+ sp->msg_is_complete = 1;
+ } else {
+ sp->msg_is_complete = 0;
+ }
+ sp->sender_all_done = 0;
+ sp->some_taken = 0;
+ sp->put_last_out = 0;
+ resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
+ sp->data = sp->tail_mbuf = NULL;
+ if (sp->length == 0) {
+ goto skip_copy;
+ }
+ if (srcv->sinfo_keynumber_valid) {
+ sp->auth_keyid = srcv->sinfo_keynumber;
+ } else {
+ sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ }
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ sctp_auth_key_acquire(stcb, sp->auth_keyid);
+ sp->holds_key_ref = 1;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
+#endif
+ *error = sctp_copy_one(sp, uio, resv_in_first);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
+#endif
+ skip_copy:
+ if (*error) {
+#if defined(__Userspace__)
+ SCTP_TCB_LOCK(stcb);
+#endif
+ sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
+#if defined(__Userspace__)
+ SCTP_TCB_UNLOCK(stcb);
+#endif
+ sp = NULL;
+ } else {
+ if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+ sp->net = net;
+ atomic_add_int(&sp->net->ref_count, 1);
+ } else {
+ sp->net = NULL;
+ }
+ sctp_set_prsctp_policy(sp);
+ }
+out_now:
+ return (sp);
+}
+
+
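+/*
+ * Socket-layer send entry point. Pulls an optional sndrcvinfo out of
+ * the control mbufs, rewrites an IPv4-mapped IPv6 destination to plain
+ * IPv4, and hands everything to sctp_lower_sosend().
+ */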
+int
+sctp_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+#if defined(__APPLE__) && !defined(__Userspace__)
+ int flags
+#else
+ int flags,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct thread *p
+#elif defined(_WIN32) && !defined(__Userspace__)
+ PKTHREAD p
+#else
+#if defined(__Userspace__)
+ /*
+ * proc is a dummy in __Userspace__ and will not be passed
+ * to sctp_lower_sosend
+ */
+#endif
+ struct proc *p
+#endif
+#endif
+)
+{
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct proc *p = current_proc();
+#endif
+ int error, use_sndinfo = 0;
+ struct sctp_sndrcvinfo sndrcvninfo;
+ struct sockaddr *addr_to_use;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin;
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(so, 1);
+#endif
+ if (control) {
+ /* process cmsg snd/rcv info (maybe an assoc-id) */
+ if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
+ sizeof(sndrcvninfo))) {
+ /* got one */
+ use_sndinfo = 1;
+ }
+ }
+ addr_to_use = addr;
+#if defined(INET) && defined(INET6)
+ if ((addr) && (addr->sa_family == AF_INET6)) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin, sin6);
+ addr_to_use = (struct sockaddr *)&sin;
+ }
+ }
+#endif
+ error = sctp_lower_sosend(so, addr_to_use, uio, top,
+ control,
+ flags,
+ use_sndinfo ? &sndrcvninfo: NULL
+#if !defined(__Userspace__)
+ , p
+#endif
+ );
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (error);
+}
+
+
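+/*
+ * The real send path. Locates (or implicitly creates) the association,
+ * validates the destination address and sinfo flags, handles the
+ * SCTP_ABORT and SCTP_EOF special cases, blocks for socket-buffer space
+ * when allowed to, copies the user data onto the stream queues, and
+ * kicks chunk output when Nagle and the congestion window permit.
+ */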
+int
+sctp_lower_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *i_pak,
+ struct mbuf *control,
+ int flags,
+ struct sctp_sndrcvinfo *srcv
+#if !defined(__Userspace__)
+ ,
+#if defined(__FreeBSD__)
+ struct thread *p
+#elif defined(_WIN32)
+ PKTHREAD p
+#else
+ struct proc *p
+#endif
+#endif
+ )
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ ssize_t sndlen = 0, max_len, local_add_more;
+ int error, len;
+ struct mbuf *top = NULL;
+ int queue_only = 0, queue_only_for_init = 0;
+ int free_cnt_applied = 0;
+ int un_sent;
+ int now_filled = 0;
+ unsigned int inqueue_bytes = 0;
+ struct sctp_block_entry be;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+ struct timeval now;
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_inpcb *t_inp;
+ int user_marks_eor;
+ int create_lock_applied = 0;
+ int nagle_applies = 0;
+ int some_on_control = 0;
+ int got_all_of_the_send = 0;
+ int hold_tcblock = 0;
+ int non_blocking = 0;
+ ssize_t local_soresv = 0;
+ uint16_t port;
+ uint16_t sinfo_flags;
+ sctp_assoc_t sinfo_assoc_id;
+
+ error = 0;
+ net = NULL;
+ stcb = NULL;
+ asoc = NULL;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sctp_lock_assert(so);
+#endif
+ t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ if (i_pak) {
+ SCTP_RELEASE_PKT(i_pak);
+ }
+ return (error);
+ }
+ if ((uio == NULL) && (i_pak == NULL)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ atomic_add_int(&inp->total_sends, 1);
+ if (uio) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if (uio->uio_resid < 0) {
+#else
+ if (uio_resid(uio) < 0) {
+#endif
+#else
+ if (uio->uio_resid < 0) {
+#endif
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ sndlen = uio->uio_resid;
+#else
+ sndlen = uio_resid(uio);
+#endif
+#else
+ sndlen = uio->uio_resid;
+#endif
+ } else {
+ top = SCTP_HEADER_TO_CHAIN(i_pak);
+ sndlen = SCTP_HEADER_LEN(i_pak);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
+ (void *)addr,
+ sndlen);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ SCTP_IS_LISTENING(inp)) {
+ /* The listener can NOT send */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+ error = ENOTCONN;
+ goto out_unlocked;
+ }
+ /*
+ * Pre-screen the address: if one is given, its length field
+ * (sin_len/sin6_len/sconn_len) must be set correctly!
+ */
+ if (addr) {
+ union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
+ switch (raddr->sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SIN_LEN
+ if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+#endif
+ port = raddr->sin.sin_port;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SIN6_LEN
+ if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+#endif
+ port = raddr->sin6.sin6_port;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+#ifdef HAVE_SCONN_LEN
+ if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+#endif
+ port = raddr->sconn.sconn_port;
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ goto out_unlocked;
+ }
+ } else
+ port = 0;
+
+ if (srcv) {
+ sinfo_flags = srcv->sinfo_flags;
+ sinfo_assoc_id = srcv->sinfo_assoc_id;
+ if (INVALID_SINFO_FLAG(sinfo_flags) ||
+ PR_SCTP_INVALID_POLICY(sinfo_flags)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ if (srcv->sinfo_flags)
+ SCTP_STAT_INCR(sctps_sends_with_flags);
+ } else {
+ sinfo_flags = inp->def_send.sinfo_flags;
+ sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (flags & MSG_EOR) {
+ sinfo_flags |= SCTP_EOR;
+ }
+ if (flags & MSG_EOF) {
+ sinfo_flags |= SCTP_EOF;
+ }
+#endif
+ if (sinfo_flags & SCTP_SENDALL) {
+ /* it's a sendall */
+ error = sctp_sendall(inp, uio, top, srcv);
+ top = NULL;
+ goto out_unlocked;
+ }
+ if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ /* now we must find the assoc */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else if (sinfo_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
+ if (stcb != NULL) {
+ hold_tcblock = 1;
+ }
+ } else if (addr) {
+ /*-
+ * Since we did not use findep we must
+ * increment it, and if we don't find a tcb
+ * decrement it.
+ */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ hold_tcblock = 1;
+ }
+ }
+ if ((stcb == NULL) && (addr)) {
+ /* Possible implicit send? */
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_applied = 1;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /* Should I really unlock? */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+
+ }
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ /* With the lock applied look again */
+ stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+#if defined(INET) || defined(INET6)
+ if ((stcb == NULL) && (control != NULL) && (port > 0)) {
+ stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
+ }
+#endif
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ hold_tcblock = 1;
+ }
+ if (error) {
+ goto out_unlocked;
+ }
+ if (t_inp != inp) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+ error = ENOTCONN;
+ goto out_unlocked;
+ }
+ }
+ if (stcb == NULL) {
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
+ error = ENOENT;
+ goto out_unlocked;
+ } else {
+ /* We must go ahead and start the INIT process */
+ uint32_t vrf_id;
+
+ if ((sinfo_flags & SCTP_ABORT) ||
+ ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
+ /*-
+ * User asks to abort a non-existent assoc,
+ * or EOF a non-existent assoc with no data
+ */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
+ error = ENOENT;
+ goto out_unlocked;
+ }
+ /* get an asoc/stcb struct */
+ vrf_id = inp->def_vrf_id;
+#ifdef INVARIANTS
+ if (create_lock_applied == 0) {
+ panic("Error, should hold create lock and I don't?");
+ }
+#endif
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+ inp->sctp_ep.pre_open_stream_count,
+ inp->sctp_ep.port,
+#if !defined(__Userspace__)
+ p,
+#else
+ (struct proc *)NULL,
+#endif
+ SCTP_INITIALIZE_AUTH_PARAMS);
+ if (stcb == NULL) {
+ /* Error is setup for us in the call */
+ goto out_unlocked;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ hold_tcblock = 1;
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ } else {
+ SCTP_PRINTF("Huh-3? create lock should have been on??\n");
+ }
+ /* Turn on queue only flag to prevent data from being sent */
+ queue_only = 1;
+ asoc = &stcb->asoc;
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+
+ if (control) {
+ if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
+ sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
+ hold_tcblock = 0;
+ stcb = NULL;
+ goto out_unlocked;
+ }
+ }
+ /* out with the INIT */
+ queue_only_for_init = 1;
+ /*-
+ * we may want to dig in after this call and adjust the MTU
+ * value. It defaulted to 1500 (constant) but the ro
+ * structure may now have an update and thus we may need to
+ * change it BEFORE we append the message.
+ */
+ }
+ } else
+ asoc = &stcb->asoc;
+ if (srcv == NULL) {
+ srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
+ sinfo_flags = srcv->sinfo_flags;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (flags & MSG_EOR) {
+ sinfo_flags |= SCTP_EOR;
+ }
+ if (flags & MSG_EOF) {
+ sinfo_flags |= SCTP_EOF;
+ }
+#endif
+ }
+ if (sinfo_flags & SCTP_ADDR_OVER) {
+ if (addr)
+ net = sctp_findnet(stcb, addr);
+ else
+ net = NULL;
+ if ((net == NULL) ||
+ ((port != 0) && (port != stcb->rport))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ } else {
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ }
+ atomic_add_int(&stcb->total_sends, 1);
+ /* Keep the stcb from being freed under our feet */
+ atomic_add_int(&asoc->refcnt, 1);
+ free_cnt_applied = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
+ if (sndlen > (ssize_t)asoc->smallest_mtu) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+ error = EMSGSIZE;
+ goto out_unlocked;
+ }
+ }
+#if defined(__Userspace__)
+ if (inp->recv_callback) {
+ non_blocking = 1;
+ }
+#endif
+ if (SCTP_SO_IS_NBIO(so)
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
+#endif
+ ) {
+ non_blocking = 1;
+ }
+ /* would we block? */
+ if (non_blocking) {
+ ssize_t amount;
+
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ if (user_marks_eor == 0) {
+ amount = sndlen;
+ } else {
+ amount = 1;
+ }
+ if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
+ (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
+ if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
+ error = EMSGSIZE;
+ else
+ error = EWOULDBLOCK;
+ goto out_unlocked;
+ }
+ stcb->asoc.sb_send_resv += (uint32_t)sndlen;
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ } else {
+ atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
+ }
+ local_soresv = sndlen;
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ goto out_unlocked;
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ }
+ /* Is the stream no. valid? */
+ if (srcv->sinfo_stream >= asoc->streamoutcnt) {
+ /* Invalid stream number */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
+ (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
+ /*
+ * Can't queue any data while stream reset is underway.
+ */
+ if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
+ error = EAGAIN;
+ } else {
+ error = EINVAL;
+ }
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
+ goto out_unlocked;
+ }
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ queue_only = 1;
+ }
+ /* we are now done with all control */
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ if (sinfo_flags & SCTP_ABORT) {
+ ;
+ } else {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ goto out_unlocked;
+ }
+ }
+ /* Ok, we will attempt a msgsnd :> */
+#if !(defined(_WIN32) || defined(__Userspace__))
+ if (p) {
+#if defined(__FreeBSD__)
+ p->td_ru.ru_msgsnd++;
+#else
+ p->p_stats->p_ru.ru_msgsnd++;
+#endif
+ }
+#endif
+ /* Are we aborting? */
+ if (sinfo_flags & SCTP_ABORT) {
+ struct mbuf *mm;
+ ssize_t tot_demand, tot_out = 0, max_out;
+
+ SCTP_STAT_INCR(sctps_sends_with_abort);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* It has to be up before we abort */
+ /* how big is the user-initiated abort? */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ if (top) {
+ struct mbuf *cntm = NULL;
+
+ mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
+ if (sndlen != 0) {
+ for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
+ tot_out += SCTP_BUF_LEN(cntm);
+ }
+ }
+ } else {
+ /* Must fit in a MTU */
+ tot_out = sndlen;
+ tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
+ if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
+ /* Too big */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+ error = EMSGSIZE;
+ goto out;
+ }
+ mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
+ }
+ if (mm == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ error = ENOMEM;
+ goto out;
+ }
+ max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
+ max_out -= sizeof(struct sctp_abort_msg);
+ if (tot_out > max_out) {
+ tot_out = max_out;
+ }
+ if (mm) {
+ struct sctp_paramhdr *ph;
+
+ /* now move forward the data pointer */
+ ph = mtod(mm, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
+ ph++;
+ SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
+ if (top == NULL) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 0);
+#endif
+ error = uiomove((caddr_t)ph, (int)tot_out, uio);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(so, 0);
+#endif
+ if (error) {
+ /*-
+ * Here, if we can't get the user's data, we
+ * still abort; we just don't get to
+ * send the user's note :-0
+ */
+ sctp_m_freem(mm);
+ mm = NULL;
+ }
+ } else {
+ if (sndlen != 0) {
+ SCTP_BUF_NEXT(mm) = top;
+ }
+ }
+ }
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ free_cnt_applied = 0;
+ /* release this lock, otherwise we hang on ourselves */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ /* the stcb is gone after the abort; drop our references */
+ hold_tcblock = 0;
+ stcb = NULL;
+ /* In this case top is already chained to mm, so
+ * avoid a double free: we free it below if
+ * top != NULL, and the driver would free it after
+ * sending the packet out.
+ */
+ if (sndlen != 0) {
+ top = NULL;
+ }
+ goto out_unlocked;
+ }
+ /* Calculate the maximum we can send */
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ } else {
+ max_len = 0;
+ }
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ if (asoc->strmout == NULL) {
+ /* huh? software error */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ error = EFAULT;
+ goto out_unlocked;
+ }
+
+ /* Unless E_EOR mode is on, we must make a send FIT in one call. */
+ if ((user_marks_eor == 0) &&
+ (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
+ /* It will NEVER fit */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+ error = EMSGSIZE;
+ goto out_unlocked;
+ }
+ if ((uio == NULL) && user_marks_eor) {
+ /*-
+ * We do not support eeor mode for
+ * sending with mbuf chains (like sendfile).
+ */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+
+ if (user_marks_eor) {
+ local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
+ } else {
+ /*-
+ * For non-eeor the whole message must fit in
+ * the socket send buffer.
+ */
+ local_add_more = sndlen;
+ }
+ len = 0;
+ if (non_blocking) {
+ goto skip_preblock;
+ }
+ if (((max_len <= local_add_more) &&
+ ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
+ (max_len == 0) ||
+ ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ /* No room right now! */
+ SOCKBUF_LOCK(&so->so_snd);
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
+ ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
+ (unsigned int)SCTP_SB_LIMIT_SND(so),
+ inqueue_bytes,
+ local_add_more,
+ stcb->asoc.stream_queue_cnt,
+ stcb->asoc.chunks_on_out_queue,
+ SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
+ }
+ be.error = 0;
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ stcb->block_entry = &be;
+#endif
+ error = sbwait(&so->so_snd);
+ stcb->block_entry = NULL;
+ if (error || so->so_error || be.error) {
+ if (error == 0) {
+ if (so->so_error)
+ error = so->so_error;
+ if (be.error) {
+ error = be.error;
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+ asoc, stcb->asoc.total_output_queue_size);
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ }
+ if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ } else {
+ max_len = 0;
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ }
+
+skip_preblock:
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+#endif
+ /* sndlen covers the mbuf case;
+ * uio_resid covers the non-mbuf case.
+ * NOTE: uio will be NULL when top/mbuf is passed.
+ */
+ if (sndlen == 0) {
+ if (sinfo_flags & SCTP_EOF) {
+ got_all_of_the_send = 1;
+ goto dataless_eof;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ }
+ if (top == NULL) {
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_stream_out *strm;
+ uint32_t sndout;
+
+ SCTP_TCB_SEND_LOCK(stcb);
+ if ((asoc->stream_locked) &&
+ (asoc->stream_locked_on != srcv->sinfo_stream)) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+
+ strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+ if (strm->last_msg_incomplete == 0) {
+ do_a_copy_in:
+ sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
+ if (error) {
+ goto out;
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (sp->msg_is_complete) {
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ } else {
+ /* We just got locked to this stream in
+ * case of an interrupt.
+ */
+ strm->last_msg_incomplete = 1;
+ if (stcb->asoc.idata_supported == 0) {
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ }
+ sp->sender_all_done = 0;
+ }
+ sctp_snd_sb_alloc(stcb, sp->length);
+ atomic_add_int(&asoc->stream_queue_cnt, 1);
+ if (sinfo_flags & SCTP_UNORDERED) {
+ SCTP_STAT_INCR(sctps_sends_with_unord);
+ }
+ TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ } else {
+ SCTP_TCB_SEND_LOCK(stcb);
+ sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ if (sp == NULL) {
+ /* ???? Huh ??? last msg is gone */
+#ifdef INVARIANTS
+ panic("Warning: Last msg marked incomplete, yet nothing left?");
+#else
+ SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
+ strm->last_msg_incomplete = 0;
+#endif
+ goto do_a_copy_in;
+
+ }
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ while (uio->uio_resid > 0) {
+#else
+ while (uio_resid(uio) > 0) {
+#endif
+#else
+ while (uio->uio_resid > 0) {
+#endif
+ /* How much room do we have? */
+ struct mbuf *new_tail, *mm;
+
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ else
+ max_len = 0;
+
+ if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
+ (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ (uio->uio_resid && (uio->uio_resid <= max_len))) {
+#else
+ (uio_resid(uio) && (uio_resid(uio) <= max_len))) {
+#endif
+#else
+ (uio->uio_resid && (uio->uio_resid <= max_len))) {
+#endif
+ sndout = 0;
+ new_tail = NULL;
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 0);
+#endif
+#if defined(__FreeBSD__) || defined(__Userspace__)
+ mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
+#else
+ mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(so, 0);
+#endif
+ if ((mm == NULL) || error) {
+ if (mm) {
+ sctp_m_freem(mm);
+ }
+ goto out;
+ }
+ /* Update the mbuf and count */
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* we need to get out.
+ * Peer probably aborted.
+ */
+ sctp_m_freem(mm);
+ if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ goto out;
+ }
+ if (sp->tail_mbuf) {
+ /* tack it to the end */
+ SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
+ sp->tail_mbuf = new_tail;
+ } else {
+ /* A stolen mbuf */
+ sp->data = mm;
+ sp->tail_mbuf = new_tail;
+ }
+ sctp_snd_sb_alloc(stcb, sndout);
+ atomic_add_int(&sp->length, sndout);
+ len += sndout;
+ if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
+ sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
+ }
+
+ /* Did we reach EOR? */
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if ((uio->uio_resid == 0) &&
+#else
+ if ((uio_resid(uio) == 0) &&
+#endif
+#else
+ if ((uio->uio_resid == 0) &&
+#endif
+ ((user_marks_eor == 0) ||
+ (sinfo_flags & SCTP_EOF) ||
+ (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
+ sp->msg_is_complete = 1;
+ } else {
+ sp->msg_is_complete = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if (uio->uio_resid == 0) {
+#else
+ if (uio_resid(uio) == 0) {
+#endif
+#else
+ if (uio->uio_resid == 0) {
+#endif
+ /* got it all? */
+ continue;
+ }
+ /* PR-SCTP? */
+ if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
+ /* This is ugly but we must assure locking order */
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ else
+ max_len = 0;
+ if (max_len > 0) {
+ continue;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ /* wait for space now */
+ if (non_blocking) {
+ /* Non-blocking io in place out */
+ goto skip_out_eof;
+ }
+ /* What about the INIT, send it maybe */
+ if (queue_only_for_init) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ /* a collision took us forward? */
+ queue_only = 0;
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ queue_only = 1;
+ }
+ }
+ if ((net->flight_size > net->cwnd) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ SCTP_STAT_INCR(sctps_send_cwnd_avoid);
+ queue_only = 1;
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (2 * net->mtu)) {
+ queue_only = 1;
+ }
+ asoc->ifp_had_enobuf = 0;
+ }
+ un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+ /*-
+ * Ok, Nagle is set on and we have data outstanding.
+ * Don't send anything and let SACKs drive out the
+ * data unless we have a "full" segment to send.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_naglequeued);
+ nagle_applies = 1;
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+ }
+ SCTP_STAT_INCR(sctps_naglesent);
+ nagle_applies = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
+ nagle_applies, un_sent);
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
+ stcb->asoc.total_flight,
+ stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+ }
+ if (queue_only_for_init)
+ queue_only_for_init = 0;
+ if ((queue_only == 0) && (nagle_applies == 0)) {
+ /*-
+ * need to start chunk output
+ * before blocking.. note that if
+ * a lock is already applied, then
+ * the input via the net is happening
+ * and I don't need to start output :-D
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ if (hold_tcblock == 0) {
+ if (SCTP_TCB_TRYLOCK(stcb)) {
+ hold_tcblock = 1;
+ sctp_chunk_output(inp,
+ stcb,
+ SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ }
+ } else {
+ sctp_chunk_output(inp,
+ stcb,
+ SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ }
+ if (hold_tcblock == 1) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ SOCKBUF_LOCK(&so->so_snd);
+ /*-
+ * This is a bit strange, but I think it will
+ * work. The total_output_queue_size is locked and
+ * protected by the TCB_LOCK, which we just released.
+ * There is a race that can occur between releasing it
+ * above, and me getting the socket lock, where sacks
+ * come in but we have not put the SB_WAIT on the
+ * so_snd buffer to get the wakeup. After the LOCK
+ * is applied the sack_processing will also need to
+ * LOCK the so->so_snd to do the actual sowwakeup(). So
+ * once we have the socket buffer lock if we recheck the
+ * size we KNOW we will get to sleep safely with the
+ * wakeup flag in place.
+ */
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
+ if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
+ min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+ asoc, uio->uio_resid);
+#else
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+ asoc, uio_resid(uio));
+#endif
+#else
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+ asoc, uio->uio_resid);
+#endif
+ }
+ be.error = 0;
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ stcb->block_entry = &be;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sbunlock(&so->so_snd, 1);
+#endif
+ error = sbwait(&so->so_snd);
+ stcb->block_entry = NULL;
+
+ if (error || so->so_error || be.error) {
+ if (error == 0) {
+ if (so->so_error)
+ error = so->so_error;
+ if (be.error) {
+ error = be.error;
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+ asoc, stcb->asoc.total_output_queue_size);
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ goto out_unlocked;
+ }
+ if (sp) {
+ if (sp->msg_is_complete == 0) {
+ strm->last_msg_incomplete = 1;
+ if (stcb->asoc.idata_supported == 0) {
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ }
+ } else {
+ sp->sender_all_done = 1;
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ }
+ } else {
+ SCTP_PRINTF("Huh no sp TSNH?\n");
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if (uio->uio_resid == 0) {
+#else
+ if (uio_resid(uio) == 0) {
+#endif
+#else
+ if (uio->uio_resid == 0) {
+#endif
+ got_all_of_the_send = 1;
+ }
+ } else {
+ /* We send in a 0, since we do NOT have any locks */
+ error = sctp_msg_append(stcb, net, top, srcv, 0);
+ top = NULL;
+ if (sinfo_flags & SCTP_EOF) {
+ got_all_of_the_send = 1;
+ }
+ }
+ if (error) {
+ goto out;
+ }
+dataless_eof:
+ /* EOF thing ? */
+ if ((sinfo_flags & SCTP_EOF) &&
+ (got_all_of_the_send == 1)) {
+ SCTP_STAT_INCR(sctps_sends_with_eof);
+ error = 0;
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ struct sctp_nets *netp;
+
+ /* only send SHUTDOWN the first time through */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (stcb->asoc.alternate) {
+ netp = stcb->asoc.alternate;
+ } else {
+ netp = stcb->asoc.primary_destination;
+ }
+ sctp_send_shutdown(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ NULL);
+ }
+ } else {
+ /*-
+ * we still have (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
+ /*-
+ * XXX sockets draft says that SCTP_EOF should be
+ * sent with no data. currently, we will allow user
+ * data to be sent first and move to
+ * SHUTDOWN-PENDING
+ */
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ abort_anyway:
+ if (free_cnt_applied) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ free_cnt_applied = 0;
+ }
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "%s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ op_err, SCTP_SO_LOCKED);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ /* the stcb is gone after the abort; drop our references */
+ hold_tcblock = 0;
+ stcb = NULL;
+ goto out;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ NULL);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
+ }
+ }
+ }
+skip_out_eof:
+ if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ some_on_control = 1;
+ }
+ if (queue_only_for_init) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ /* a collision took us forward? */
+ queue_only = 0;
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ queue_only = 1;
+ }
+ }
+ if ((net->flight_size > net->cwnd) &&
+ (stcb->asoc.sctp_cmt_on_off == 0)) {
+ SCTP_STAT_INCR(sctps_send_cwnd_avoid);
+ queue_only = 1;
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (2 * net->mtu)) {
+ queue_only = 1;
+ }
+ asoc->ifp_had_enobuf = 0;
+ }
+ un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+ /*-
+ * Ok, Nagle is set on and we have data outstanding.
+ * Don't send anything and let SACKs drive out the
+ * data unless we have a "full" segment to send.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_naglequeued);
+ nagle_applies = 1;
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+ }
+ SCTP_STAT_INCR(sctps_naglesent);
+ nagle_applies = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
+ nagle_applies, un_sent);
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
+ stcb->asoc.total_flight,
+ stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
+ /* we can attempt to send too. */
+ if (hold_tcblock == 0) {
+ /* If there is activity recv'ing sacks no need to send */
+ if (SCTP_TCB_TRYLOCK(stcb)) {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ hold_tcblock = 1;
+ }
+ } else {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ }
+ } else if ((queue_only == 0) &&
+ (stcb->asoc.peers_rwnd == 0) &&
+ (stcb->asoc.total_flight == 0)) {
+ /* We get to have a probe outstanding */
+ if (hold_tcblock == 0) {
+ hold_tcblock = 1;
+ SCTP_TCB_LOCK(stcb);
+ }
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ } else if (some_on_control) {
+ int num_out, reason, frag_point;
+
+ /* Here we do control only */
+ if (hold_tcblock == 0) {
+ hold_tcblock = 1;
+ SCTP_TCB_LOCK(stcb);
+ }
+ frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+ &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
+ queue_only, stcb->asoc.peers_rwnd, un_sent,
+ stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
+ stcb->asoc.total_output_queue_size, error);
+
+out:
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sbunlock(&so->so_snd, 1);
+#endif
+out_unlocked:
+
+ if (local_soresv && stcb) {
+ atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ }
+ if ((stcb) && hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if (stcb && free_cnt_applied) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+#ifdef INVARIANTS
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (stcb) {
+ if (mtx_owned(&stcb->tcb_mtx)) {
+ panic("Leaving with tcb mtx owned?");
+ }
+ if (mtx_owned(&stcb->tcb_send_mtx)) {
+ panic("Leaving with tcb send mtx owned?");
+ }
+ }
+#endif
+#endif
+ if (top) {
+ sctp_m_freem(top);
+ }
+ if (control) {
+ sctp_m_freem(control);
+ }
+ return (error);
+}
+
+
+/*
+ * generate an AUTHentication chunk, if required
+ */
+struct mbuf *
+sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+ struct sctp_auth_chunk **auth_ret, uint32_t * offset,
+ struct sctp_tcb *stcb, uint8_t chunk)
+{
+ struct mbuf *m_auth;
+ struct sctp_auth_chunk *auth;
+ int chunk_len;
+ struct mbuf *cn;
+
+ if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
+ (stcb == NULL))
+ return (m);
+
+ if (stcb->asoc.auth_supported == 0) {
+ return (m);
+ }
+ /* does the requested chunk require auth? */
+ if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
+ return (m);
+ }
+ m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
+ if (m_auth == NULL) {
+ /* no mbuf's */
+ return (m);
+ }
+ /* reserve some space if this will be the first mbuf */
+ if (m == NULL)
+ SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
+ /* fill in the AUTH chunk details */
+ auth = mtod(m_auth, struct sctp_auth_chunk *);
+ memset(auth, 0, sizeof(*auth));
+ auth->ch.chunk_type = SCTP_AUTHENTICATION;
+ auth->ch.chunk_flags = 0;
+ chunk_len = sizeof(*auth) +
+ sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+ auth->ch.chunk_length = htons(chunk_len);
+ auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
+ /* key id and hmac digest will be computed and filled in upon send */
+
+ /* save the offset where the auth was inserted into the chain */
+ *offset = 0;
+ for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
+ *offset += SCTP_BUF_LEN(cn);
+ }
+
+ /* update length and return pointer to the auth chunk */
+ SCTP_BUF_LEN(m_auth) = chunk_len;
+ m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
+ if (auth_ret != NULL)
+ *auth_ret = auth;
+
+ return (m);
+}
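+
+/*
+ * For reference, a sketch of the chunk the function above builds when the
+ * negotiated peer_hmac_id is SCTP_AUTH_HMAC_ID_SHA1 (20-byte digest), so
+ * chunk_len = sizeof(struct sctp_auth_chunk) + 20 = 28 bytes:
+ *
+ *     auth->ch.chunk_type   = SCTP_AUTHENTICATION;    0x0f
+ *     auth->ch.chunk_length = htons(28);
+ *     auth->hmac_id         = htons(0x0001);          SHA-1
+ *     auth->shared_key_id   = 0;                      filled in at send time
+ *     auth->hmac[0..19]     = 0;                      likewise computed at
+ *                                                     send time
+ */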
+
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+#ifdef INET6
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
+{
+ struct nd_prefix *pfx = NULL;
+ struct nd_pfxrouter *pfxrtr = NULL;
+ struct sockaddr_in6 gw6;
+
+#if defined(__FreeBSD__)
+ if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
+#else
+ if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
+#endif
+ return (0);
+
+ /* get prefix entry of address */
+#if defined(__FreeBSD__)
+ ND6_RLOCK();
+#endif
+ LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
+ if (pfx->ndpr_stateflags & NDPRF_DETACHED)
+ continue;
+ if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
+ &src6->sin6_addr, &pfx->ndpr_mask))
+ break;
+ }
+ /* no prefix entry in the prefix list */
+ if (pfx == NULL) {
+#if defined(__FreeBSD__)
+ ND6_RUNLOCK();
+#endif
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
+ return (0);
+ }
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
+
+ /* search installed gateway from prefix entry */
+ LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
+ memset(&gw6, 0, sizeof(struct sockaddr_in6));
+ gw6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ gw6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
+ sizeof(struct in6_addr));
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
+#if defined(__FreeBSD__)
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
+#else
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
+#endif
+#if defined(__FreeBSD__)
+ if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
+ ND6_RUNLOCK();
+#else
+ if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
+#endif
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
+ return (1);
+ }
+ }
+#if defined(__FreeBSD__)
+ ND6_RUNLOCK();
+#endif
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
+ return (0);
+}
+#endif
+
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
+{
+#ifdef INET
+ struct sockaddr_in *sin, *mask;
+ struct ifaddr *ifa;
+ struct in_addr srcnetaddr, gwnetaddr;
+
+#if defined(__FreeBSD__)
+ if (ro == NULL || ro->ro_nh == NULL ||
+#else
+ if (ro == NULL || ro->ro_rt == NULL ||
+#endif
+ sifa->address.sa.sa_family != AF_INET) {
+ return (0);
+ }
+ ifa = (struct ifaddr *)sifa->ifa;
+ mask = (struct sockaddr_in *)(ifa->ifa_netmask);
+ sin = &sifa->address.sin;
+ srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
+
+#if defined(__FreeBSD__)
+ sin = &ro->ro_nh->gw4_sa;
+#else
+ sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
+#endif
+ gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
+#if defined(__FreeBSD__)
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
+#else
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
+#endif
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
+ if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
+ return (1);
+ }
+#endif
+ return (0);
+}
+#elif defined(__Userspace__)
+/* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
+{
+ return (0);
+}
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
+{
+ return (0);
+}
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.h
new file mode 100644
index 000000000..4849c2b49
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_output.h
@@ -0,0 +1,248 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.h 362054 2020-06-11 13:34:09Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_OUTPUT_H_
+#define _NETINET_SCTP_OUTPUT_H_
+
+#include <netinet/sctp_header.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_scoping *scope,
+ struct mbuf *m_at,
+ int cnt_inits_to,
+ uint16_t *padding_len, uint16_t *chunk_len);
+
+
+int sctp_is_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+
+
+int
+sctp_is_address_in_scope(struct sctp_ifa *ifa,
+ struct sctp_scoping *scope,
+ int do_update);
+
+int
+sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa);
+
+struct sctp_ifa *
+sctp_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ sctp_route_t *ro, struct sctp_nets *net,
+ int non_asoc_addr_ok, uint32_t vrf_id);
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro);
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro);
+#endif
+
+void sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int);
+
+void
+sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, struct mbuf *,
+ int, int,
+ struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, struct sctp_init_chunk *,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t,
+#endif
+ uint32_t, uint16_t);
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
+ struct sctp_chunkhdr *, int *, int *);
+void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *);
+
+int
+sctp_send_cookie_echo(struct mbuf *, int, int, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void sctp_send_cookie_ack(struct sctp_tcb *);
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sctp_nets *);
+
+void
+sctp_remove_from_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq, int holds_lock);
+
+
+void sctp_send_shutdown(struct sctp_tcb *, struct sctp_nets *);
+
+void sctp_send_shutdown_ack(struct sctp_tcb *, struct sctp_nets *);
+
+void sctp_send_shutdown_complete(struct sctp_tcb *, struct sctp_nets *, int);
+
+void sctp_send_shutdown_complete2(struct sockaddr *, struct sockaddr *,
+ struct sctphdr *,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t, uint16_t,
+#endif
+ uint32_t, uint16_t);
+
+void sctp_send_asconf(struct sctp_tcb *, struct sctp_nets *, int addr_locked);
+
+void sctp_send_asconf_ack(struct sctp_tcb *);
+
+int sctp_get_frag_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_cookies(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_asconf(struct sctp_tcb *);
+
+void sctp_fix_ecn_echo(struct sctp_association *);
+
+void sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net);
+
+
+#define SCTP_DATA_CHUNK_OVERHEAD(stcb) ((stcb)->asoc.idata_supported ? \
+ sizeof(struct sctp_idata_chunk) : \
+ sizeof(struct sctp_data_chunk))
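+
+/*
+ * As a worked example (field sizes per sctp_header.h in this import): a
+ * plain DATA chunk header is 16 bytes (4-byte chunk header + TSN + SID +
+ * SSN + PPID), while the RFC 8260 I-DATA header is 20 bytes (the 16-bit
+ * SSN becomes reserved and a 32-bit MID is added), so idata_supported
+ * costs 4 extra bytes of overhead per DATA chunk.
+ */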
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+int
+sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct thread *, int);
+#elif defined(_WIN32) && !defined(__Userspace__)
+int
+sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, PKTHREAD, int);
+#else
+#if defined(__Userspace__)
+/* sctp_output is called by sctp_sendm. Not using sctp_sendm for __Userspace__ */
+#endif
+int
+sctp_output(struct sctp_inpcb *,
+ struct mbuf *,
+ struct sockaddr *,
+ struct mbuf *,
+ struct proc *, int);
+#endif
+
+void sctp_chunk_output(struct sctp_inpcb *, struct sctp_tcb *, int, int);
+
+void sctp_send_abort_tcb(struct sctp_tcb *, struct mbuf *, int);
+
+void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_send_sack(struct sctp_tcb *, int);
+
+void sctp_send_hb(struct sctp_tcb *, struct sctp_nets *, int);
+
+void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *, struct sctp_nets *, struct mbuf *,
+ int, int, int);
+
+
+
+void sctp_send_cwr(struct sctp_tcb *, struct sctp_nets *, uint32_t, uint8_t);
+
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *, uint32_t, uint32_t);
+
+void
+sctp_send_deferred_reset_response(struct sctp_tcb *,
+ struct sctp_stream_reset_list *,
+ int);
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *,
+ uint32_t, uint32_t, uint32_t, uint32_t);
+int
+sctp_send_stream_reset_out_if_possible(struct sctp_tcb *, int);
+
+int
+sctp_send_str_reset_req(struct sctp_tcb *, uint16_t, uint16_t *,
+ uint8_t, uint8_t, uint8_t, uint16_t, uint16_t, uint8_t);
+
+void
+sctp_send_abort(struct mbuf *, int, struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, uint32_t, struct mbuf *,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t, uint16_t,
+#endif
+ uint32_t, uint16_t);
+
+void sctp_send_operr_to(struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, uint32_t, struct mbuf *,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t, uint16_t,
+#endif
+ uint32_t, uint16_t);
+
+#endif /* _KERNEL || __Userspace__ */
+
+#if defined(_KERNEL) || defined(__Userspace__)
+int
+sctp_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+#if defined(__APPLE__) && !defined(__Userspace__)
+ int flags
+#else
+ int flags,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct thread *p
+#elif defined(_WIN32) && !defined(__Userspace__)
+ PKTHREAD p
+#else
+#if defined(__Userspace__)
+ /* proc is a dummy in __Userspace__ and will not be passed to sctp_lower_sosend */
+#endif
+ struct proc *p
+#endif
+#endif
+);
+
+#endif
+#endif
+
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.c
new file mode 100644
index 000000000..f051fa94d
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.c
@@ -0,0 +1,8127 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 363456 2020-07-23 19:43:49Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_bsd_addr.h>
+#if defined(INET) || defined(INET6)
+#if !defined(_WIN32)
+#include <netinet/udp.h>
+#endif
+#endif
+#ifdef INET6
+#if defined(__Userspace__)
+#include "user_ip6_var.h"
+#else
+#include <netinet6/ip6_var.h>
+#endif
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/unistd.h>
+#endif
+#if defined(__Userspace__)
+#include <user_socketvar.h>
+#include <user_atomic.h>
+#if !defined(_WIN32)
+#include <netdb.h>
+#endif
+#endif
+
+#if !defined(__FreeBSD__) || defined(__Userspace__)
+struct sctp_base_info system_base_info;
+
+#endif
+/* FIX: we don't handle multiple link local scopes */
+/* a "scopeless" replacement for IN6_ARE_ADDR_EQUAL */
+#ifdef INET6
+int
+SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
+{
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct in6_addr tmp_a, tmp_b;
+
+ tmp_a = a->sin6_addr;
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ if (in6_embedscope(&tmp_a, a, NULL, NULL) != 0) {
+#else
+ if (in6_embedscope(&tmp_a, a, NULL, NULL, NULL) != 0) {
+#endif
+ return (0);
+ }
+ tmp_b = b->sin6_addr;
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ if (in6_embedscope(&tmp_b, b, NULL, NULL) != 0) {
+#else
+ if (in6_embedscope(&tmp_b, b, NULL, NULL, NULL) != 0) {
+#endif
+ return (0);
+ }
+ return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
+#elif defined(SCTP_KAME)
+ struct sockaddr_in6 tmp_a, tmp_b;
+
+ memcpy(&tmp_a, a, sizeof(struct sockaddr_in6));
+ if (sa6_embedscope(&tmp_a, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ return (0);
+ }
+ memcpy(&tmp_b, b, sizeof(struct sockaddr_in6));
+ if (sa6_embedscope(&tmp_b, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ return (0);
+ }
+ return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
+#else
+ struct in6_addr tmp_a, tmp_b;
+
+ tmp_a = a->sin6_addr;
+ if (in6_embedscope(&tmp_a, a) != 0) {
+ return (0);
+ }
+ tmp_b = b->sin6_addr;
+ if (in6_embedscope(&tmp_b, b) != 0) {
+ return (0);
+ }
+ return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
+#endif
+#else
+ return (IN6_ARE_ADDR_EQUAL(&(a->sin6_addr), &(b->sin6_addr)));
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+}
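+
+/*
+ * Illustration (addresses invented): a link-local peer fe80::1 on scope 2
+ * may arrive either as sin6_addr == fe80::1 with sin6_scope_id == 2, or
+ * KAME-style with the interface index embedded in bytes 2-3 of sin6_addr
+ * and sin6_scope_id == 0.  Embedding the scope into both operands first
+ * lets the two representations compare equal.
+ */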
+#endif
+
+void
+sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
+{
+ /*
+ * We really don't need to lock this, but I will just because it
+ * does not hurt.
+ */
+ SCTP_INP_INFO_RLOCK();
+ spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep);
+ spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc);
+ spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr);
+ spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr);
+ spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk);
+ spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq);
+ spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq);
+ spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks);
+ SCTP_INP_INFO_RUNLOCK();
+}
+
+/*-
+ * Addresses are added to VRF's (Virtual Routers). For BSD we
+ * have only the default VRF 0. We maintain a hash list of
+ * VRF's. Each VRF has its own list of sctp_ifn's. Each of
+ * these has a list of addresses. When we add a new address
+ * to a VRF we look up the ifn/ifn_index; if the ifn does
+ * not exist we create it and add it to the list of IFN's
+ * within the VRF. Once we have the sctp_ifn, we add the
+ * address to the list. So we look something like:
+ *
+ * hash-vrf-table
+ * vrf-> ifn-> ifn -> ifn
+ * vrf |
+ * ... +--ifa-> ifa -> ifa
+ * vrf
+ *
+ * We keep these separate lists since the SCTP subsystem will
+ * point to these from its source address selection nets structure.
+ * When an address is deleted it does not happen right away on
+ * the SCTP side; it gets scheduled. What we do when a
+ * delete happens is immediately remove the address from
+ * the master list and decrement the refcount. As our
+ * addip iterator works through and frees the src address
+ * selection pointing to the sctp_ifa, eventually the refcount
+ * will reach 0 and we will delete it. Note that it is assumed
+ * that any locking on system level ifn/ifa is done at the
+ * caller of these functions and these routines will only
+ * lock the SCTP structures as they add or delete things.
+ *
+ * Other notes on VRF concepts.
+ * - An endpoint can be in multiple VRF's
+ * - An association lives within a VRF and only one VRF.
+ * - Any incoming packet we can deduce the VRF for by
+ *   looking at the mbuf/pak inbound (for BSD it's VRF=0 :D)
+ * - Any downward send call or connect call must supply the
+ * VRF via ancillary data or via some sort of set default
+ * VRF socket option call (again for BSD no brainer since
+ * the VRF is always 0).
+ * - An endpoint may add multiple VRF's to it.
+ * - Listening sockets can accept associations in any
+ * of the VRF's they are in but the assoc will end up
+ * in only one VRF (gotten from the packet or connect/send).
+ *
+ */
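+
+/*
+ * A sketch of the traversal those lists support (locking elided;
+ * handle_addr() is a placeholder, not a function in this file):
+ *
+ *     struct sctp_vrf *vrf = sctp_find_vrf(vrf_id);
+ *     struct sctp_ifn *ifn;
+ *     struct sctp_ifa *ifa;
+ *
+ *     LIST_FOREACH(ifn, &vrf->ifnlist, next_ifn)
+ *             LIST_FOREACH(ifa, &ifn->ifalist, next_ifa)
+ *                     handle_addr(&ifa->address.sa);
+ */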
+
+struct sctp_vrf *
+sctp_allocate_vrf(int vrf_id)
+{
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_vrflist *bucket;
+
+ /* First allocate the VRF structure */
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf) {
+ /* Already allocated */
+ return (vrf);
+ }
+ SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf),
+ SCTP_M_VRF);
+ if (vrf == NULL) {
+ /* No memory */
+#ifdef INVARIANTS
+ panic("No memory for VRF:%d", vrf_id);
+#endif
+ return (NULL);
+ }
+ /* setup the VRF */
+ memset(vrf, 0, sizeof(struct sctp_vrf));
+ vrf->vrf_id = vrf_id;
+ LIST_INIT(&vrf->ifnlist);
+ vrf->total_ifa_count = 0;
+ vrf->refcount = 0;
+ /* now also setup table ids */
+ SCTP_INIT_VRF_TABLEID(vrf);
+ /* Init the HASH of addresses */
+ vrf->vrf_addr_hash = SCTP_HASH_INIT(SCTP_VRF_ADDR_HASH_SIZE,
+ &vrf->vrf_addr_hashmark);
+ if (vrf->vrf_addr_hash == NULL) {
+ /* No memory */
+#ifdef INVARIANTS
+ panic("No memory for VRF:%d", vrf_id);
+#endif
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ return (NULL);
+ }
+
+ /* Add it to the hash table */
+ bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
+ LIST_INSERT_HEAD(bucket, vrf, next_vrf);
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
+ return (vrf);
+}
+
+
+struct sctp_ifn *
+sctp_find_ifn(void *ifn, uint32_t ifn_index)
+{
+ struct sctp_ifn *sctp_ifnp;
+ struct sctp_ifnlist *hash_ifn_head;
+
+	/* We assume the lock is held for the addresses;
+	 * if that's wrong, problems could occur :-)
+	 */
+ SCTP_IPI_ADDR_LOCK_ASSERT();
+ hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
+ LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) {
+ if (sctp_ifnp->ifn_index == ifn_index) {
+ return (sctp_ifnp);
+ }
+ if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) {
+ return (sctp_ifnp);
+ }
+ }
+ return (NULL);
+}
+
+
+struct sctp_vrf *
+sctp_find_vrf(uint32_t vrf_id)
+{
+ struct sctp_vrflist *bucket;
+ struct sctp_vrf *liste;
+
+ bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
+ LIST_FOREACH(liste, bucket, next_vrf) {
+ if (vrf_id == liste->vrf_id) {
+ return (liste);
+ }
+ }
+ return (NULL);
+}
+
+
+void
+sctp_free_vrf(struct sctp_vrf *vrf)
+{
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) {
+ if (vrf->vrf_addr_hash) {
+ SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+ vrf->vrf_addr_hash = NULL;
+ }
+ /* We zero'd the count */
+ LIST_REMOVE(vrf, next_vrf);
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
+ }
+}
+
+
+void
+sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
+{
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) {
+ /* We zero'd the count */
+ if (sctp_ifnp->vrf) {
+ sctp_free_vrf(sctp_ifnp->vrf);
+ }
+ SCTP_FREE(sctp_ifnp, SCTP_M_IFN);
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
+ }
+}
+
+
+void
+sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu)
+{
+ struct sctp_ifn *sctp_ifnp;
+
+ sctp_ifnp = sctp_find_ifn((void *)NULL, ifn_index);
+ if (sctp_ifnp != NULL) {
+ sctp_ifnp->ifn_mtu = mtu;
+ }
+}
+
+
+void
+sctp_free_ifa(struct sctp_ifa *sctp_ifap)
+{
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) {
+ /* We zero'd the count */
+ if (sctp_ifap->ifn_p) {
+ sctp_free_ifn(sctp_ifap->ifn_p);
+ }
+ SCTP_FREE(sctp_ifap, SCTP_M_IFA);
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
+ }
+}
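+
+/*
+ * Note the release chain above: dropping the last reference on an ifa
+ * releases its hold on the owning ifn (sctp_free_ifn), and dropping the
+ * last ifn reference in turn releases the vrf (sctp_free_vrf), so an
+ * idle vrf -> ifn -> ifa branch unwinds from the leaf up.
+ */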
+
+
+static void
+sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock)
+{
+ struct sctp_ifn *found;
+
+ found = sctp_find_ifn(sctp_ifnp->ifn_p, sctp_ifnp->ifn_index);
+ if (found == NULL) {
+ /* Not in the list.. sorry */
+ return;
+ }
+ if (hold_addr_lock == 0) {
+ SCTP_IPI_ADDR_WLOCK();
+ } else {
+ SCTP_IPI_ADDR_WLOCK_ASSERT();
+ }
+ LIST_REMOVE(sctp_ifnp, next_bucket);
+ LIST_REMOVE(sctp_ifnp, next_ifn);
+ if (hold_addr_lock == 0) {
+ SCTP_IPI_ADDR_WUNLOCK();
+ }
+ /* Take away the reference, and possibly free it */
+ sctp_free_ifn(sctp_ifnp);
+}
+
+
+void
+sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr,
+ const char *if_name, uint32_t ifn_index)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifa *sctp_ifap;
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+		goto out;
+	}
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n");
+ goto out;
+ }
+ if (sctp_ifap->ifn_p == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n");
+ goto out;
+ }
+ if (if_name) {
+ if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n",
+ sctp_ifap->ifn_p->ifn_name, if_name);
+ goto out;
+ }
+ } else {
+ if (sctp_ifap->ifn_p->ifn_index != ifn_index) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n",
+ sctp_ifap->ifn_p->ifn_index, ifn_index);
+ goto out;
+ }
+ }
+
+ sctp_ifap->localifa_flags &= (~SCTP_ADDR_VALID);
+ sctp_ifap->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+ out:
+ SCTP_IPI_ADDR_RUNLOCK();
+}
+
+
+void
+sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr,
+ const char *if_name, uint32_t ifn_index)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifa *sctp_ifap;
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+		goto out;
+	}
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n");
+ goto out;
+ }
+ if (sctp_ifap->ifn_p == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark usable\n");
+ goto out;
+ }
+ if (if_name) {
+ if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n",
+ sctp_ifap->ifn_p->ifn_name, if_name);
+ goto out;
+ }
+ } else {
+ if (sctp_ifap->ifn_p->ifn_index != ifn_index) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d up command for ifn_index:%d - ignored\n",
+ sctp_ifap->ifn_p->ifn_index, ifn_index);
+ goto out;
+ }
+ }
+
+ sctp_ifap->localifa_flags &= (~SCTP_ADDR_IFA_UNUSEABLE);
+ sctp_ifap->localifa_flags |= SCTP_ADDR_VALID;
+ out:
+ SCTP_IPI_ADDR_RUNLOCK();
+}
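+
+/*
+ * The two functions above are mirror images: marking an address down
+ * clears SCTP_ADDR_VALID and sets SCTP_ADDR_IFA_UNUSEABLE, while marking
+ * it up reverses both flags, so source address selection can skip the
+ * address exactly while it is down.
+ */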
+
+
+/*-
+ * Add an ifa to an ifn.
+ * Register the interface as necessary.
+ * NOTE: ADDR write lock MUST be held.
+ */
+static void
+sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap)
+{
+ int ifa_af;
+
+ LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
+ sctp_ifap->ifn_p = sctp_ifnp;
+ atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
+ /* update address counts */
+ sctp_ifnp->ifa_count++;
+ ifa_af = sctp_ifap->address.sa.sa_family;
+ switch (ifa_af) {
+#ifdef INET
+ case AF_INET:
+ sctp_ifnp->num_v4++;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sctp_ifnp->num_v6++;
+ break;
+#endif
+ default:
+ break;
+ }
+ if (sctp_ifnp->ifa_count == 1) {
+ /* register the new interface */
+ sctp_ifnp->registered_af = ifa_af;
+ }
+}
+
+
+/*-
+ * Remove an ifa from its ifn.
+ * If no more addresses exist, remove the ifn too. Otherwise, re-register
+ * the interface based on the remaining address families left.
+ * NOTE: ADDR write lock MUST be held.
+ */
+static void
+sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap)
+{
+ LIST_REMOVE(sctp_ifap, next_ifa);
+ if (sctp_ifap->ifn_p) {
+ /* update address counts */
+ sctp_ifap->ifn_p->ifa_count--;
+ switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ sctp_ifap->ifn_p->num_v4--;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sctp_ifap->ifn_p->num_v6--;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (LIST_EMPTY(&sctp_ifap->ifn_p->ifalist)) {
+ /* remove the ifn, possibly freeing it */
+ sctp_delete_ifn(sctp_ifap->ifn_p, SCTP_ADDR_LOCKED);
+ } else {
+ /* re-register address family type, if needed */
+ if ((sctp_ifap->ifn_p->num_v6 == 0) &&
+ (sctp_ifap->ifn_p->registered_af == AF_INET6)) {
+ sctp_ifap->ifn_p->registered_af = AF_INET;
+ } else if ((sctp_ifap->ifn_p->num_v4 == 0) &&
+ (sctp_ifap->ifn_p->registered_af == AF_INET)) {
+ sctp_ifap->ifn_p->registered_af = AF_INET6;
+ }
+ /* free the ifn refcount */
+ sctp_free_ifn(sctp_ifap->ifn_p);
+ }
+ sctp_ifap->ifn_p = NULL;
+ }
+}
+
+
+struct sctp_ifa *
+sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
+ uint32_t ifn_type, const char *if_name, void *ifa,
+ struct sockaddr *addr, uint32_t ifa_flags,
+ int dynamic_add)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifnp, *new_sctp_ifnp;
+ struct sctp_ifa *sctp_ifap, *new_sctp_ifap;
+ struct sctp_ifalist *hash_addr_head;
+ struct sctp_ifnlist *hash_ifn_head;
+ uint32_t hash_of_addr;
+ int new_ifn_af = 0;
+
+#ifdef SCTP_DEBUG
+ SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
+#endif
+ SCTP_MALLOC(new_sctp_ifnp, struct sctp_ifn *,
+ sizeof(struct sctp_ifn), SCTP_M_IFN);
+ if (new_sctp_ifnp == NULL) {
+#ifdef INVARIANTS
+ panic("No memory for IFN");
+#endif
+ return (NULL);
+ }
+ SCTP_MALLOC(new_sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA);
+ if (new_sctp_ifap == NULL) {
+#ifdef INVARIANTS
+ panic("No memory for IFA");
+#endif
+ SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
+ return (NULL);
+ }
+
+ SCTP_IPI_ADDR_WLOCK();
+ sctp_ifnp = sctp_find_ifn(ifn, ifn_index);
+ if (sctp_ifnp) {
+ vrf = sctp_ifnp->vrf;
+ } else {
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ vrf = sctp_allocate_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTP_IPI_ADDR_WUNLOCK();
+ SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
+ SCTP_FREE(new_sctp_ifap, SCTP_M_IFA);
+ return (NULL);
+ }
+ }
+ }
+ if (sctp_ifnp == NULL) {
+ /* build one and add it, can't hold lock
+ * until after malloc done though.
+ */
+ sctp_ifnp = new_sctp_ifnp;
+ new_sctp_ifnp = NULL;
+ memset(sctp_ifnp, 0, sizeof(struct sctp_ifn));
+ sctp_ifnp->ifn_index = ifn_index;
+ sctp_ifnp->ifn_p = ifn;
+ sctp_ifnp->ifn_type = ifn_type;
+ sctp_ifnp->refcount = 0;
+ sctp_ifnp->vrf = vrf;
+ atomic_add_int(&vrf->refcount, 1);
+ sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, addr->sa_family);
+ if (if_name != NULL) {
+ SCTP_SNPRINTF(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name);
+ } else {
+ SCTP_SNPRINTF(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown");
+ }
+ hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
+ LIST_INIT(&sctp_ifnp->ifalist);
+ LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket);
+ LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn);
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
+ new_ifn_af = 1;
+ }
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap) {
+ /* Hmm, it already exists? */
+ if ((sctp_ifap->ifn_p) &&
+ (sctp_ifap->ifn_p->ifn_index == ifn_index)) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Using existing ifn %s (0x%x) for ifa %p\n",
+ sctp_ifap->ifn_p->ifn_name, ifn_index,
+ (void *)sctp_ifap);
+ if (new_ifn_af) {
+ /* Remove the created one that we don't want */
+ sctp_delete_ifn(sctp_ifnp, SCTP_ADDR_LOCKED);
+ }
+ if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) {
+ /* easy to solve, just switch back to active */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Clearing deleted ifa flag\n");
+ sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
+ sctp_ifap->ifn_p = sctp_ifnp;
+ atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
+ }
+ exit_stage_left:
+ SCTP_IPI_ADDR_WUNLOCK();
+ if (new_sctp_ifnp != NULL) {
+ SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
+ }
+ SCTP_FREE(new_sctp_ifap, SCTP_M_IFA);
+ return (sctp_ifap);
+ } else {
+ if (sctp_ifap->ifn_p) {
+ /*
+ * The last IFN gets the address, remove the
+ * old one
+ */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n",
+ (void *)sctp_ifap, sctp_ifap->ifn_p->ifn_name,
+ sctp_ifap->ifn_p->ifn_index, if_name,
+ ifn_index);
+ /* remove the address from the old ifn */
+ sctp_remove_ifa_from_ifn(sctp_ifap);
+ /* move the address over to the new ifn */
+ sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
+ goto exit_stage_left;
+ } else {
+ /* repair ifnp which was NULL ? */
+ sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
+ SCTPDBG(SCTP_DEBUG_PCB4, "Repairing ifn %p for ifa %p\n",
+ (void *)sctp_ifnp, (void *)sctp_ifap);
+ sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
+ }
+ goto exit_stage_left;
+ }
+ }
+ sctp_ifap = new_sctp_ifap;
+ memset(sctp_ifap, 0, sizeof(struct sctp_ifa));
+ sctp_ifap->ifn_p = sctp_ifnp;
+ atomic_add_int(&sctp_ifnp->refcount, 1);
+ sctp_ifap->vrf_id = vrf_id;
+ sctp_ifap->ifa = ifa;
+#ifdef HAVE_SA_LEN
+ memcpy(&sctp_ifap->address, addr, addr->sa_len);
+#else
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in6));
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_conn));
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+#endif
+ sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE;
+ sctp_ifap->flags = ifa_flags;
+ /* Set scope */
+ switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = &sctp_ifap->address.sin;
+ if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
+ (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
+ sctp_ifap->src_is_loop = 1;
+ }
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ sctp_ifap->src_is_priv = 1;
+ }
+ sctp_ifnp->num_v4++;
+ if (new_ifn_af)
+ new_ifn_af = AF_INET;
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* ok to use deprecated addresses? */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = &sctp_ifap->address.sin6;
+ if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
+ (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
+ sctp_ifap->src_is_loop = 1;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ sctp_ifap->src_is_priv = 1;
+ }
+ sctp_ifnp->num_v6++;
+ if (new_ifn_af)
+ new_ifn_af = AF_INET6;
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (new_ifn_af)
+ new_ifn_af = AF_CONN;
+ break;
+#endif
+ default:
+ new_ifn_af = 0;
+ break;
+ }
+ hash_of_addr = sctp_get_ifa_hash_val(&sctp_ifap->address.sa);
+
+ if ((sctp_ifap->src_is_priv == 0) &&
+ (sctp_ifap->src_is_loop == 0)) {
+ sctp_ifap->src_is_glob = 1;
+ }
+ hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
+ LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket);
+ sctp_ifap->refcount = 1;
+ LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
+ sctp_ifnp->ifa_count++;
+ vrf->total_ifa_count++;
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
+ if (new_ifn_af) {
+ sctp_ifnp->registered_af = new_ifn_af;
+ }
+ SCTP_IPI_ADDR_WUNLOCK();
+ if (new_sctp_ifnp != NULL) {
+ SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
+ }
+
+ if (dynamic_add) {
+ /* Bump up the refcount so that when the timer
+ * completes it will drop back down.
+ */
+ struct sctp_laddr *wi;
+
+ atomic_add_int(&sctp_ifap->refcount, 1);
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+ /*
+ * Gak, what can we do? We have lost an address
+			 * change, can you say HOSED?
+ */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
+			/* Oops, must decrement the count */
+ sctp_del_addr_from_vrf(vrf_id, addr, ifn_index,
+ if_name);
+ return (NULL);
+ }
+ SCTP_INCR_LADDR_COUNT();
+ memset(wi, 0, sizeof(*wi));
+ (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+ wi->ifa = sctp_ifap;
+ wi->action = SCTP_ADD_IP_ADDRESS;
+
+ SCTP_WQ_ADDR_LOCK();
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ SCTP_WQ_ADDR_UNLOCK();
+ } else {
+ /* it's ready for use */
+ sctp_ifap->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ return (sctp_ifap);
+}
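+
+/*
+ * A hypothetical caller sketch (the real call sites live in the platform
+ * address-monitoring code): register a newly discovered address with the
+ * default VRF and defer its use until the ADDR_WQ timer has run:
+ *
+ *     struct sctp_ifa *ifa;
+ *
+ *     ifa = sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, ifn, ifn_index,
+ *                                ifn_type, if_name, NULL, sa, 0, 1);
+ *
+ * Passing dynamic_add == 1 takes the work-queue path above; passing 0
+ * marks the address ready for use immediately.  A NULL return means the
+ * allocation failed and the add was dropped.
+ */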
+
+void
+sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr,
+ uint32_t ifn_index, const char *if_name)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifa *sctp_ifap = NULL;
+
+ SCTP_IPI_ADDR_WLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+ goto out_now;
+ }
+
+#ifdef SCTP_DEBUG
+ SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: deleting address:", vrf_id);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
+#endif
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap) {
+ /* Validate the delete */
+ if (sctp_ifap->ifn_p) {
+ int valid = 0;
+ /*-
+ * The name has priority over the ifn_index
+			 * if it's given.
+ */
+ if (if_name) {
+ if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) == 0) {
+					/* They match, it's a correct delete */
+ valid = 1;
+ }
+ }
+ if (!valid) {
+ /* last ditch check ifn_index */
+ if (ifn_index == sctp_ifap->ifn_p->ifn_index) {
+ valid = 1;
+ }
+ }
+ if (!valid) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s does not match addresses\n",
+ ifn_index, ((if_name == NULL) ? "NULL" : if_name));
+ SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s - ignoring delete\n",
+ sctp_ifap->ifn_p->ifn_index, sctp_ifap->ifn_p->ifn_name);
+ SCTP_IPI_ADDR_WUNLOCK();
+ return;
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", (void *)sctp_ifap);
+ sctp_ifap->localifa_flags &= SCTP_ADDR_VALID;
+ /*
+ * We don't set the flag. This means that the structure will
+ * hang around in EP's that have bound specific to it until
+ * they close. This gives us TCP like behavior if someone
+ * removes an address (or for that matter adds it right back).
+ */
+ /* sctp_ifap->localifa_flags |= SCTP_BEING_DELETED; */
+ vrf->total_ifa_count--;
+ LIST_REMOVE(sctp_ifap, next_bucket);
+ sctp_remove_ifa_from_ifn(sctp_ifap);
+ }
+#ifdef SCTP_DEBUG
+ else {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Del Addr-ifn:%d Could not find address:",
+ ifn_index);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
+ }
+#endif
+
+ out_now:
+ SCTP_IPI_ADDR_WUNLOCK();
+ if (sctp_ifap) {
+ struct sctp_laddr *wi;
+
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+ /*
+ * Gak, what can we do? We have lost an address
+			 * change, can you say HOSED?
+ */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
+
+ /* Oops, must decrement the count */
+ sctp_free_ifa(sctp_ifap);
+ return;
+ }
+ SCTP_INCR_LADDR_COUNT();
+ memset(wi, 0, sizeof(*wi));
+ (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+ wi->ifa = sctp_ifap;
+ wi->action = SCTP_DEL_IP_ADDRESS;
+ SCTP_WQ_ADDR_LOCK();
+ /*
+ * Should this really be a tailq? As it is we will process the
+ * newest first :-0
+ */
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ SCTP_WQ_ADDR_UNLOCK();
+ }
+ return;
+}
+
+
+static int
+sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
+{
+ int loopback_scope;
+#if defined(INET)
+ int ipv4_local_scope, ipv4_addr_legal;
+#endif
+#if defined(INET6)
+ int local_scope, site_scope, ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+ int conn_addr_legal;
+#endif
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+
+ loopback_scope = stcb->asoc.scope.loopback_scope;
+#if defined(INET)
+ ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+ ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+#endif
+#if defined(INET6)
+ local_scope = stcb->asoc.scope.local_scope;
+ site_scope = stcb->asoc.scope.site_scope;
+ ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+ conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+ if (vrf == NULL) {
+ /* no vrf, no addresses */
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (0);
+ }
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa))) {
+ /* We allow pending addresses, where we
+					 * have sent an asconf-add, to be considered
+ * valid.
+ */
+ continue;
+ }
+ if (sctp_ifa->address.sa.sa_family != to->sa_family) {
+ continue;
+ }
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = &sctp_ifa->address.sin;
+ rsin = (struct sockaddr_in *)to;
+ if ((ipv4_local_scope == 0) &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6, *rsin6;
+#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
+ struct sockaddr_in6 lsa6;
+#endif
+ sin6 = &sctp_ifa->address.sin6;
+ rsin6 = (struct sockaddr_in6 *)to;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (sin6->sin6_scope_id == 0) {
+#ifdef SCTP_KAME
+ if (sa6_recoverscope(sin6) != 0)
+ continue;
+#else
+ lsa6 = *sin6;
+ if (in6_recoverscope(&lsa6,
+ &lsa6.sin6_addr,
+ NULL))
+ continue;
+ sin6 = &lsa6;
+#endif /* SCTP_KAME */
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (conn_addr_legal) {
+ struct sockaddr_conn *sconn, *rsconn;
+
+ sconn = &sctp_ifa->address.sconn;
+ rsconn = (struct sockaddr_conn *)to;
+ if (sconn->sconn_addr == rsconn->sconn_addr) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n");
+ continue;
+ }
+ if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+ (!sctp_is_addr_pending(stcb, laddr->ifa))) {
+ /* We allow pending addresses, where we
+				 * have sent an asconf-add, to be considered
+ * valid.
+ */
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family != to->sa_family) {
+ continue;
+ }
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = &laddr->ifa->address.sin;
+ rsin = (struct sockaddr_in *)to;
+ if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = &laddr->ifa->address.sin6;
+ rsin6 = (struct sockaddr_in6 *)to;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ break;
+ }
+
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn, *rsconn;
+
+ sconn = &laddr->ifa->address.sconn;
+ rsconn = (struct sockaddr_conn *)to;
+ if (sconn->sconn_addr == rsconn->sconn_addr) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (0);
+}
+
+
+static struct sctp_tcb *
+sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
+ struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id)
+{
+ /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
+ /*
+ * If we support the TCP model, then we must now dig through to see
+ * if we can find our endpoint in the list of tcp ep's.
+ */
+ uint16_t lport, rport;
+ struct sctppcbhead *ephead;
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+#ifdef SCTP_MVRF
+ int fnd, i;
+#endif
+
+ if ((to == NULL) || (from == NULL)) {
+ return (NULL);
+ }
+
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (from->sa_family == AF_INET) {
+ lport = ((struct sockaddr_in *)to)->sin_port;
+ rport = ((struct sockaddr_in *)from)->sin_port;
+ } else {
+ return (NULL);
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (from->sa_family == AF_INET6) {
+ lport = ((struct sockaddr_in6 *)to)->sin6_port;
+ rport = ((struct sockaddr_in6 *)from)->sin6_port;
+ } else {
+ return (NULL);
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (from->sa_family == AF_CONN) {
+ lport = ((struct sockaddr_conn *)to)->sconn_port;
+ rport = ((struct sockaddr_conn *)from)->sconn_port;
+ } else {
+ return (NULL);
+ }
+ break;
+#endif
+ default:
+ return (NULL);
+ }
+ ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))];
+ /*
+ * Ok now for each of the guys in this bucket we must look and see:
+	 * - Does the remote port match?
+	 * - Do their single association's addresses match this address (to)?
+	 * If so we update inp_p to point to this ep and return the tcb from it.
+ */
+ LIST_FOREACH(inp, ephead, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (lport != inp->sctp_lport) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)to;
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)to;
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ break;
+ }
+#endif
+ default:
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#endif
+#ifdef SCTP_MVRF
+ fnd = 0;
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (inp->m_vrf_ids[i] == vrf_id) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (fnd == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#else
+ if (inp->def_vrf_id != vrf_id) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#endif
+ /* check to see if the ep has one of the addresses */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* We are NOT bound all, so look further */
+ int match = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__);
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n");
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family ==
+ to->sa_family) {
+ /* see if it matches */
+#ifdef INET
+ if (from->sa_family == AF_INET) {
+ struct sockaddr_in *intf_addr, *sin;
+
+ intf_addr = &laddr->ifa->address.sin;
+ sin = (struct sockaddr_in *)to;
+ if (sin->sin_addr.s_addr ==
+ intf_addr->sin_addr.s_addr) {
+ match = 1;
+ break;
+ }
+ }
+#endif
+#ifdef INET6
+ if (from->sa_family == AF_INET6) {
+ struct sockaddr_in6 *intf_addr6;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)
+ to;
+ intf_addr6 = &laddr->ifa->address.sin6;
+
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ intf_addr6)) {
+ match = 1;
+ break;
+ }
+ }
+#endif
+#if defined(__Userspace__)
+ if (from->sa_family == AF_CONN) {
+ struct sockaddr_conn *intf_addr, *sconn;
+
+ intf_addr = &laddr->ifa->address.sconn;
+ sconn = (struct sockaddr_conn *)to;
+ if (sconn->sconn_addr ==
+ intf_addr->sconn_addr) {
+ match = 1;
+ break;
+ }
+ }
+#endif
+ }
+ }
+ if (match == 0) {
+ /* This endpoint does not have this address */
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ }
+ /*
+ * Ok if we hit here the ep has the address, does it hold
+ * the tcb?
+ */
+ /* XXX: Why don't we TAILQ_FOREACH through sctp_asoc_list? */
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (!sctp_does_stcb_own_this_addr(stcb, to)) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (stcb->rport != rport) {
+ /* remote port does not match. */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (!sctp_does_stcb_own_this_addr(stcb, to)) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* Does this TCB have a matching address? */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->ro._l_addr.sa.sa_family != from->sa_family) {
+ /* not the same family, can't be a match */
+ continue;
+ }
+ switch (from->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)&net->ro._l_addr;
+ rsin = (struct sockaddr_in *)from;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /* Update the endpoint pointer */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)from;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /* Update the endpoint pointer */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn, *rsconn;
+
+ sconn = (struct sockaddr_conn *)&net->ro._l_addr;
+ rsconn = (struct sockaddr_conn *)from;
+ if (sconn->sconn_addr == rsconn->sconn_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /* Update the endpoint pointer */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+
+/*
+ * rules for use
+ *
+ * 1) If I return a NULL you must decrement any INP ref cnt.
+ * 2) If I find an stcb, both will be locked (locked_tcb and stcb) but
+ *    decrement will be done (if locked == NULL).
+ * 3) Decrement happens on return ONLY if locked == NULL.
+ */
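+
+/*
+ * A caller sketch of those rules (error handling elided; the caller is
+ * assumed to hold its own reference on inp):
+ *
+ *     stcb = sctp_findassociation_ep_addr(&inp, remote, &net, NULL, NULL);
+ *     if (stcb == NULL) {
+ *             SCTP_INP_DECR_REF(inp);      rule 1: caller drops the ref
+ *     } else {
+ *             ... stcb comes back locked and the inp ref was already
+ *             dropped inside the lookup (rules 2 and 3) ...
+ *             SCTP_TCB_UNLOCK(stcb);
+ *     }
+ */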
+
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
+ struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
+{
+ struct sctpasochead *head;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net;
+ uint16_t rport;
+
+ inp = *inp_p;
+ switch (remote->sa_family) {
+#ifdef INET
+ case AF_INET:
+ rport = (((struct sockaddr_in *)remote)->sin_port);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ rport = (((struct sockaddr_in6 *)remote)->sin6_port);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ rport = (((struct sockaddr_conn *)remote)->sconn_port);
+ break;
+#endif
+ default:
+ return (NULL);
+ }
+ if (locked_tcb) {
+ /*
+		 * UN-lock so we can do proper locking here; this occurs when
+ * called from load_addresses_from_init.
+ */
+ atomic_add_int(&locked_tcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ SCTP_INP_INFO_RLOCK();
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*-
+ * Now either this guy is our listener or it's the
+ * connector. If it is the one that issued the connect, then
+		 * its only chance is to be the first TCB in the list. If
+ * it is the acceptor, then do the special_lookup to hash
+ * and find the real inp.
+ */
+ if ((inp->sctp_socket) && SCTP_IS_LISTENING(inp)) {
+ /* to is peer addr, from is my addr */
+#ifndef SCTP_MVRF
+ stcb = sctp_tcb_special_locate(inp_p, remote, local,
+ netp, inp->def_vrf_id);
+ if ((stcb != NULL) && (locked_tcb == NULL)) {
+ /* we have a locked tcb, lower refcount */
+ SCTP_INP_DECR_REF(inp);
+ }
+ if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+ SCTP_INP_RLOCK(locked_tcb->sctp_ep);
+ SCTP_TCB_LOCK(locked_tcb);
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
+ }
+#else
+ /*-
+ * MVRF is tricky, we must look in every VRF
+ * the endpoint has.
+ */
+ int i;
+
+ for (i = 0; i < inp->num_vrfs; i++) {
+ stcb = sctp_tcb_special_locate(inp_p, remote, local,
+ netp, inp->m_vrf_ids[i]);
+ if ((stcb != NULL) && (locked_tcb == NULL)) {
+ /* we have a locked tcb, lower refcount */
+ SCTP_INP_DECR_REF(inp);
+ break;
+ }
+ if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+ SCTP_INP_RLOCK(locked_tcb->sctp_ep);
+ SCTP_TCB_LOCK(locked_tcb);
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
+ break;
+ }
+ }
+#endif
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ goto null_return;
+ }
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto null_return;
+ }
+ SCTP_TCB_LOCK(stcb);
+
+ if (stcb->rport != rport) {
+ /* remote port does not match. */
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ /* now look at the list of remote addresses */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+ if (net == (TAILQ_NEXT(net, sctp_next))) {
+ panic("Corrupt net list");
+ }
+#endif
+ if (net->ro._l_addr.sa.sa_family !=
+ remote->sa_family) {
+ /* not the same family */
+ continue;
+ }
+ switch (remote->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)
+ &net->ro._l_addr;
+ rsin = (struct sockaddr_in *)remote;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)remote;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn, *rsconn;
+
+ sconn = (struct sockaddr_conn *)&net->ro._l_addr;
+ rsconn = (struct sockaddr_conn *)remote;
+ if (sconn->sconn_addr == rsconn->sconn_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ goto null_return;
+ }
+ head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
+ inp->sctp_hashmark)];
+ LIST_FOREACH(stcb, head, sctp_tcbhash) {
+ if (stcb->rport != rport) {
+ /* remote port does not match */
+ continue;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ /* now look at the list of remote addresses */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+ if (net == (TAILQ_NEXT(net, sctp_next))) {
+ panic("Corrupt net list");
+ }
+#endif
+ if (net->ro._l_addr.sa.sa_family !=
+ remote->sa_family) {
+ /* not the same family */
+ continue;
+ }
+ switch (remote->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)
+ &net->ro._l_addr;
+ rsin = (struct sockaddr_in *)remote;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)
+ &net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)remote;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn, *rsconn;
+
+ sconn = (struct sockaddr_conn *)&net->ro._l_addr;
+ rsconn = (struct sockaddr_conn *)remote;
+ if (sconn->sconn_addr == rsconn->sconn_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+null_return:
+ /* clean up for returning null */
+ if (locked_tcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ /* not found */
+ return (NULL);
+}
+
+
+/*
+ * Find an association for a specific endpoint using the association id given
+ * out in the COMM_UP notification
+ */
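+/*
+ * Summary of the two variants below: the _locked one assumes the caller
+ * already holds the INP read lock; sctp_findassociation_ep_asocid() wraps
+ * it with SCTP_INP_RLOCK()/SCTP_INP_RUNLOCK(). With want_lock set, the
+ * stcb is returned TCB-locked.
+ */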
+struct sctp_tcb *
+sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+ /*
+ * Use the assoc_id to find the endpoint
+ */
+ struct sctpasochead *head;
+ struct sctp_tcb *stcb;
+ uint32_t id;
+
+ if (inp == NULL) {
+ SCTP_PRINTF("TSNH ep_associd\n");
+ return (NULL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_PRINTF("TSNH ep_associd0\n");
+ return (NULL);
+ }
+ id = (uint32_t)asoc_id;
+ head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+ if (head == NULL) {
+ /* invalid id TSNH */
+ SCTP_PRINTF("TSNH ep_associd1\n");
+ return (NULL);
+ }
+ LIST_FOREACH(stcb, head, sctp_tcbasocidhash) {
+ if (stcb->asoc.assoc_id == id) {
+ if (inp != stcb->sctp_ep) {
+ /*
+ * some other guy has the same id active (id
+ * collision ??).
+ */
+ SCTP_PRINTF("TSNH ep_associd2\n");
+ continue;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ continue;
+ }
+ if (want_lock) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ return (stcb);
+ }
+ }
+ return (NULL);
+}
+
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+ struct sctp_tcb *stcb;
+
+ SCTP_INP_RLOCK(inp);
+ stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock);
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+}
+
+
+/*
+ * Endpoint probe expects that the INP_INFO is locked.
+ */
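+/*
+ * Overview (informal): the first pass below matches bound-all endpoints on
+ * port and VRF alone; only if that fails do we walk the chain again and
+ * compare each endpoint's bound address list against nam.
+ */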
+static struct sctp_inpcb *
+sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
+ uint16_t lport, uint32_t vrf_id)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in6 *intf_addr6;
+#endif
+#if defined(__Userspace__)
+ struct sockaddr_conn *sconn;
+#endif
+#ifdef SCTP_MVRF
+ int i;
+#endif
+ int fnd;
+
+#ifdef INET
+ sin = NULL;
+#endif
+#ifdef INET6
+ sin6 = NULL;
+#endif
+#if defined(__Userspace__)
+ sconn = NULL;
+#endif
+ switch (nam->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)nam;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)nam;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ sconn = (struct sockaddr_conn *)nam;
+ break;
+#endif
+ default:
+ /* unsupported family */
+ return (NULL);
+ }
+
+ if (head == NULL)
+ return (NULL);
+
+ LIST_FOREACH(inp, head, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
+ (inp->sctp_lport == lport)) {
+ /* got it */
+ switch (nam->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* IPv4 on a IPv6 socket with ONLY IPv6 set */
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ /* A V6 address and the endpoint is NOT bound V6 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+#endif
+ break;
+#endif
+ default:
+ break;
+ }
+ /* does a VRF id match? */
+ fnd = 0;
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (inp->m_vrf_ids[i] == vrf_id) {
+ fnd = 1;
+ break;
+ }
+ }
+#else
+ if (inp->def_vrf_id == vrf_id)
+ fnd = 1;
+#endif
+
+ SCTP_INP_RUNLOCK(inp);
+ if (!fnd)
+ continue;
+ return (inp);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ switch (nam->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (sin->sin_addr.s_addr == INADDR_ANY) {
+ /* Can't hunt for one that has no address specified */
+ return (NULL);
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /* Can't hunt for one that has no address specified */
+ return (NULL);
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (sconn->sconn_addr == NULL) {
+ return (NULL);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ /*
+ * OK, not bound to all, so see if we can find an EP bound to this
+ * address.
+ */
+ LIST_FOREACH(inp, head, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /*
+ * OK, this could be a likely candidate; look at all of its
+ * addresses.
+ */
+ if (inp->sctp_lport != lport) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* does a VRF id match? */
+ fnd = 0;
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (inp->m_vrf_ids[i] == vrf_id) {
+ fnd = 1;
+ break;
+ }
+ }
+#else
+ if (inp->def_vrf_id == vrf_id)
+ fnd = 1;
+
+#endif
+ if (!fnd) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
+ __func__);
+ continue;
+ }
+ SCTPDBG(SCTP_DEBUG_PCB1, "Ok laddr->ifa:%p is possible, ",
+ (void *)laddr->ifa);
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "Huh IFA being deleted\n");
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family == nam->sa_family) {
+ /* possible, see if it matches */
+ switch (nam->sa_family) {
+#ifdef INET
+ case AF_INET:
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (sin == NULL) {
+ /* TSNH */
+ break;
+ }
+#endif
+ if (sin->sin_addr.s_addr ==
+ laddr->ifa->address.sin.sin_addr.s_addr) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ intf_addr6 = &laddr->ifa->address.sin6;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ intf_addr6)) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (sconn->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ break;
+#endif
+ }
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+
+static struct sctp_inpcb *
+sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
+{
+ struct sctppcbhead *head;
+ struct sctp_inpcb *t_inp;
+#ifdef SCTP_MVRF
+ int i;
+#endif
+ int fnd;
+
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+ SCTP_BASE_INFO(hashmark))];
+ LIST_FOREACH(t_inp, head, sctp_hash) {
+ if (t_inp->sctp_lport != lport) {
+ continue;
+ }
+ /* is it in the VRF in question */
+ fnd = 0;
+#ifdef SCTP_MVRF
+ for (i = 0; i < t_inp->num_vrfs; i++) {
+ if (t_inp->m_vrf_ids[i] == vrf_id) {
+ fnd = 1;
+ break;
+ }
+ }
+#else
+ if (t_inp->def_vrf_id == vrf_id)
+ fnd = 1;
+#endif
+ if (!fnd)
+ continue;
+
+ /* This one is in use. */
+ /* check the v6/v4 binding issue */
+ if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(t_inp)) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ /* collision in V6 space */
+ return (t_inp);
+ } else {
+ /* inp is BOUND_V4 no conflict */
+ continue;
+ }
+ } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ /* t_inp is bound v4 and v6, conflict always */
+ return (t_inp);
+ } else {
+ /* t_inp is bound only V4 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* no conflict */
+ continue;
+ }
+ /* else fall through to conflict */
+ }
+ return (t_inp);
+ }
+ return (NULL);
+}
+
+
+int
+sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp)
+{
+ /* For 1-2-1 with port reuse */
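+ /*
+ * Informally: with port reuse on, a 1-to-1 endpoint that wants to
+ * listen must live in the main ephash, so every non-listener sharing
+ * the port is evicted into the TCP-pool hash and this inp is moved
+ * the other way.
+ */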
+ struct sctppcbhead *head;
+ struct sctp_inpcb *tinp, *ninp;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+ /* only works with port reuse on */
+ return (-1);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
+ return (0);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_WLOCK();
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport,
+ SCTP_BASE_INFO(hashmark))];
+ /* Kick out all non-listeners to the TCP hash */
+ LIST_FOREACH_SAFE(tinp, head, sctp_hash, ninp) {
+ if (tinp->sctp_lport != inp->sctp_lport) {
+ continue;
+ }
+ if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ continue;
+ }
+ if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ continue;
+ }
+ if (SCTP_IS_LISTENING(tinp)) {
+ continue;
+ }
+ SCTP_INP_WLOCK(tinp);
+ LIST_REMOVE(tinp, sctp_hash);
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(tinp->sctp_lport, SCTP_BASE_INFO(hashtcpmark))];
+ tinp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
+ LIST_INSERT_HEAD(head, tinp, sctp_hash);
+ SCTP_INP_WUNLOCK(tinp);
+ }
+ SCTP_INP_WLOCK(inp);
+ /* Pull from where he was */
+ LIST_REMOVE(inp, sctp_hash);
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL;
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))];
+ LIST_INSERT_HEAD(head, inp, sctp_hash);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_RLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+}
+
+
+struct sctp_inpcb *
+sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
+ uint32_t vrf_id)
+{
+ /*
+ * First we check the hash table to see if someone has this port
+ * bound with just the port.
+ */
+ struct sctp_inpcb *inp;
+ struct sctppcbhead *head;
+ int lport;
+ unsigned int i;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+#if defined(__Userspace__)
+ struct sockaddr_conn *sconn;
+#endif
+
+ switch (nam->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)nam;
+ lport = sin->sin_port;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)nam;
+ lport = sin6->sin6_port;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ sconn = (struct sockaddr_conn *)nam;
+ lport = sconn->sconn_port;
+ break;
+#endif
+ default:
+ return (NULL);
+ }
+ /*
+ * I could cheat here and just cast to one of the types, but we will
+ * do it right. It also provides the check against an unsupported
+ * type.
+ */
+ /* Find the head of the ALLADDR chain */
+ if (have_lock == 0) {
+ SCTP_INP_INFO_RLOCK();
+ }
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+ SCTP_BASE_INFO(hashmark))];
+ inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
+
+ /*
+ * If the TCP model exists it could be that the main listening
+ * endpoint is gone but there still exists a connected socket for this
+ * guy. If so we can return the first one that we find. This may NOT
+ * be the correct one, so the caller should be wary of the returned INP.
+ * Currently the only caller that sets find_tcp_pool is in bindx, where
+ * we are verifying that a user CAN bind the address. He either
+ * has bound it already, or someone else has, or it's open to bind,
+ * so this is good enough.
+ */
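+ /*
+ * Hypothetical bindx-style use (illustration only): the caller merely
+ * needs to learn whether some endpoint owns the address, e.g.
+ *
+ *	inp = sctp_pcb_findep(addr, 1, 0, vrf_id);
+ *	if (inp != NULL)
+ *		SCTP_INP_DECR_REF(inp);	// ref is bumped on success
+ */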
+ if (inp == NULL && find_tcp_pool) {
+ for (i = 0; i < SCTP_BASE_INFO(hashtcpmark) + 1; i++) {
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[i];
+ inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
+ if (inp) {
+ break;
+ }
+ }
+ }
+ if (inp) {
+ SCTP_INP_INCR_REF(inp);
+ }
+ if (have_lock == 0) {
+ SCTP_INP_INFO_RUNLOCK();
+ }
+ return (inp);
+}
+
+
+/*
+ * Find an association for an endpoint using the pointer to the address you
+ * want to send to and the endpoint pointer. The address can be IPv4 or IPv6.
+ * We may need to change the *to to some other struct, like an mbuf...
+ */
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *from, struct sockaddr *to,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool,
+ uint32_t vrf_id)
+{
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_tcb *stcb;
+
+ SCTP_INP_INFO_RLOCK();
+ if (find_tcp_pool) {
+ if (inp_p != NULL) {
+ stcb = sctp_tcb_special_locate(inp_p, from, to, netp,
+ vrf_id);
+ } else {
+ stcb = sctp_tcb_special_locate(&inp, from, to, netp,
+ vrf_id);
+ }
+ if (stcb != NULL) {
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ }
+ inp = sctp_pcb_findep(to, 0, 1, vrf_id);
+ if (inp_p != NULL) {
+ *inp_p = inp;
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ if (inp == NULL) {
+ return (NULL);
+ }
+ /*
+ * OK, we have an endpoint, now let's find the assoc for it (if any).
+ * We place the source address (from) in the 'to' of the find-endpoint
+ * call, since in reality this chain is used from the inbound packet
+ * side.
+ */
+ if (inp_p != NULL) {
+ stcb = sctp_findassociation_ep_addr(inp_p, from, netp, to,
+ NULL);
+ } else {
+ stcb = sctp_findassociation_ep_addr(&inp, from, netp, to,
+ NULL);
+ }
+ return (stcb);
+}
+
+
+/*
+ * This routine will grub through the mbuf that is an INIT or INIT-ACK and
+ * find all addresses that the sender has specified in any address list. Each
+ * address will be used to look up the TCB and see if one exists.
+ */
+static struct sctp_tcb *
+sctp_findassociation_special_addr(struct mbuf *m, int offset,
+ struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
+ struct sockaddr *dst)
+{
+ struct sctp_paramhdr *phdr, param_buf;
+#if defined(INET) || defined(INET6)
+ struct sctp_tcb *stcb;
+ uint16_t ptype;
+#endif
+ uint16_t plen;
+#ifdef INET
+ struct sockaddr_in sin4;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+
+#ifdef INET
+ memset(&sin4, 0, sizeof(sin4));
+#ifdef HAVE_SIN_LEN
+ sin4.sin_len = sizeof(sin4);
+#endif
+ sin4.sin_family = AF_INET;
+ sin4.sin_port = sh->src_port;
+#endif
+#ifdef INET6
+ memset(&sin6, 0, sizeof(sin6));
+#ifdef HAVE_SIN6_LEN
+ sin6.sin6_len = sizeof(sin6);
+#endif
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = sh->src_port;
+#endif
+
+ offset += sizeof(struct sctp_init_chunk);
+
+ phdr = sctp_get_next_param(m, offset, &param_buf, sizeof(param_buf));
+ while (phdr != NULL) {
+ /* now we must see if we want the parameter */
+#if defined(INET) || defined(INET6)
+ ptype = ntohs(phdr->param_type);
+#endif
+ plen = ntohs(phdr->param_length);
+ if (plen == 0) {
+ break;
+ }
+#ifdef INET
+ if (ptype == SCTP_IPV4_ADDRESS &&
+ plen == sizeof(struct sctp_ipv4addr_param)) {
+ /* Get the rest of the address */
+ struct sctp_ipv4addr_param ip4_param, *p4;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ip4_param, sizeof(ip4_param));
+ if (phdr == NULL) {
+ return (NULL);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
+ /* look it up */
+ stcb = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&sin4, netp, dst, NULL);
+ if (stcb != NULL) {
+ return (stcb);
+ }
+ }
+#endif
+#ifdef INET6
+ if (ptype == SCTP_IPV6_ADDRESS &&
+ plen == sizeof(struct sctp_ipv6addr_param)) {
+ /* Get the rest of the address */
+ struct sctp_ipv6addr_param ip6_param, *p6;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ip6_param, sizeof(ip6_param));
+ if (phdr == NULL) {
+ return (NULL);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
+ /* look it up */
+ stcb = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&sin6, netp, dst, NULL);
+ if (stcb != NULL) {
+ return (stcb);
+ }
+ }
+#endif
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(m, offset, &param_buf,
+ sizeof(param_buf));
+ }
+ return (NULL);
+}
+
+static struct sctp_tcb *
+sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
+ uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag)
+{
+ /*
+ * Use my vtag to hash. If we find it, we then verify that the source
+ * addr is in the assoc. If all goes well, we save a bit on reception
+ * of a packet.
+ */
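+ /*
+ * Informally: the vtag indexes the global sctp_asochash; each
+ * candidate must still match on ports, VRF and destination ownership,
+ * and (unless remote_tag or skip_src_check short-circuits it) the
+ * source address, before we trust the hit.
+ */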
+ struct sctpasochead *head;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+#ifdef SCTP_MVRF
+ unsigned int i;
+#endif
+
+ SCTP_INP_INFO_RLOCK();
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag,
+ SCTP_BASE_INFO(hashasocmark))];
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ SCTP_INP_RLOCK(stcb->sctp_ep);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ continue;
+ }
+#ifdef SCTP_MVRF
+ for (i = 0; i < stcb->sctp_ep->num_vrfs; i++) {
+ if (stcb->sctp_ep->m_vrf_ids[i] == vrf_id) {
+ break;
+ }
+ }
+ if (i == stcb->sctp_ep->num_vrfs) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ continue;
+ }
+#else
+ if (stcb->sctp_ep->def_vrf_id != vrf_id) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ continue;
+ }
+#endif
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ if (stcb->asoc.my_vtag == vtag) {
+ /* candidate */
+ if (stcb->rport != rport) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (stcb->sctp_ep->sctp_lport != lport) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ /* RRS: Need to-addr check here */
+ if (sctp_does_stcb_own_this_addr(stcb, to) == 0) {
+ /* Endpoint does not own this address */
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (remote_tag) {
+ /* If we have both vtags that's all we match on */
+ if (stcb->asoc.peer_vtag == remote_tag) {
+ /* If both tags match we consider it conclusive
+ * and check NO source/destination addresses
+ */
+ goto conclusive;
+ }
+ }
+ if (skip_src_check) {
+ conclusive:
+ if (from) {
+ *netp = sctp_findnet(stcb, from);
+ } else {
+ *netp = NULL; /* unknown */
+ }
+ if (inp_p)
+ *inp_p = stcb->sctp_ep;
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ net = sctp_findnet(stcb, from);
+ if (net) {
+ /* yep, it's him. */
+ *netp = net;
+ SCTP_STAT_INCR(sctps_vtagexpress);
+ *inp_p = stcb->sctp_ep;
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ } else {
+ /*
+ * not him, this should only happen in rare
+ * cases so I peg it.
+ */
+ SCTP_STAT_INCR(sctps_vtagbogus);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+}
+
+
+/*
+ * Find an association with the pointer to the inbound IP packet. This can be
+ * a IPv4 or IPv6 packet.
+ */
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *m, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_inpcb *inp;
+
+ if (sh->v_tag) {
+ /* we only go down this path if vtag is non-zero */
+ stcb = sctp_findassoc_by_vtag(src, dst, ntohl(sh->v_tag),
+ inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0);
+ if (stcb) {
+ return (stcb);
+ }
+ }
+
+ if (inp_p) {
+ stcb = sctp_findassociation_addr_sa(src, dst, inp_p, netp,
+ 1, vrf_id);
+ inp = *inp_p;
+ } else {
+ stcb = sctp_findassociation_addr_sa(src, dst, &inp, netp,
+ 1, vrf_id);
+ }
+ SCTPDBG(SCTP_DEBUG_PCB1, "stcb:%p inp:%p\n", (void *)stcb, (void *)inp);
+ if (stcb == NULL && inp) {
+ /* Found a EP but not this address */
+ if ((ch->chunk_type == SCTP_INITIATION) ||
+ (ch->chunk_type == SCTP_INITIATION_ACK)) {
+ /*-
+ * special hook, we do NOT return linp or an
+ * association that is linked to an existing
+ * association that is under the TCP pool (i.e. no
+ * listener exists). The endpoint finding routine
+ * will always find a listener before examining the
+ * TCP pool.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ if (inp_p) {
+ *inp_p = NULL;
+ }
+ return (NULL);
+ }
+ stcb = sctp_findassociation_special_addr(m,
+ offset, sh, &inp, netp, dst);
+ if (inp_p != NULL) {
+ *inp_p = inp;
+ }
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_PCB1, "stcb is %p\n", (void *)stcb);
+ return (stcb);
+}
+
+/*
+ * Look up an association by an ASCONF lookup address.
+ * If the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup.
+ */
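+/*
+ * Informal note: the wildcard case cannot be matched by address, so we fall
+ * back to sctp_findassoc_by_vtag() with skip_src_check set and let the
+ * verification tag identify the association.
+ */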
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *m, int offset,
+ struct sockaddr *dst, struct sctphdr *sh,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
+{
+ struct sctp_tcb *stcb;
+ union sctp_sockstore remote_store;
+ struct sctp_paramhdr param_buf, *phdr;
+ int ptype;
+ int zero_address = 0;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+
+ memset(&remote_store, 0, sizeof(remote_store));
+ phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
+ &param_buf, sizeof(struct sctp_paramhdr));
+ if (phdr == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf lookup addr\n",
+ __func__);
+ return NULL;
+ }
+ ptype = (int)((uint32_t) ntohs(phdr->param_type));
+ /* get the correlation address */
+ switch (ptype) {
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ {
+ /* ipv6 address param */
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
+ return NULL;
+ }
+ p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
+ offset + sizeof(struct sctp_asconf_chunk),
+ &p6_buf.ph, sizeof(p6_buf));
+ if (p6 == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v6 lookup addr\n",
+ __func__);
+ return (NULL);
+ }
+ sin6 = &remote_store.sin6;
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(*sin6);
+#endif
+ sin6->sin6_port = sh->src_port;
+ memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ break;
+ }
+#endif
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ {
+ /* ipv4 address param */
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
+ return NULL;
+ }
+ p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
+ offset + sizeof(struct sctp_asconf_chunk),
+ &p4_buf.ph, sizeof(p4_buf));
+ if (p4 == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v4 lookup addr\n",
+ __func__);
+ return (NULL);
+ }
+ sin = &remote_store.sin;
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(*sin);
+#endif
+ sin->sin_port = sh->src_port;
+ memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ break;
+ }
+#endif
+ default:
+ /* invalid address param type */
+ return NULL;
+ }
+
+ if (zero_address) {
+ stcb = sctp_findassoc_by_vtag(NULL, dst, ntohl(sh->v_tag), inp_p,
+ netp, sh->src_port, sh->dest_port, 1, vrf_id, 0);
+ if (stcb != NULL) {
+ SCTP_INP_DECR_REF(*inp_p);
+ }
+ } else {
+ stcb = sctp_findassociation_ep_addr(inp_p,
+ &remote_store.sa, netp,
+ dst, NULL);
+ }
+ return (stcb);
+}
+
+
+/*
+ * Allocate an sctp_inpcb and set up a temporary binding to a port/all
+ * addresses. This way, if we don't get a bind, we by default pick an
+ * ephemeral port with all addresses bound.
+ */
+int
+sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
+{
+ /*
+ * We get called when a new endpoint starts up. We need to allocate
+ * the sctp_inpcb structure from the zone and init it. Mark it as
+ * unbound and find a port that we can use as an ephemeral with
+ * INADDR_ANY. If the user binds later, no problem, we can then add
+ * in the specific addresses. And set up the default parameters for
+ * the EP.
+ */
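+ /*
+ * Rough shape of this function: allocate from the EP zone, wire up
+ * the socket pointers, seed the defaults from the sysctl values,
+ * create the hash tables and secret keys, and publish the inp on the
+ * global list. Each failure path unwinds what was allocated so far
+ * and returns ENOBUFS (or EOPNOTSUPP for an unknown socket type).
+ */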
+ int i, error;
+ struct sctp_inpcb *inp;
+ struct sctp_pcb *m;
+ struct timeval time;
+ sctp_sharedkey_t *null_key;
+
+ error = 0;
+
+ SCTP_INP_INFO_WLOCK();
+ inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb);
+ if (inp == NULL) {
+ SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n");
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ return (ENOBUFS);
+ }
+ /* zap it */
+ memset(inp, 0, sizeof(*inp));
+
+ /* bump generations */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ inp->ip_inp.inp.inp_state = INPCB_STATE_INUSE;
+#endif
+ /* setup socket pointers */
+ inp->sctp_socket = so;
+ inp->ip_inp.inp.inp_socket = so;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ inp->ip_inp.inp.inp_cred = crhold(so->so_cred);
+#endif
+#ifdef INET6
+#if !defined(__Userspace__) && !defined(_WIN32)
+ if (INP_SOCKAF(so) == AF_INET6) {
+ if (MODULE_GLOBAL(ip6_auto_flowlabel)) {
+ inp->ip_inp.inp.inp_flags |= IN6P_AUTOFLOWLABEL;
+ }
+ if (MODULE_GLOBAL(ip6_v6only)) {
+ inp->ip_inp.inp.inp_flags |= IN6P_IPV6_V6ONLY;
+ }
+ }
+#endif
+#endif
+ inp->sctp_associd_counter = 1;
+ inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT;
+ inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+ inp->max_cwnd = 0;
+ inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off);
+ inp->ecn_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_ecn_enable);
+ inp->prsctp_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pr_enable);
+ inp->auth_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_auth_enable);
+ inp->asconf_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_asconf_enable);
+ inp->reconfig_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_reconfig_enable);
+ inp->nrsack_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_nrsack_enable);
+ inp->pktdrop_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pktdrop_enable);
+ inp->idata_supported = 0;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ inp->fibnum = so->so_fibnum;
+#else
+ inp->fibnum = 0;
+#endif
+#if defined(__Userspace__)
+ inp->ulp_info = NULL;
+ inp->recv_callback = NULL;
+ inp->send_callback = NULL;
+ inp->send_sb_threshold = 0;
+#endif
+ /* init the small hash table we use to track asocid <-> tcb */
+ inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark);
+ if (inp->sctp_asocidhash == NULL) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ crfree(inp->ip_inp.inp.inp_cred);
+#endif
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (ENOBUFS);
+ }
+ SCTP_INCR_EP_COUNT();
+ inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+ SCTP_INP_INFO_WUNLOCK();
+
+ so->so_pcb = (caddr_t)inp;
+
+ if (SCTP_SO_TYPE(so) == SOCK_SEQPACKET) {
+ /* UDP style socket */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_UNBOUND);
+ /* Be sure it is NON-BLOCKING IO for UDP */
+ /* SCTP_SET_SO_NBIO(so); */
+ } else if (SCTP_SO_TYPE(so) == SOCK_STREAM) {
+ /* TCP style socket */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+ SCTP_PCB_FLAGS_UNBOUND);
+ /* Be sure we have blocking IO by default */
+ SOCK_LOCK(so);
+ SCTP_CLEAR_SO_NBIO(so);
+ SOCK_UNLOCK(so);
+ } else {
+ /*
+ * unsupported socket type (RAW, etc.) - in case we missed it
+ * in protosw
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP);
+ so->so_pcb = NULL;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ crfree(inp->ip_inp.inp.inp_cred);
+#endif
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ return (EOPNOTSUPP);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ }
+ inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize),
+ &inp->sctp_hashmark);
+ if (inp->sctp_tcbhash == NULL) {
+ SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n");
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ so->so_pcb = NULL;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ crfree(inp->ip_inp.inp.inp_cred);
+#endif
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ return (ENOBUFS);
+ }
+#ifdef SCTP_MVRF
+ inp->vrf_size = SCTP_DEFAULT_VRF_SIZE;
+ SCTP_MALLOC(inp->m_vrf_ids, uint32_t *,
+ (sizeof(uint32_t) * inp->vrf_size), SCTP_M_MVRF);
+ if (inp->m_vrf_ids == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ so->so_pcb = NULL;
+ SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ crfree(inp->ip_inp.inp.inp_cred);
+#endif
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ return (ENOBUFS);
+ }
+ inp->m_vrf_ids[0] = vrf_id;
+ inp->num_vrfs = 1;
+#endif
+ inp->def_vrf_id = vrf_id;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ inp->ip_inp.inp.inpcb_mtx = lck_mtx_alloc_init(SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr);
+ if (inp->ip_inp.inp.inpcb_mtx == NULL) {
+ SCTP_PRINTF("in_pcballoc: can't alloc mutex! so=%p\n", (void *)so);
+#ifdef SCTP_MVRF
+ SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF);
+#endif
+ SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+ so->so_pcb = NULL;
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_UNLOCK_EXC(SCTP_BASE_INFO(sctbinfo).ipi_lock);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+ return (ENOMEM);
+ }
+#elif defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+ lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr);
+#else
+ lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).ipi_lock_grp, SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
+#endif
+#endif
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_LOCK_INIT(inp);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ INP_LOCK_INIT(&inp->ip_inp.inp, "inp", "sctpinp");
+#endif
+ SCTP_INP_READ_INIT(inp);
+ SCTP_ASOC_CREATE_LOCK_INIT(inp);
+ /* lock the new ep */
+ SCTP_INP_WLOCK(inp);
+
+ /* add it to the info area */
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ inp->ip_inp.inp.inp_pcbinfo = &SCTP_BASE_INFO(sctbinfo);
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+ LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).listhead, &inp->ip_inp.inp, inp_list);
+#else
+ LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).ipi_listhead, &inp->ip_inp.inp, inp_list);
+#endif
+#endif
+ SCTP_INP_INFO_WUNLOCK();
+
+ TAILQ_INIT(&inp->read_queue);
+ LIST_INIT(&inp->sctp_addr_list);
+
+ LIST_INIT(&inp->sctp_asoc_list);
+
+#ifdef SCTP_TRACK_FREED_ASOCS
+ /* TEMP CODE */
+ LIST_INIT(&inp->sctp_asoc_free_list);
+#endif
+ /* Init the timer structure for signature change */
+ SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer);
+ inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
+
+ /* now init the actual endpoint default data */
+ m = &inp->sctp_ep;
+
+ /* setup the base timeout information */
+ m->sctp_timeoutticks[SCTP_TIMER_SEND] = sctp_secs_to_ticks(SCTP_SEND_SEC); /* needed ? */
+ m->sctp_timeoutticks[SCTP_TIMER_INIT] = sctp_secs_to_ticks(SCTP_INIT_SEC); /* needed ? */
+ m->sctp_timeoutticks[SCTP_TIMER_RECV] = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default));
+ m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default));
+ m->sctp_timeoutticks[SCTP_TIMER_PMTU] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default));
+ m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default));
+ m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default));
+ /* all max/min max are in ms */
+ m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default);
+ m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default);
+ m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default);
+ m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default);
+ m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default);
+ m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default);
+ m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default);
+ m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default);
+ m->def_net_pf_threshold = SCTP_BASE_SYSCTL(sctp_path_pf_threshold);
+ m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
+ m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
+ m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default);
+ m->fr_max_burst = SCTP_BASE_SYSCTL(sctp_fr_max_burst_default);
+
+ m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module);
+ m->sctp_default_ss_module = SCTP_BASE_SYSCTL(sctp_default_ss_module);
+ m->max_open_streams_intome = SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default);
+ /* number of streams to pre-open on an association */
+ m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default);
+
+ m->default_mtu = 0;
+ /* Add adaptation cookie */
+ m->adaptation_layer_indicator = 0;
+ m->adaptation_layer_indicator_provided = 0;
+
+ /* seed random number generator */
+ m->random_counter = 1;
+ m->store_at = SCTP_SIGNATURE_SIZE;
+ SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers));
+ sctp_fill_random_store(m);
+
+ /* Minimum cookie size */
+ m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
+ sizeof(struct sctp_state_cookie);
+ m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
+
+ /* Setup the initial secret */
+ (void)SCTP_GETTIME_TIMEVAL(&time);
+ m->time_of_secret_change = time.tv_sec;
+
+ for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+ m->secret_key[0][i] = sctp_select_initial_TSN(m);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
+
+ /* How long is a cookie good for? */
+ m->def_cookie_life = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default));
+ /*
+ * Initialize authentication parameters
+ */
+ m->local_hmacs = sctp_default_supported_hmaclist();
+ m->local_auth_chunks = sctp_alloc_chunklist();
+ if (inp->asconf_supported) {
+ sctp_auth_add_chunk(SCTP_ASCONF, m->local_auth_chunks);
+ sctp_auth_add_chunk(SCTP_ASCONF_ACK, m->local_auth_chunks);
+ }
+ m->default_dscp = 0;
+#ifdef INET6
+ m->default_flowlabel = 0;
+#endif
+ m->port = 0; /* encapsulation disabled by default */
+ LIST_INIT(&m->shared_keys);
+ /* add default NULL key as key id 0 */
+ null_key = sctp_alloc_sharedkey();
+ sctp_insert_sharedkey(&m->shared_keys, null_key);
+ SCTP_INP_WUNLOCK(inp);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 12);
+#endif
+ return (error);
+}
+
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+ uint16_t lport, rport;
+ struct sctppcbhead *head;
+ struct sctp_laddr *laddr, *oladdr;
+
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(old_inp);
+ SCTP_INP_WLOCK(new_inp);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+
+ new_inp->sctp_ep.time_of_secret_change =
+ old_inp->sctp_ep.time_of_secret_change;
+ memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
+ sizeof(old_inp->sctp_ep.secret_key));
+ new_inp->sctp_ep.current_secret_number =
+ old_inp->sctp_ep.current_secret_number;
+ new_inp->sctp_ep.last_secret_number =
+ old_inp->sctp_ep.last_secret_number;
+ new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
+
+ /* make it so new data pours into the new socket */
+ stcb->sctp_socket = new_inp->sctp_socket;
+ stcb->sctp_ep = new_inp;
+
+ /* Copy the port across */
+ lport = new_inp->sctp_lport = old_inp->sctp_lport;
+ rport = stcb->rport;
+ /* Pull the tcb from the old endpoint */
+ LIST_REMOVE(stcb, sctp_tcbhash);
+ LIST_REMOVE(stcb, sctp_tcblist);
+ if (stcb->asoc.in_asocid_hash) {
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ }
+ /* Now insert the new_inp into the TCP connected hash */
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))];
+
+ LIST_INSERT_HEAD(head, new_inp, sctp_hash);
+ /* It's safe to access */
+ new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+
+ /* Now move the tcb into the endpoint list */
+ LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
+ /*
+ * Question, do we even need to worry about the ep-hash since we
+ * only have one connection? Probably not :> so let's get rid of it
+ * and not suck up any kernel memory in that.
+ */
+ if (stcb->asoc.in_asocid_hash) {
+ struct sctpasochead *lhd;
+ lhd = &new_inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
+ new_inp->hashasocidmark)];
+ LIST_INSERT_HEAD(lhd, stcb, sctp_tcbasocidhash);
+ }
+ /* OK. Let's restart the timers. */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp,
+ stcb, net);
+ }
+
+ SCTP_INP_INFO_WUNLOCK();
+ if (new_inp->sctp_tcbhash != NULL) {
+ SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark);
+ new_inp->sctp_tcbhash = NULL;
+ }
+ if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* Subset bound, so copy in the laddr list from the old_inp */
+ LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
+ laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (laddr == NULL) {
+ /*
+ * Gak, what can we do? This assoc is really
+ * HOSED. We probably should send an abort
+ * here.
+ */
+ SCTPDBG(SCTP_DEBUG_PCB1, "Association hosed in TCP model, out of laddr memory\n");
+ continue;
+ }
+ SCTP_INCR_LADDR_COUNT();
+ memset(laddr, 0, sizeof(*laddr));
+ (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time);
+ laddr->ifa = oladdr->ifa;
+ atomic_add_int(&laddr->ifa->refcount, 1);
+ LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
+ sctp_nxt_addr);
+ new_inp->laddr_count++;
+ if (oladdr == stcb->asoc.last_used_address) {
+ stcb->asoc.last_used_address = laddr;
+ }
+ }
+ }
+ /* Now any running timers need to be adjusted. */
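+ /*
+ * Each timer records the inp it was started against (its .ep field),
+ * so any timer still pointing at old_inp must be retargeted at
+ * new_inp, moving the inp references with it.
+ */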
+ if (stcb->asoc.dack_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ stcb->asoc.dack_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (stcb->asoc.asconf_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ stcb->asoc.asconf_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (stcb->asoc.strreset_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ stcb->asoc.strreset_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (stcb->asoc.shut_guard_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ stcb->asoc.shut_guard_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (stcb->asoc.autoclose_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ stcb->asoc.autoclose_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (stcb->asoc.delete_prim_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ stcb->asoc.delete_prim_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ /* now what about the nets? */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->pmtu_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ net->pmtu_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (net->hb_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ net->hb_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ if (net->rxt_timer.ep == old_inp) {
+ SCTP_INP_DECR_REF(old_inp);
+ net->rxt_timer.ep = new_inp;
+ SCTP_INP_INCR_REF(new_inp);
+ }
+ }
+ SCTP_INP_WUNLOCK(new_inp);
+ SCTP_INP_WUNLOCK(old_inp);
+}
+
+/*
+ * insert an laddr entry with the given ifa for the desired list
+ */
+static int
+sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act)
+{
+ struct sctp_laddr *laddr;
+
+ laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (laddr == NULL) {
+ /* out of memory? */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_INCR_LADDR_COUNT();
+ memset(laddr, 0, sizeof(*laddr));
+ (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time);
+ laddr->ifa = ifa;
+ laddr->action = act;
+ atomic_add_int(&ifa->refcount, 1);
+ /* insert it */
+ LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
+
+ return (0);
+}
+
+/*
+ * Remove an laddr entry from the local address list (on an assoc)
+ */
+static void
+sctp_remove_laddr(struct sctp_laddr *laddr)
+{
+
+ /* remove from the list */
+ LIST_REMOVE(laddr, sctp_nxt_addr);
+ sctp_free_ifa(laddr->ifa);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr);
+ SCTP_DECR_LADDR_COUNT();
+}
+
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__))
+/*
+ * Don't know why, but without this there is an unknown reference when
+ * compiling NetBSD... hmm
+ */
+extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6);
+#endif
+
+
+/* sctp_ifap is used to bypass normal local address validation checks */
+int
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+ struct sctp_ifa *sctp_ifap, struct thread *p)
+#elif defined(_WIN32) && !defined(__Userspace__)
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+ struct sctp_ifa *sctp_ifap, PKTHREAD p)
+#else
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+ struct sctp_ifa *sctp_ifap, struct proc *p)
+#endif
+{
+ /* bind a ep to a socket address */
+ struct sctppcbhead *head;
+ struct sctp_inpcb *inp, *inp_tmp;
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ struct inpcb *ip_inp;
+#endif
+ int port_reuse_active = 0;
+ int bindall;
+#ifdef SCTP_MVRF
+ int i;
+#endif
+ uint16_t lport;
+ int error;
+ uint32_t vrf_id;
+
+ lport = 0;
+ bindall = 1;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ ip_inp = (struct inpcb *)so->so_pcb;
+#endif
+#ifdef SCTP_DEBUG
+ if (addr) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port: %d\n",
+ ntohs(((struct sockaddr_in *)addr)->sin_port));
+ SCTPDBG(SCTP_DEBUG_PCB1, "Addr: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* already did a bind, subsequent binds NOT allowed! */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INVARIANTS
+ if (p == NULL)
+ panic("null proc/thread");
+#endif
+#endif
+ if (addr != NULL) {
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ /* IPV6_V6ONLY socket? */
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(*sin)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#endif
+
+ sin = (struct sockaddr_in *)addr;
+ lport = sin->sin_port;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /*
+ * For LOOPBACK the prison_local_ip4() call will transmute the ip address
+ * to the proper value.
+ */
+ if (p && (error = prison_local_ip4(p->td_ucred, &sin->sin_addr)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+#endif
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ bindall = 0;
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(*sin6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ lport = sin6->sin6_port;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /*
+ * For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address
+ * to the proper value.
+ */
+ if (p && (error = prison_local_ip6(p->td_ucred, &sin6->sin6_addr,
+ (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+#endif
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ bindall = 0;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ /* KAME hack: embed scopeid */
+#if defined(SCTP_KAME)
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#elif defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL) != 0) {
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL, NULL) != 0) {
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#elif defined(__FreeBSD__) && !defined(__Userspace__)
+ error = scope6_check_id(sin6, MODULE_GLOBAL(ip6_use_defzone));
+ if (error != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+#else
+ if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#endif
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ }
+#ifndef SCOPEDROUTING
+ /* this must be cleared for ifa_ifwithaddr() */
+ sin6->sin6_scope_id = 0;
+#endif /* SCOPEDROUTING */
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn;
+
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_conn)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ sconn = (struct sockaddr_conn *)addr;
+ lport = sconn->sconn_port;
+ if (sconn->sconn_addr != NULL) {
+ bindall = 0;
+ }
+ break;
+ }
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EAFNOSUPPORT);
+ return (EAFNOSUPPORT);
+ }
+ }
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ /* Set up a vrf_id to be the default for the non-bind-all case. */
+ vrf_id = inp->def_vrf_id;
+
+ /* increase our count due to the unlock we do */
+ SCTP_INP_INCR_REF(inp);
+ if (lport) {
+ /*
+ * Did the caller specify a port? If so, we must see if an EP
+ * already has this one bound.
+ */
+ /* got to be root to get at low ports */
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ if (ntohs(lport) < IPPORT_RESERVED) {
+ if ((p != NULL) && ((error =
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ priv_check(p, PRIV_NETINET_RESERVEDPORT)
+#elif defined(__APPLE__) && !defined(__Userspace__)
+ suser(p->p_ucred, &p->p_acflag)
+#elif defined(__Userspace__) /* must be true to use raw socket */
+ 1
+#else
+ suser(p, 0)
+#endif
+ ) != 0)) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (error);
+ }
+ }
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ if (bindall) {
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ vrf_id = inp->m_vrf_ids[i];
+#else
+ vrf_id = inp->def_vrf_id;
+#endif
+ inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
+ if (inp_tmp != NULL) {
+ /*
+ * The lookup returned an endpoint,
+ * so lower its count. Note that we
+ * are not bound, so inp_tmp should
+ * NEVER be inp. And it is this inp
+ * (inp_tmp) that got the reference
+ * bump, so we must lower it.
+ */
+ SCTP_INP_DECR_REF(inp_tmp);
+ /* unlock info */
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ /* Ok, must be one-2-one and allowing port re-use */
+ port_reuse_active = 1;
+ goto continue_anyway;
+ }
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+#ifdef SCTP_MVRF
+ }
+#endif
+ } else {
+ inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
+ if (inp_tmp != NULL) {
+ /*
+ * The lookup returned an endpoint, so lower
+ * its count. Note that we are not bound, so
+ * inp_tmp should NEVER be inp. And it is this
+ * inp (inp_tmp) that got the reference bump,
+ * so we must lower it.
+ */
+ SCTP_INP_DECR_REF(inp_tmp);
+ /* unlock info */
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ /* Ok, must be one-2-one and allowing port re-use */
+ port_reuse_active = 1;
+ goto continue_anyway;
+ }
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ }
+ continue_anyway:
+ SCTP_INP_WLOCK(inp);
+ if (bindall) {
+ /* verify that the lport is not in use by a singleton */
+ if ((port_reuse_active == 0) &&
+ (inp_tmp = sctp_isport_inuse(inp, lport, vrf_id))) {
+ /* Sorry someone already has this one bound */
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ port_reuse_active = 1;
+ } else {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ }
+ }
+ } else {
+ uint16_t first, last, candidate;
+ uint16_t count;
+ int done;
+
+#if defined(_WIN32) && !defined(__Userspace__)
+ first = 1;
+ last = 0xffff;
+#else
+#if defined(__Userspace__)
+ /* TODO ensure uid is 0, etc... */
+#elif (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ if (ip_inp->inp_flags & INP_HIGHPORT) {
+ first = MODULE_GLOBAL(ipport_hifirstauto);
+ last = MODULE_GLOBAL(ipport_hilastauto);
+ } else if (ip_inp->inp_flags & INP_LOWPORT) {
+ if (p && (error =
+#if defined(__FreeBSD__)
+ priv_check(p, PRIV_NETINET_RESERVEDPORT)
+#elif defined(__APPLE__)
+ suser(p->p_ucred, &p->p_acflag)
+#else
+ suser(p, 0)
+#endif
+ )) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+ first = MODULE_GLOBAL(ipport_lowfirstauto);
+ last = MODULE_GLOBAL(ipport_lowlastauto);
+ } else {
+#endif
+ first = MODULE_GLOBAL(ipport_firstauto);
+ last = MODULE_GLOBAL(ipport_lastauto);
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ }
+#endif
+#endif
+ if (first > last) {
+ uint16_t temp;
+
+ temp = first;
+ first = last;
+ last = temp;
+ }
+ count = last - first + 1; /* number of candidates */
+ candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count);
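+ /*
+ * Worked example (illustrative): with first = 49152 and last = 65535,
+ * count = 16384 and candidate starts at a pseudo-random port in that
+ * range; the loop below walks forward, wrapping from last to first,
+ * until a free port is found or all candidates are exhausted.
+ */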
+
+ done = 0;
+ while (!done) {
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (sctp_isport_inuse(inp, htons(candidate), inp->m_vrf_ids[i]) != NULL) {
+ break;
+ }
+ }
+ if (i == inp->num_vrfs) {
+ done = 1;
+ }
+#else
+ if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) {
+ done = 1;
+ }
+#endif
+ if (!done) {
+ if (--count == 0) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ if (candidate == last)
+ candidate = first;
+ else
+ candidate = candidate + 1;
+ }
+ }
+ lport = htons(candidate);
+ }
+ SCTP_INP_DECR_REF(inp);
+ if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
+ SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /*
+ * this really should not happen. The guy did a non-blocking
+ * bind and then did a close at the same time.
+ */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ /* ok, we look clear to give out this port, so let's set up the binding */
+ if (bindall) {
+ /* binding to all addresses, so just set in the proper flags */
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
+ /* set the automatic addr changes from kernel flag */
+ if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+ }
+ /* set the automatic mobility_base from kernel
+ * flag (by micchie)
+ */
+ if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) {
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ } else {
+ sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ }
+ /* set the automatic mobility_fasthandoff from kernel
+ * flag (by micchie)
+ */
+ if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) {
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ } else {
+ sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ }
+ } else {
+ /*
+ * bind specific, make sure flags is off and add a new
+ * address structure to the sctp_addr_list inside the ep
+ * structure.
+ *
+ * We will need to allocate one and insert it at the head. The
+ * socketopt call can just insert new addresses in there as
+ * well. It will also have to do the embed scope kame hack
+ * too (before adding).
+ */
+ struct sctp_ifa *ifa;
+ union sctp_sockstore store;
+
+ memset(&store, 0, sizeof(store));
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ memcpy(&store.sin, addr, sizeof(struct sockaddr_in));
+ store.sin.sin_port = 0;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ memcpy(&store.sin6, addr, sizeof(struct sockaddr_in6));
+ store.sin6.sin6_port = 0;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(&store.sconn, addr, sizeof(struct sockaddr_conn));
+ store.sconn.sconn_port = 0;
+ break;
+#endif
+ default:
+ break;
+ }
+ /*
+ * first find the interface with the bound address need to
+ * zero out the port to find the address! yuck! can't do
+ * this earlier since need port for sctp_pcb_findep()
+ */
+ if (sctp_ifap != NULL) {
+ ifa = sctp_ifap;
+ } else {
+ /* Note: for BSD we always hit here; other
+ * O/S's will pass things in via the
+ * sctp_ifap argument.
+ */
+ ifa = sctp_find_ifa_by_addr(&store.sa,
+ vrf_id, SCTP_ADDR_NOT_LOCKED);
+ }
+ if (ifa == NULL) {
+ /* Can't find an interface with that address */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRNOTAVAIL);
+ return (EADDRNOTAVAIL);
+ }
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ /* GAK, more FIXME IFA lock? */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ /* Can't bind a non-existent addr. */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ }
+#endif
+ /* we're not bound all */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
+ /* allow bindx() to send ASCONF's for binding changes */
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ /* clear automatic addr changes from kernel flag */
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+
+ /* add this address to the endpoint list */
+ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0);
+ if (error != 0) {
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (error);
+ }
+ inp->laddr_count++;
+ }
+ /* find the bucket */
+ if (port_reuse_active) {
+ /* Put it into tcp 1-2-1 hash */
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))];
+ inp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
+ } else {
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))];
+ }
+ /* put it in the bucket */
+ LIST_INSERT_HEAD(head, inp, sctp_hash);
+ SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d - in tcp_pool=%d\n",
+ (void *)head, ntohs(lport), port_reuse_active);
+ /* set in the port */
+ inp->sctp_lport = lport;
+
+ /* turn off just the unbound flag */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+}
+
+
+static void
+sctp_iterator_inp_being_freed(struct sctp_inpcb *inp)
+{
+ struct sctp_iterator *it, *nit;
+
+ /*
+ * We enter with the only the ITERATOR_LOCK in place and a write
+ * lock on the inp_info stuff.
+ */
+ it = sctp_it_ctl.cur_it;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (it && (it->vn != curvnet)) {
+ /* It's not looking at our VNET */
+ return;
+ }
+#endif
+ if (it && (it->inp == inp)) {
+ /*
+ * This is tricky: we hold the iterator lock, but
+ * when the iterator resumes and gets the lock (when
+ * we release it) it will try to operate on inp. We
+ * need to stop that from happening. Of course the
+ * iterator holds a reference on the stcb and inp,
+ * so we can mark it and it will stop.
+ *
+ * If it's a single-inp iterator, we set the
+ * end-iterator flag. Otherwise we tell the
+ * iterator to move on to the next inp.
+ */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
+ } else {
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_INP;
+ }
+ }
+ /* Now go through and remove any single reference to
+ * our inp that may still be pending on the list
+ */
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (it->vn != curvnet) {
+ continue;
+ }
+#endif
+ if (it->inp == inp) {
+ /* This one points to me; is it inp-specific? */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ /* Remove and free this one */
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead,
+ it, sctp_nxt_itr);
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it, SCTP_M_ITER);
+ } else {
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ if (it->inp) {
+ SCTP_INP_INCR_REF(it->inp);
+ }
+ }
+ /* When it's put on the list the refcnt is incremented, so decrement it */
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+}
+
+/* release the sctp_inpcb and unbind the port */
+void
+sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
+{
+ /*
+ * Here we free an endpoint. We must find it (if it is in the
+ * hash table) and remove it from there. Then we must also find
+ * it in the overall list and remove it from there. After all
+ * removals are complete, any timers have to be stopped. Then
+ * start the actual freeing: a) any local lists, b) any
+ * associations, c) the hash of all associations, d) finally
+ * the ep itself.
+ */
+ struct sctp_tcb *asoc, *nasoc;
+ struct sctp_laddr *laddr, *nladdr;
+ struct inpcb *ip_pcb;
+ struct socket *so;
+ int being_refed = 0;
+ struct sctp_queued_to_read *sq, *nsq;
+#if !defined(__Userspace__)
+#if !defined(__FreeBSD__)
+ sctp_rtentry_t *rt;
+#endif
+#endif
+ int cnt;
+ sctp_sharedkey_t *shared_key, *nshared_key;
+
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sctp_lock_assert(SCTP_INP_SO(inp));
+#endif
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 0);
+#endif
+ SCTP_ITERATOR_LOCK();
+ /* mark any iterators on the list or being processed */
+ sctp_iterator_inp_being_freed(inp);
+ SCTP_ITERATOR_UNLOCK();
+ so = inp->sctp_socket;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* been here before.. eeks.. get out of here */
+ SCTP_PRINTF("This conflict in free SHOULD not be happening! from %d, imm %d\n", from, immediate);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 1);
+#endif
+ return;
+ }
+ SCTP_ASOC_CREATE_LOCK(inp);
+ SCTP_INP_INFO_WLOCK();
+
+ SCTP_INP_WLOCK(inp);
+ if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP;
+ /* socket is gone, so no more wakeups allowed */
+ inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE;
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+
+ }
+ /* First time through we have the socket lock, after that no more. */
+ sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_1);
+
+ if (inp->control) {
+ sctp_m_freem(inp->control);
+ inp->control = NULL;
+ }
+ if (inp->pkt) {
+ sctp_m_freem(inp->pkt);
+ inp->pkt = NULL;
+ }
+ ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer
+ * here but I will be nice :> (i.e.
+ * ip_pcb = ep;) */
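+ /*
+ * Graceful close: walk every association still on this endpoint.
+ * Ones already being freed are just counted; idle front-state
+ * ones are freed now; ones with unread data are aborted; the
+ * rest are moved toward SHUTDOWN. If any remain, defer freeing
+ * the inp until they finish.
+ */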
+ if (immediate == SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) {
+ int cnt_in_sd;
+
+ cnt_in_sd = 0;
+ LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) {
+ SCTP_TCB_LOCK(asoc);
+ if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* Skip guys being freed */
+ cnt_in_sd++;
+ if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) {
+ /*
+ * Special case - we did not start a kill
+ * timer on the asoc because it was not
+ * closed. So go ahead and start it now.
+ */
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_IN_ACCEPT_QUEUE);
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL);
+ }
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ if (((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) &&
+ (asoc->asoc.total_output_queue_size == 0)) {
+ /* If we have data in the queue, we don't want to just
+ * free, since the app may have done send()/close or
+ * connect/send/close and wants the data to get
+ * across first.
+ */
+ /* Just abandon things in the front states */
+ if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) {
+ cnt_in_sd++;
+ }
+ continue;
+ }
+ /* Disconnect the socket please */
+ asoc->sctp_socket = NULL;
+ SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_CLOSED_SOCKET);
+ if ((asoc->asoc.size_on_reasm_queue > 0) ||
+ (asoc->asoc.control_pdapi) ||
+ (asoc->asoc.size_on_all_streams > 0) ||
+ (so && (so->so_rcv.sb_cc > 0))) {
+ /* Left with Data unread */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3;
+ sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (sctp_free_assoc(inp, asoc,
+ SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4) == 0) {
+ cnt_in_sd++;
+ }
+ continue;
+ } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+ TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+ (asoc->asoc.stream_queue_cnt == 0)) {
+ if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(asoc, &asoc->asoc)) {
+ goto abort_anyway;
+ }
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ struct sctp_nets *netp;
+
+ /*
+ * there is nothing queued to send,
+ * so I send shutdown
+ */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(asoc);
+ if (asoc->asoc.alternate) {
+ netp = asoc->asoc.alternate;
+ } else {
+ netp = asoc->asoc.primary_destination;
+ }
+ sctp_send_shutdown(asoc, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
+ netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, NULL);
+ sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED);
+ }
+ } else {
+ /* mark into shutdown pending */
+ SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, NULL);
+ if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(asoc, &asoc->asoc)) {
+ SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+ TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+ (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+ abort_anyway:
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5;
+ sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (sctp_free_assoc(inp, asoc,
+ SCTP_PCBFREE_NOFORCE,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_6) == 0) {
+ cnt_in_sd++;
+ }
+ continue;
+ } else {
+ sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+ }
+ }
+ cnt_in_sd++;
+ SCTP_TCB_UNLOCK(asoc);
+ }
+ /* now is there some left in our SHUTDOWN state? */
+ if (cnt_in_sd) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 2);
+#endif
+ inp->sctp_socket = NULL;
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return;
+ }
+ }
+ inp->sctp_socket = NULL;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
+ SCTP_PCB_FLAGS_UNBOUND) {
+ /*
+ * ok, this guy has been bound. Its port is
+ * somewhere in the SCTP_BASE_INFO(hash table). Remove
+ * it!
+ */
+ LIST_REMOVE(inp, sctp_hash);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
+ }
+
+ /* If there is a timer running to kill us,
+ * forget it, since it may be contending for
+ * the INP lock.. which would cause us
+ * to die ...
+ */
+ cnt = 0;
+ LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) {
+ SCTP_TCB_LOCK(asoc);
+ if (immediate != SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) {
+ /* Disconnect the socket please */
+ asoc->sctp_socket = NULL;
+ SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_CLOSED_SOCKET);
+ }
+ if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) {
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_IN_ACCEPT_QUEUE);
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL);
+ }
+ cnt++;
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ /* Free associations that are NOT killing us */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
+ ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) {
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7;
+ sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ cnt++;
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_8) == 0) {
+ cnt++;
+ }
+ }
+ if (cnt) {
+ /* Ok we have someone out there that will kill us */
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 3);
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return;
+ }
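+ /*
+ * If any of our locks are contended, a reference remains, or an
+ * IP-level close is still pending, someone may still reach this
+ * inp: arm the INPKILL timer and finish the teardown from there
+ * instead of freeing now.
+ */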
+ if (SCTP_INP_LOCK_CONTENDED(inp))
+ being_refed++;
+ if (SCTP_INP_READ_CONTENDED(inp))
+ being_refed++;
+ if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp))
+ being_refed++;
+ /* NOTE: 0 refcount also means no timers are referencing us. */
+ if ((inp->refcount) ||
+ (being_refed) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 4);
+#endif
+ sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return;
+ }
+ inp->sctp_ep.signature_change.type = 0;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
+ /* Remove it from the list .. last thing we need a
+ * lock for.
+ */
+ LIST_REMOVE(inp, sctp_list);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 5);
+#endif
+
+#if !(defined(_WIN32) || defined(__Userspace__))
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ rt = ip_pcb->inp_route.ro_rt;
+#endif
+#endif
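+ /*
+ * (rt, stashed above on platforms that cache a route in the
+ * PCB, is released further below once the PCB options have
+ * been freed)
+ */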
+
+ if ((inp->sctp_asocidhash) != NULL) {
+ SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark);
+ inp->sctp_asocidhash = NULL;
+ }
+ /*sa_ignore FREED_MEMORY*/
+ TAILQ_FOREACH_SAFE(sq, &inp->read_queue, next, nsq) {
+ /* It's only abandoned if it had data left */
+ if (sq->length)
+ SCTP_STAT_INCR(sctps_left_abandon);
+
+ TAILQ_REMOVE(&inp->read_queue, sq, next);
+ sctp_free_remote_addr(sq->whoFrom);
+ if (so)
+ so->so_rcv.sb_cc -= sq->length;
+ if (sq->data) {
+ sctp_m_freem(sq->data);
+ sq->data = NULL;
+ }
+ /*
+ * no need to free the net count, since at this point all
+ * assoc's are gone.
+ */
+ sctp_free_a_readq(NULL, sq);
+ }
+ /* Now the sctp_pcb things */
+ /*
+ * free each asoc if it is not already closed/freed. We can't
+ * use the macro here since le_next will get freed as part of
+ * the sctp_free_assoc() call.
+ */
+ if (ip_pcb->inp_options) {
+ (void)sctp_m_free(ip_pcb->inp_options);
+ ip_pcb->inp_options = 0;
+ }
+
+#if !(defined(_WIN32) || defined(__Userspace__))
+#if !defined(__FreeBSD__)
+ if (rt) {
+ RTFREE(rt);
+ ip_pcb->inp_route.ro_rt = 0;
+ }
+#endif
+#endif
+
+#ifdef INET6
+#if !(defined(_WIN32) || defined(__Userspace__))
+#if (defined(__FreeBSD__) || defined(__APPLE__) && !defined(__Userspace__))
+ if (ip_pcb->inp_vflag & INP_IPV6) {
+#else
+ if (inp->inp_vflag & INP_IPV6) {
+#endif
+ ip6_freepcbopts(ip_pcb->in6p_outputopts);
+ }
+#endif
+#endif /* INET6 */
+ ip_pcb->inp_vflag = 0;
+ /* free up authentication fields */
+ if (inp->sctp_ep.local_auth_chunks != NULL)
+ sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+ if (inp->sctp_ep.local_hmacs != NULL)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+
+ LIST_FOREACH_SAFE(shared_key, &inp->sctp_ep.shared_keys, next, nshared_key) {
+ LIST_REMOVE(shared_key, next);
+ sctp_free_sharedkey(shared_key);
+ /*sa_ignore FREED_MEMORY*/
+ }
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ inp->ip_inp.inp.inp_state = INPCB_STATE_DEAD;
+ if (in_pcb_checkstate(&inp->ip_inp.inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
+#ifdef INVARIANTS
+ panic("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp);
+#else
+ SCTP_PRINTF("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp);
+#endif
+ }
+ inp->ip_inp.inp.inp_socket->so_flags |= SOF_PCBCLEARING;
+#endif
+ /*
+ * if we have an address list the following will free the list of
+ * ifaddr's that are set into this ep. Again macro limitations here,
+ * since the LIST_FOREACH could be a bad idea.
+ */
+ LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+ sctp_remove_laddr(laddr);
+ }
+
+#ifdef SCTP_TRACK_FREED_ASOCS
+ /* TEMP CODE */
+ LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_free_list, sctp_tcblist, nasoc) {
+ LIST_REMOVE(asoc, sctp_tcblist);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), asoc);
+ SCTP_DECR_ASOC_COUNT();
+ }
+ /* *** END TEMP CODE ****/
+#endif
+#ifdef SCTP_MVRF
+ SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF);
+#endif
+ /* Now let's see about freeing the EP hash table. */
+ if (inp->sctp_tcbhash != NULL) {
+ SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+ inp->sctp_tcbhash = NULL;
+ }
+ /* Now we must put the ep memory back into the zone pool */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ crfree(inp->ip_inp.inp.inp_cred);
+ INP_LOCK_DESTROY(&inp->ip_inp.inp);
+#endif
+ SCTP_INP_LOCK_DESTROY(inp);
+ SCTP_INP_READ_DESTROY(inp);
+ SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_DECR_EP_COUNT();
+#else
+ /* For Tiger, we will do this later... */
+#endif
+}
+
+
+struct sctp_nets *
+sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
+{
+ struct sctp_nets *net;
+ /* locate the address */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
+ return (net);
+ }
+ return (NULL);
+}
+
+
+int
+sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id)
+{
+ struct sctp_ifa *sctp_ifa;
+ sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (sctp_ifa) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+/*
+ * Adds a remote endpoint address, done with the INIT/INIT-ACK as well as
+ * when an ASCONF arrives that adds it. It will also initialize all the
+ * cwnd-related stats.
+ */
+int
+sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
+ struct sctp_nets **netp, uint16_t port, int set_scope, int from)
+{
+ /*
+ * The following is redundant to the same lines in the
+ * sctp_aloc_assoc() but is needed since others call the add
+ * address function
+ */
+ struct sctp_nets *net, *netfirst;
+ int addr_inscope;
+
+ SCTPDBG(SCTP_DEBUG_PCB1, "Adding an address (from:%d) to the peer: ",
+ from);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB1, newaddr);
+
+ netfirst = sctp_findnet(stcb, newaddr);
+ if (netfirst) {
+ /*
+ * Lie and return ok, we don't want to make the association
+ * go away for this behavior. It will happen in the TCP
+ * model in a connected socket. It does not reach the hash
+ * table until after the association is built so it can't be
+ * found. Mark as reachable, since the initial creation will
+ * have been cleared and the NOT_IN_ASSOC flag will have
+ * been added... and we don't want to end up removing it
+ * again.
+ */
+ if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ netfirst->dest_state = (SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_UNCONFIRMED);
+ } else {
+ netfirst->dest_state = SCTP_ADDR_REACHABLE;
+ }
+
+ return (0);
+ }
+ addr_inscope = 1;
+ switch (newaddr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)newaddr;
+ if (sin->sin_addr.s_addr == 0) {
+ /* Invalid address */
+ return (-1);
+ }
+ /* zero out the zero area */
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+
+ /* assure len is set */
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+ if (set_scope) {
+ if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ stcb->asoc.scope.ipv4_local_scope = 1;
+ }
+ } else {
+ /* Validate the address is in scope */
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
+ (stcb->asoc.scope.ipv4_local_scope == 0)) {
+ addr_inscope = 0;
+ }
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)newaddr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /* Invalid address */
+ return (-1);
+ }
+ /* assure len is set */
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ if (set_scope) {
+ if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) {
+ stcb->asoc.scope.loopback_scope = 1;
+ stcb->asoc.scope.local_scope = 0;
+ stcb->asoc.scope.ipv4_local_scope = 1;
+ stcb->asoc.scope.site_scope = 1;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is LINK_LOCAL we
+ * must have common site scope. Don't set
+ * the local scope, since we may not share
+ * all links; only loopback can do this.
+ * Links on the local network would also be
+ * on our private network for v4.
+ */
+ stcb->asoc.scope.ipv4_local_scope = 1;
+ stcb->asoc.scope.site_scope = 1;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is SITE_LOCAL then
+ * we must have site scope in common.
+ */
+ stcb->asoc.scope.site_scope = 1;
+ }
+ } else {
+ /* Validate the address is in scope */
+ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
+ (stcb->asoc.scope.loopback_scope == 0)) {
+ addr_inscope = 0;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
+ (stcb->asoc.scope.local_scope == 0)) {
+ addr_inscope = 0;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
+ (stcb->asoc.scope.site_scope == 0)) {
+ addr_inscope = 0;
+ }
+ }
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)newaddr;
+ if (sconn->sconn_addr == NULL) {
+ /* Invalid address */
+ return (-1);
+ }
+#ifdef HAVE_SCONN_LEN
+ sconn->sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ break;
+ }
+#endif
+ default:
+ /* not supported family type */
+ return (-1);
+ }
+ net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets);
+ if (net == NULL) {
+ return (-1);
+ }
+ SCTP_INCR_RADDR_COUNT();
+ memset(net, 0, sizeof(struct sctp_nets));
+ (void)SCTP_GETTIME_TIMEVAL(&net->start_time);
+#ifdef HAVE_SA_LEN
+ memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
+#endif
+ switch (newaddr->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifndef HAVE_SA_LEN
+ memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in));
+#endif
+ ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifndef HAVE_SA_LEN
+ memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in6));
+#endif
+ ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+#ifndef HAVE_SA_LEN
+ memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_conn));
+#endif
+ ((struct sockaddr_conn *)&net->ro._l_addr)->sconn_port = stcb->rport;
+ break;
+#endif
+ default:
+ break;
+ }
+ net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id);
+ if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) {
+ stcb->asoc.scope.loopback_scope = 1;
+ stcb->asoc.scope.ipv4_local_scope = 1;
+ stcb->asoc.scope.local_scope = 0;
+ stcb->asoc.scope.site_scope = 1;
+ addr_inscope = 1;
+ }
+ net->failure_threshold = stcb->asoc.def_net_failure;
+ net->pf_threshold = stcb->asoc.def_net_pf_threshold;
+ if (addr_inscope == 0) {
+ net->dest_state = (SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_OUT_OF_SCOPE);
+ } else {
+ if (from == SCTP_ADDR_IS_CONFIRMED)
+ /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */
+ net->dest_state = SCTP_ADDR_REACHABLE;
+ else
+ net->dest_state = SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_UNCONFIRMED;
+ }
+ /* We set this to 0; the timer code knows that
+ * this means it's an initial value
+ */
+ net->rto_needed = 1;
+ net->RTO = 0;
+ net->RTO_measured = 0;
+ stcb->asoc.numnets++;
+ net->ref_count = 1;
+ net->cwr_window_tsn = net->last_cwr_tsn = stcb->asoc.sending_seq - 1;
+ net->port = port;
+ net->dscp = stcb->asoc.default_dscp;
+#ifdef INET6
+ net->flowlabel = stcb->asoc.default_flowlabel;
+#endif
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+ net->dest_state |= SCTP_ADDR_NOHB;
+ } else {
+ net->dest_state &= ~SCTP_ADDR_NOHB;
+ }
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
+ net->dest_state |= SCTP_ADDR_NO_PMTUD;
+ } else {
+ net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
+ }
+ net->heart_beat_delay = stcb->asoc.heart_beat_delay;
+ /* Init the timer structure */
+ SCTP_OS_TIMER_INIT(&net->rxt_timer.timer);
+ SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer);
+ SCTP_OS_TIMER_INIT(&net->hb_timer.timer);
+
+ /* Now generate a route for this guy */
+#ifdef INET6
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ /* KAME hack: embed scopeid */
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ (void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL);
+#else
+ (void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL, NULL);
+#endif
+#elif defined(SCTP_KAME)
+ (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+#else
+ (void)in6_embedscope(&sin6->sin6_addr, sin6);
+#endif
+#ifndef SCOPEDROUTING
+ sin6->sin6_scope_id = 0;
+#endif
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif
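+ /*
+ * Allocate a route for this destination within the association's
+ * VRF (and FIB) so that a source address and an initial path MTU
+ * can be derived below.
+ */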
+ SCTP_RTALLOC((sctp_route_t *)&net->ro,
+ stcb->asoc.vrf_id,
+ stcb->sctp_ep->fibnum);
+
+ net->src_addr_selected = 0;
+#if !defined(__Userspace__)
+ if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) {
+ /* Get source address */
+ net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep,
+ stcb,
+ (sctp_route_t *)&net->ro,
+ net,
+ 0,
+ stcb->asoc.vrf_id);
+ if (stcb->asoc.default_mtu > 0) {
+ net->mtu = stcb->asoc.default_mtu;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ net->mtu += SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ net->mtu += SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ net->mtu += sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+#if defined(INET) || defined(INET6)
+ if (net->port) {
+ net->mtu += (uint32_t)sizeof(struct udphdr);
+ }
+#endif
+ } else if (net->ro._s_addr != NULL) {
+ uint32_t imtu, rmtu, hcmtu;
+
+ net->src_addr_selected = 1;
+ /* Now get the interface MTU */
+ if (net->ro._s_addr->ifn_p != NULL) {
+ imtu = SCTP_GATHER_MTU_FROM_INTFC(net->ro._s_addr->ifn_p);
+ } else {
+ imtu = 0;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_nh);
+ hcmtu = sctp_hc_get_mtu(&net->ro._l_addr, stcb->sctp_ep->fibnum);
+#else
+ rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
+ hcmtu = 0;
+#endif
+ net->mtu = sctp_min_mtu(hcmtu, rmtu, imtu);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#else
+ if (rmtu == 0) {
+ /* Start things off to match mtu of interface please. */
+ SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa,
+ net->ro.ro_rt, net->mtu);
+ }
+#endif
+ }
+ }
+#endif
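+ /*
+ * If no MTU could be learned from the interface or route, fall
+ * back to the association's default MTU (plus per-family header
+ * overhead) or, failing that, a conservative per-family constant
+ * (e.g. the IPv6 minimum of 1280).
+ */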
+ if (net->mtu == 0) {
+ if (stcb->asoc.default_mtu > 0) {
+ net->mtu = stcb->asoc.default_mtu;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ net->mtu += SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ net->mtu += SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ net->mtu += sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+#if defined(INET) || defined(INET6)
+ if (net->port) {
+ net->mtu += (uint32_t)sizeof(struct udphdr);
+ }
+#endif
+ } else {
+ switch (newaddr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ net->mtu = SCTP_DEFAULT_MTU;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ net->mtu = 1280;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ net->mtu = 1280;
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
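+ /*
+ * A non-zero net->port indicates UDP encapsulation, so the
+ * usable SCTP MTU shrinks by the size of a UDP header.
+ */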
+#if defined(INET) || defined(INET6)
+ if (net->port) {
+ net->mtu -= (uint32_t)sizeof(struct udphdr);
+ }
+#endif
+ if (from == SCTP_ALLOC_ASOC) {
+ stcb->asoc.smallest_mtu = net->mtu;
+ }
+ if (stcb->asoc.smallest_mtu > net->mtu) {
+ sctp_pathmtu_adjustment(stcb, net->mtu);
+ }
+#ifdef INET6
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(sin6);
+#else
+ (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif /* SCTP_KAME */
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif
+
+ /* JRS - Use the congestion control given in the CC module */
+ if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL)
+ (*stcb->asoc.cc_functions.sctp_set_initial_cc_param)(stcb, net);
+
+ /*
+ * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
+ * of assoc (2005/06/27, iyengar@cis.udel.edu)
+ */
+ net->find_pseudo_cumack = 1;
+ net->find_rtx_pseudo_cumack = 1;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ /* Choose an initial flowid. */
+ net->flowid = stcb->asoc.my_vtag ^
+ ntohs(stcb->rport) ^
+ ntohs(stcb->sctp_ep->sctp_lport);
+ net->flowtype = M_HASHTYPE_OPAQUE_HASH;
+#endif
+ if (netp) {
+ *netp = net;
+ }
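+ /*
+ * Insert the new net into the association's list: nets without
+ * a route go to the tail, while routed nets on a different
+ * interface than the current head are placed ahead of it.
+ */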
+ netfirst = TAILQ_FIRST(&stcb->asoc.nets);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net->ro.ro_nh == NULL) {
+#else
+ if (net->ro.ro_rt == NULL) {
+#endif
+ /* Since we have no route put it at the back */
+ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+ } else if (netfirst == NULL) {
+ /* We are the first one in the pool. */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else if (netfirst->ro.ro_nh == NULL) {
+#else
+ } else if (netfirst->ro.ro_rt == NULL) {
+#endif
+ /*
+ * First one has NO route. Place this one ahead of the first
+ * one.
+ */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else if (net->ro.ro_nh->nh_ifp != netfirst->ro.ro_nh->nh_ifp) {
+#else
+ } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
+#endif
+ /*
+ * This one has a different interface than the one at the
+ * top of the list. Place it ahead.
+ */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else {
+ /*
+ * Ok we have the same interface as the first one. Move
+ * forward until we find either a) one with a NULL route...
+ * insert ahead of that b) one with a different ifp.. insert
+ * after that. c) end of the list.. insert at the tail.
+ */
+ struct sctp_nets *netlook;
+
+ do {
+ netlook = TAILQ_NEXT(netfirst, sctp_next);
+ if (netlook == NULL) {
+ /* End of the list */
+ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+ break;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else if (netlook->ro.ro_nh == NULL) {
+#else
+ } else if (netlook->ro.ro_rt == NULL) {
+#endif
+ /* next one has NO route */
+ TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
+ break;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else if (netlook->ro.ro_nh->nh_ifp != net->ro.ro_nh->nh_ifp) {
+#else
+ } else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) {
+#endif
+ TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
+ net, sctp_next);
+ break;
+ }
+ /* Shift forward */
+ netfirst = netlook;
+ } while (netlook != NULL);
+ }
+
+ /* got to have a primary set */
+ if (stcb->asoc.primary_destination == 0) {
+ stcb->asoc.primary_destination = net;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else if ((stcb->asoc.primary_destination->ro.ro_nh == NULL) &&
+ (net->ro.ro_nh) &&
+#else
+ } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
+ (net->ro.ro_rt) &&
+#endif
+ ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
+ /* No route to current primary adopt new primary */
+ stcb->asoc.primary_destination = net;
+ }
+ /* Validate primary is first */
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if ((net != stcb->asoc.primary_destination) &&
+ (stcb->asoc.primary_destination)) {
+ /* The first one on the list is NOT the primary.
+ * sctp_cmpaddr() is much more efficient if the
+ * primary is the first on the list, so make it
+ * so.
+ */
+ TAILQ_REMOVE(&stcb->asoc.nets,
+ stcb->asoc.primary_destination, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets,
+ stcb->asoc.primary_destination, sctp_next);
+ }
+ return (0);
+}
+
+
+static uint32_t
+sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ uint32_t id;
+ struct sctpasochead *head;
+ struct sctp_tcb *lstcb;
+
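+ /* Hand out the next unused id (retrying on any collision) and
+ * hash the stcb by that id.
+ */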
+ try_again:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* TSNH */
+ return (0);
+ }
+ /*
+ * We don't allow assoc id to be one of SCTP_FUTURE_ASSOC,
+ * SCTP_CURRENT_ASSOC and SCTP_ALL_ASSOC.
+ */
+ if (inp->sctp_associd_counter <= SCTP_ALL_ASSOC) {
+ inp->sctp_associd_counter = SCTP_ALL_ASSOC + 1;
+ }
+ id = inp->sctp_associd_counter;
+ inp->sctp_associd_counter++;
+ lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t)id, 0);
+ if (lstcb) {
+ goto try_again;
+ }
+ head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash);
+ stcb->asoc.in_asocid_hash = 1;
+ return (id);
+}
+
+/*
+ * allocate an association and add it to the endpoint. The caller must be
+ * careful to add all additional addresses right away, as soon as they are
+ * known, or else the assoc may experience a blackout scenario.
+ */
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
+ int *error, uint32_t override_tag, uint32_t vrf_id,
+ uint16_t o_streams, uint16_t port,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct thread *p,
+#elif defined(_WIN32) && !defined(__Userspace__)
+ PKTHREAD p,
+#else
+#if defined(__Userspace__)
+ /* __Userspace__ NULL proc is going to be passed here. See sctp_lower_sosend */
+#endif
+ struct proc *p,
+#endif
+ int initialize_auth_params)
+{
+ /* note the p argument is only valid in unbound sockets */
+
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+ struct sctpasochead *head;
+ uint16_t rport;
+ int err;
+
+ /*
+ * Assumption made here: Caller has done a
+ * sctp_findassociation_ep_addr(ep, addr's); to make sure the
+ * address does not exist already.
+ */
+ if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) {
+ /* Hit max assoc, sorry no more */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ if (firstaddr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+ /*
+ * If it's in the TCP pool, it's NOT allowed to create an
+ * association. The parent listener needs to call
+ * sctp_aloc_assoc.. or the one-2-many socket. If a peeled-off
+ * or connected one does this.. it's an error.
+ */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED)) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_PCB3, "Allocate an association for peer:");
+#ifdef SCTP_DEBUG
+ if (firstaddr) {
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB3, firstaddr);
+ switch (firstaddr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+ ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+ ntohs(((struct sockaddr_in6 *)firstaddr)->sin6_port));
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+ ntohs(((struct sockaddr_conn *)firstaddr)->sconn_port));
+ break;
+#endif
+ default:
+ break;
+ }
+ } else {
+ SCTPDBG(SCTP_DEBUG_PCB3,"None\n");
+ }
+#endif /* SCTP_DEBUG */
+ switch (firstaddr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)firstaddr;
+ if ((ntohs(sin->sin_port) == 0) ||
+ (sin->sin_addr.s_addr == INADDR_ANY) ||
+ (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sin->sin_port;
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)firstaddr;
+ if ((ntohs(sin6->sin6_port) == 0) ||
+ IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
+ IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sin6->sin6_port;
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)firstaddr;
+ if ((ntohs(sconn->sconn_port) == 0) ||
+ (sconn->sconn_addr == NULL)) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sconn->sconn_port;
+ break;
+ }
+#endif
+ default:
+ /* not supported family type */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ /*
+ * If you have not performed a bind, then we need to do the
+ * ephemeral bind for you.
+ */
+ if ((err = sctp_inpcb_bind(inp->sctp_socket, NULL, NULL, p))) {
+ /* bind error, probably perm */
+ *error = err;
+ return (NULL);
+ }
+ }
+ stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb);
+ if (stcb == NULL) {
+ /* out of memory? */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+ *error = ENOMEM;
+ return (NULL);
+ }
+ SCTP_INCR_ASOC_COUNT();
+
+ memset(stcb, 0, sizeof(*stcb));
+ asoc = &stcb->asoc;
+
+ SCTP_TCB_LOCK_INIT(stcb);
+ SCTP_TCB_SEND_LOCK_INIT(stcb);
+ stcb->rport = rport;
+ /* set up back pointers */
+ stcb->sctp_ep = inp;
+ stcb->sctp_socket = inp->sctp_socket;
+ if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id, o_streams))) {
+ /* failed */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_DECR_ASOC_COUNT();
+ *error = err;
+ return (NULL);
+ }
+ /* and the port */
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /* inpcb freed while alloc going on */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_DECR_ASOC_COUNT();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_TCB_LOCK(stcb);
+
+ asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb);
+ /* now that my_vtag is set, add it to the hash */
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /* put it in the bucket in the vtag hash of assoc's for the system */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ SCTP_INP_INFO_WUNLOCK();
+
+ if ((err = sctp_add_remote_addr(stcb, firstaddr, NULL, port, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
+ /* failure.. memory error? */
+ if (asoc->strmout) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ asoc->strmout = NULL;
+ }
+ if (asoc->mapping_array) {
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ asoc->mapping_array = NULL;
+ }
+ if (asoc->nr_mapping_array) {
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->nr_mapping_array = NULL;
+ }
+ SCTP_DECR_ASOC_COUNT();
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ /* Init all the timers */
+ SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer);
+
+ LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
+ /* now file the port under the hash as well */
+ if (inp->sctp_tcbhash != NULL) {
+ head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
+ inp->sctp_hashmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
+ }
+ if (initialize_auth_params == SCTP_INITIALIZE_AUTH_PARAMS) {
+ sctp_initialize_auth_params(inp, stcb);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", (void *)stcb);
+ return (stcb);
+}
+
+
+void
+sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_association *asoc;
+
+ inp = stcb->sctp_ep;
+ asoc = &stcb->asoc;
+ asoc->numnets--;
+ TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+ if (net == asoc->primary_destination) {
+ /* Reset primary */
+ struct sctp_nets *lnet;
+
+ lnet = TAILQ_FIRST(&asoc->nets);
+ /* Mobility adaptation:
+ * Ideally, if the deleted destination is the primary, it becomes
+ * a fast retransmission trigger via the subsequent SET PRIMARY.
+ * (by micchie)
+ */
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n");
+ if (asoc->deleted_primary != NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n");
+ goto out;
+ }
+ asoc->deleted_primary = net;
+ atomic_add_int(&net->ref_count, 1);
+ memset(&net->lastsa, 0, sizeof(net->lastsa));
+ memset(&net->lastsv, 0, sizeof(net->lastsv));
+ sctp_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_PRIM_DELETED);
+ sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED,
+ stcb->sctp_ep, stcb, NULL);
+ }
+out:
+ /* Try to find a confirmed primary */
+ asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
+ }
+ if (net == asoc->last_data_chunk_from) {
+ /* Reset primary */
+ asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
+ }
+ if (net == asoc->last_control_chunk_from) {
+ /* Clear net */
+ asoc->last_control_chunk_from = NULL;
+ }
+ if (net == stcb->asoc.alternate) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_9);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_10);
+ net->dest_state |= SCTP_ADDR_BEING_DELETED;
+ sctp_free_remote_addr(net);
+}
+
+/*
+ * remove a remote endpoint address from an association; it will fail if the
+ * address does not exist.
+ */
+int
+sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
+{
+ /*
+ * Here we need to remove a remote address. This is quite simple: we
+ * first find it in the list of addresses for the association
+ * (stcb->asoc.nets) and then, if it is there, we do a TAILQ_REMOVE
+ * on that item. Note we do not allow it to be removed if there are
+ * no other addresses.
+ */
+ struct sctp_association *asoc;
+ struct sctp_nets *net, *nnet;
+
+ asoc = &stcb->asoc;
+
+ /* locate the address */
+ TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) {
+ if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
+ continue;
+ }
+ if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
+ remaddr)) {
+ /* we found the guy */
+ if (asoc->numnets < 2) {
+ /* Must have at LEAST two remote addresses */
+ return (-1);
+ } else {
+ sctp_remove_net(stcb, net);
+ return (0);
+ }
+ }
+ }
+ /* not found. */
+ return (-2);
+}
+
+void
+sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ int found = 0;
+ int i;
+
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+}
+
+int
+sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ int found = 0;
+ int i;
+
+ SCTP_INP_INFO_WLOCK();
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+ SCTP_INP_INFO_WUNLOCK();
+ return (found);
+}
+
+
+void
+sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ struct timeval now;
+ int set, i;
+
+ if (time == 0) {
+ /* It's disabled */
+ return;
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ set = 0;
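+ /*
+ * Scan the existing blocks for a free slot, opportunistically
+ * expiring stale entries along the way; only if no slot is
+ * found do we allocate and chain a fresh block.
+ */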
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ /* Block(s) present, lets find space, and expire on the fly */
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == 0) &&
+ !set) {
+ twait_block->vtag_block[i].tv_sec_at_expire =
+ now.tv_sec + time;
+ twait_block->vtag_block[i].v_tag = tag;
+ twait_block->vtag_block[i].lport = lport;
+ twait_block->vtag_block[i].rport = rport;
+ set = 1;
+ } else if ((twait_block->vtag_block[i].v_tag) &&
+ ((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) {
+ /* Audit expires this guy */
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
+ if (set == 0) {
+ /* Reuse it for my new tag */
+ twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time;
+ twait_block->vtag_block[i].v_tag = tag;
+ twait_block->vtag_block[i].lport = lport;
+ twait_block->vtag_block[i].rport = rport;
+ set = 1;
+ }
+ }
+ }
+ if (set) {
+ /*
+ * We only do up to the block where we can
+ * place our tag for audits
+ */
+ break;
+ }
+ }
+ /* Need to add a new block to chain */
+ if (!set) {
+ SCTP_MALLOC(twait_block, struct sctp_tagblock *,
+ sizeof(struct sctp_tagblock), SCTP_M_TIMW);
+ if (twait_block == NULL) {
+ return;
+ }
+ memset(twait_block, 0, sizeof(struct sctp_tagblock));
+ LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
+ twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time;
+ twait_block->vtag_block[0].v_tag = tag;
+ twait_block->vtag_block[0].lport = lport;
+ twait_block->vtag_block[0].rport = rport;
+ }
+}
+
+void
+sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_queued_to_read *control, *ncontrol;
+
+ TAILQ_FOREACH_SAFE(control, rh, next_instrm, ncontrol) {
+ TAILQ_REMOVE(rh, control, next_instrm);
+ control->on_strm_q = 0;
+ if (control->on_read_q == 0) {
+ sctp_free_remote_addr(control->whoFrom);
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ }
+ /* Reassembly free? */
+ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /*sa_ignore FREED_MEMORY*/
+ }
+ /*
+ * We don't free the address here
+ * since all the net's were freed
+ * above.
+ */
+ if (control->on_read_q == 0) {
+ sctp_free_a_readq(stcb, control);
+ }
+ }
+}
+
+/*-
+ * Free the association after un-hashing the remote port. This
+ * function ALWAYS returns holding NO LOCK on the stcb. It DOES
+ * expect that the input to this function IS a locked TCB.
+ * It will return 0 if it did NOT destroy the association (instead
+ * it unlocks it). It will return NON-zero if it either destroyed
+ * the association OR the association was already destroyed.
+ */
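+/*
+ * Typical call pattern (illustrative sketch only - "..." marks
+ * caller-specific work and "loc" stands for one of the
+ * SCTP_FROM_SCTP_PCB + SCTP_LOC_x location codes):
+ *
+ *   SCTP_TCB_LOCK(stcb);
+ *   if (sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, loc) == 0) {
+ *       ... the assoc survived; its lock is already dropped ...
+ *   }
+ */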
+int
+sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
+{
+ int i;
+ struct sctp_association *asoc;
+ struct sctp_nets *net, *nnet;
+ struct sctp_laddr *laddr, *naddr;
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_asconf_addr *aparam, *naparam;
+ struct sctp_asconf_ack *aack, *naack;
+ struct sctp_stream_reset_list *strrst, *nstrrst;
+ struct sctp_queued_to_read *sq, *nsq;
+ struct sctp_stream_queue_pending *sp, *nsp;
+ sctp_sharedkey_t *shared_key, *nshared_key;
+ struct socket *so;
+
+ /* first, lets purge the entry from the hash table. */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sctp_lock_assert(SCTP_INP_SO(inp));
+#endif
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 6);
+#endif
+ if (stcb->asoc.state == 0) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 7);
+#endif
+ /* there is no asoc, really TSNH :-0 */
+ return (1);
+ }
+ if (stcb->asoc.alternate) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ /* TEMP CODE */
+ if (stcb->freed_from_where == 0) {
+ /* Only record the first place free happened from */
+ stcb->freed_from_where = from_location;
+ }
+ /* TEMP CODE */
+#endif
+
+ asoc = &stcb->asoc;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+ else
+ so = inp->sctp_socket;
+
+ /*
+ * We use timer-based freeing if a reader or writer is in the way.
+ * So we first check if we are actually being called from a timer;
+ * if so, we abort early if a reader or writer is still in the way.
+ */
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+ (from_inpcbfree == SCTP_NORMAL_PROC)) {
+ /*
+ * is it the timer driving us? if so are the reader/writers
+ * gone?
+ */
+ if (stcb->asoc.refcnt) {
+ /* nope, reader or writer in the way */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ /* no asoc destroyed */
+ SCTP_TCB_UNLOCK(stcb);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 8);
+#endif
+ return (0);
+ }
+ }
+ /* Now clean up any other timers */
+ sctp_stop_association_timers(stcb, false);
+ /* Now the read queue needs to be cleaned up (only once) */
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_ABOUT_TO_BE_FREED);
+ SCTP_INP_READ_LOCK(inp);
+ TAILQ_FOREACH(sq, &inp->read_queue, next) {
+ if (sq->stcb == stcb) {
+ sq->do_not_ref_stcb = 1;
+ sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+ /* If there is no end, there never
+ * will be now.
+ */
+ if (sq->end_added == 0) {
+ /* Held for PD-API, clear that. */
+ sq->pdapi_aborted = 1;
+ sq->held_length = 0;
+ if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) {
+ /*
+ * Need to add a PD-API aborted indication.
+ * Setting the control_pdapi assures that it will
+ * be added right after this msg.
+ */
+ uint32_t strseq;
+ stcb->asoc.control_pdapi = sq;
+ strseq = (sq->sinfo_stream << 16) | (sq->mid & 0x0000ffff);
+ sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+ stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ (void *)&strseq,
+ SCTP_SO_LOCKED);
+ stcb->asoc.control_pdapi = NULL;
+ }
+ }
+ /* Add an end to wake them */
+ sq->end_added = 1;
+ }
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ if (stcb->block_entry) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PCB, ECONNRESET);
+ stcb->block_entry->error = ECONNRESET;
+ stcb->block_entry = NULL;
+ }
+ }
+ if ((stcb->asoc.refcnt) || (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE)) {
+ /* Someone holds a reference OR the socket has not
+ * been accepted yet.
+ */
+ if ((stcb->asoc.refcnt) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+ if (so) {
+ /* Wake any reader/writers */
+ sctp_sorwakeup(inp, so);
+ sctp_sowwakeup(inp, so);
+ }
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 9);
+#endif
+ /* no asoc destroyed */
+ return (0);
+ }
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 10);
+#endif
+ /* When I reach here, no others want
+ * to kill the assoc yet.. and I own
+ * the lock. Now it's possible an abort
+ * comes in while I do the lock exchange
+ * below to grab all the locks needed to
+ * do the final take-out. To prevent this
+ * we increment the refcount, which makes
+ * any such late-comer start a timer and
+ * bail out above, thus assuring us
+ * exclusive killing of the asoc. Note
+ * that after getting back the TCB lock
+ * we will drop the counter back down and
+ * stop any timer a passing stranger may
+ * have started :-S
+ */
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ SCTP_TCB_LOCK(stcb);
+ }
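+	/*
+	 * Note the fixed lock order used above: the global INP-INFO write
+	 * lock, then the endpoint (INP) lock, then the TCB lock. The extra
+	 * reference taken before dropping the TCB lock keeps a racing abort
+	 * from freeing the association during the exchange.
+	 */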
+ /* Double check the GONE flag */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/*
+		 * For the TCP type we need special handling when we are
+		 * connected. We also include the peeled-off ones, too.
+		 */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
+ if (so) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ so->so_state |= SS_ISDISCONNECTED;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ socantrcvmore(so);
+#else
+ socantrcvmore_locked(so);
+#endif
+ socantsendmore(so);
+ sctp_sowwakeup(inp, so);
+ sctp_sorwakeup(inp, so);
+ SCTP_SOWAKEUP(so);
+ }
+ }
+ }
+
+	/* Make it invalid too, that way if it's
+	 * about to run it will abort and return.
+	 */
+	/* now drop the temporary reference taken above */
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+ if (stcb->asoc.refcnt) {
+ SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_INP_WUNLOCK(inp);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ return (0);
+ }
+ asoc->state = 0;
+ if (inp->sctp_tcbhash) {
+ LIST_REMOVE(stcb, sctp_tcbhash);
+ }
+ if (stcb->asoc.in_asocid_hash) {
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ }
+ /* Now lets remove it from the list of ALL associations in the EP */
+ LIST_REMOVE(stcb, sctp_tcblist);
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ /* pull from vtag hash */
+ LIST_REMOVE(stcb, sctp_asocs);
+ sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_BASE_SYSCTL(sctp_vtag_time_wait),
+ inp->sctp_lport, stcb->rport);
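+	/*
+	 * The verification tag is parked in the time-wait list above so that
+	 * a new association cannot reuse this vtag/port pair until
+	 * sctp_vtag_time_wait expires, mirroring the intent of TCP's
+	 * TIME-WAIT state.
+	 */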
+
+	/* Now stop the timers once more, to be sure:
+	 * this is paranoia at its finest!
+	 */
+ sctp_stop_association_timers(stcb, true);
+
+ /*
+ * The chunk lists and such SHOULD be empty but we check them just
+ * in case.
+ */
+ /* anything on the wheel needs to be removed */
+ SCTP_TCB_SEND_LOCK(stcb);
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ struct sctp_stream_out *outs;
+
+ outs = &asoc->strmout[i];
+ /* now clean up any chunks here */
+ TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
+ atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
+ sctp_free_spbufspace(stcb, asoc, sp);
+ if (sp->data) {
+ if (so) {
+ /* Still an open socket - report */
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+ 0, (void *)sp, SCTP_SO_LOCKED);
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ sp->length = 0;
+ }
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
+ }
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ /*sa_ignore FREED_MEMORY*/
+ TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) {
+ TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp);
+ SCTP_FREE(strrst, SCTP_M_STRESET);
+ }
+ TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) {
+ TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
+ if (sq->data) {
+ sctp_m_freem(sq->data);
+ sq->data = NULL;
+ }
+ sctp_free_remote_addr(sq->whoFrom);
+ sq->whoFrom = NULL;
+ sq->stcb = NULL;
+ /* Free the ctl entry */
+ sctp_free_a_readq(stcb, sq);
+ /*sa_ignore FREED_MEMORY*/
+ }
+ TAILQ_FOREACH_SAFE(chk, &asoc->free_chunks, sctp_next, nchk) {
+ TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1);
+ asoc->free_chunk_cnt--;
+ /*sa_ignore FREED_MEMORY*/
+ }
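+	/*
+	 * Note that the cached-chunk accounting lives in two places: the
+	 * global SCTP_BASE_INFO(ipi_free_chunks) counter and the per-asoc
+	 * free_chunk_cnt; both are decremented together above.
+	 */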
+ /* pending send queue SHOULD be empty */
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+ }
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ if (chk->data) {
+ if (so) {
+ /* Still a socket? */
+ sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
+ 0, chk, SCTP_SO_LOCKED);
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+ if (chk->whoTo) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /*sa_ignore FREED_MEMORY*/
+ }
+ /* sent queue SHOULD be empty */
+ TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+ if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+ }
+ }
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ if (chk->data) {
+ if (so) {
+ /* Still a socket? */
+ sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
+ 0, chk, SCTP_SO_LOCKED);
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /*sa_ignore FREED_MEMORY*/
+ }
+#ifdef INVARIANTS
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (stcb->asoc.strmout[i].chunks_on_queues > 0) {
+ panic("%u chunks left for stream %u.", stcb->asoc.strmout[i].chunks_on_queues, i);
+ }
+ }
+#endif
+ /* control queue MAY not be empty */
+ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /*sa_ignore FREED_MEMORY*/
+ }
+ /* ASCONF queue MAY not be empty */
+ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
+ TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /*sa_ignore FREED_MEMORY*/
+ }
+ if (asoc->mapping_array) {
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ asoc->mapping_array = NULL;
+ }
+ if (asoc->nr_mapping_array) {
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->nr_mapping_array = NULL;
+ }
+ /* the stream outs */
+ if (asoc->strmout) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ asoc->strmout = NULL;
+ }
+ asoc->strm_realoutsize = asoc->streamoutcnt = 0;
+ if (asoc->strmin) {
+ for (i = 0; i < asoc->streamincnt; i++) {
+ sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
+ sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
+ }
+ SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
+ asoc->strmin = NULL;
+ }
+ asoc->streamincnt = 0;
+ TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) {
+#ifdef INVARIANTS
+ if (SCTP_BASE_INFO(ipi_count_raddr) == 0) {
+ panic("no net's left alloc'ed, or list points to itself");
+ }
+#endif
+ TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ }
+ LIST_FOREACH_SAFE(laddr, &asoc->sctp_restricted_addrs, sctp_nxt_addr, naddr) {
+ /*sa_ignore FREED_MEMORY*/
+ sctp_remove_laddr(laddr);
+ }
+
+ /* pending asconf (address) parameters */
+ TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) {
+ /*sa_ignore FREED_MEMORY*/
+ TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
+ SCTP_FREE(aparam,SCTP_M_ASC_ADDR);
+ }
+ TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) {
+ /*sa_ignore FREED_MEMORY*/
+ TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
+ if (aack->data != NULL) {
+ sctp_m_freem(aack->data);
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
+ }
+ /* clean up auth stuff */
+ if (asoc->local_hmacs)
+ sctp_free_hmaclist(asoc->local_hmacs);
+ if (asoc->peer_hmacs)
+ sctp_free_hmaclist(asoc->peer_hmacs);
+
+ if (asoc->local_auth_chunks)
+ sctp_free_chunklist(asoc->local_auth_chunks);
+ if (asoc->peer_auth_chunks)
+ sctp_free_chunklist(asoc->peer_auth_chunks);
+
+ sctp_free_authinfo(&asoc->authinfo);
+
+ LIST_FOREACH_SAFE(shared_key, &asoc->shared_keys, next, nshared_key) {
+ LIST_REMOVE(shared_key, next);
+ sctp_free_sharedkey(shared_key);
+ /*sa_ignore FREED_MEMORY*/
+ }
+
+ /* Insert new items here :> */
+
+ /* Get rid of LOCK */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ /* TEMP CODE */
+ stcb->freed_from_where = from_location;
+#endif
+#ifdef SCTP_TRACK_FREED_ASOCS
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+		/* now clean up the asoc itself */
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_DECR_ASOC_COUNT();
+ } else {
+ LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist);
+ }
+#else
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_DECR_ASOC_COUNT();
+#endif
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			/* If it's NOT inp_free calling us AND
+			 * sctp_close has been called, we
+			 * call back...
+			 */
+ SCTP_INP_RUNLOCK(inp);
+			/* This will start the kill timer (if we are
+			 * the last one) since we still hold a reference. But
+			 * this is the only safe way to do this,
+			 * since otherwise if the socket closes
+			 * at the same time we are here we might
+			 * collide in the cleanup.
+			 */
+ sctp_inpcb_free(inp,
+ SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+ SCTP_CALLED_DIRECTLY_NOCMPSET);
+ SCTP_INP_DECR_REF(inp);
+ } else {
+ /* The socket is still open. */
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ /* destroyed the asoc */
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 11);
+#endif
+ return (1);
+}
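+/*
+ * To summarize the contract of the function above: it returns 1 when the
+ * association was actually destroyed, and 0 when destruction was deferred
+ * (a reader/writer still held a reference, or the socket sits unaccepted),
+ * in which case the ASOCKILL timer retries the free later.
+ */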
+
+
+
+/*
+ * determine if a destination is "reachable" based upon the addresses bound
+ * to the current endpoint (e.g. only v4 or v6 currently bound)
+ */
+/*
+ * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
+ * assoc level v4/v6 flags, as the assoc *may* not have the same address
+ * types bound as its endpoint
+ */
+int
+sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
+{
+ struct sctp_inpcb *inp;
+ int answer;
+
+	/*
+	 * No locks here: the TCB is, in all cases, already locked and an
+	 * assoc is up. There is either an INP lock applied by the caller
+	 * (in the asconf case, when deleting an address) or NOT, in the HB
+	 * case; however, if HB then the INP refcount is up and the INP will
+	 * not be removed (on top of the fact that we hold a TCB lock). So
+	 * we only want to read the sctp_flags, which is either bound-all or
+	 * not; no protection is needed, since once an assoc is up you can't
+	 * change your binding.
+	 */
+ inp = stcb->sctp_ep;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* if bound all, destination is not restricted */
+ /*
+ * RRS: Question during lock work: Is this correct? If you
+ * are bound-all you still might need to obey the V4--V6
+ * flags??? IMO this bound-all stuff needs to be removed!
+ */
+ return (1);
+ }
+ /* NOTE: all "scope" checks are done when local addresses are added */
+ switch (destaddr->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+ answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ answer = inp->ip_inp.inp.inp_vflag & INP_CONN;
+ break;
+#endif
+ default:
+ /* invalid family, so it's unreachable */
+ answer = 0;
+ break;
+ }
+ return (answer);
+}
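+/*
+ * A minimal usage sketch (illustrative only): callers hold the TCB lock and
+ * gate destination selection on the answer, as in
+ * sctp_select_primary_destination() below:
+ *
+ *	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ *		if (sctp_destination_is_reachable(stcb,
+ *		    (struct sockaddr *)&net->ro._l_addr)) {
+ *			... use net ...
+ *		}
+ *	}
+ */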
+
+/*
+ * update the inp_vflags on an endpoint
+ */
+static void
+sctp_update_ep_vflag(struct sctp_inpcb *inp)
+{
+ struct sctp_laddr *laddr;
+
+ /* first clear the flag */
+ inp->ip_inp.inp.inp_vflag = 0;
+ /* set the flag based on addresses on the ep list */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
+ __func__);
+ continue;
+ }
+
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ continue;
+ }
+ switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ inp->ip_inp.inp.inp_vflag |= INP_CONN;
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+}
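+/*
+ * Note that the vflag is rebuilt from scratch each time: if the last IPv6
+ * address is deleted from the endpoint list, INP_IPV6 is dropped as well,
+ * which in turn changes what sctp_destination_is_reachable() reports.
+ */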
+
+/*
+ * Add the address to the endpoint local address list There is nothing to be
+ * done if we are bound to all addresses
+ */
+void
+sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action)
+{
+ struct sctp_laddr *laddr;
+ struct sctp_tcb *stcb;
+ int fnd, error = 0;
+
+ fnd = 0;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return;
+ }
+#ifdef INET6
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ /* Can't bind a non-useable addr. */
+ return;
+ }
+ }
+#endif
+ /* first, is it already present? */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ fnd = 1;
+ break;
+ }
+ }
+
+ if (fnd == 0) {
+ /* Not in the ep list */
+ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action);
+ if (error != 0)
+ return;
+ inp->laddr_count++;
+ /* update inp_vflag flags */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ inp->ip_inp.inp.inp_vflag |= INP_CONN;
+ break;
+#endif
+ default:
+ break;
+ }
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ sctp_add_local_addr_restricted(stcb, ifa);
+ }
+ }
+ return;
+}
+
+
+/*
+ * select a new (hopefully reachable) destination net (should only be used
+ * when we deleted an ep addr that is the only usable source address to reach
+ * the destination net)
+ */
+static void
+sctp_select_primary_destination(struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/* For now, just pick a reachable one. Note there is no
+		 * break, so the last reachable net on the list ends up
+		 * as the primary. */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
+ continue;
+ if (sctp_destination_is_reachable(stcb,
+ (struct sockaddr *)&net->ro._l_addr)) {
+ /* found a reachable destination */
+ stcb->asoc.primary_destination = net;
+ }
+ }
+	/* I can't get there from here! ...we're going to die shortly... */
+}
+
+
+/*
+ * Delete the address from the endpoint local address list. There is nothing
+ * to be done if we are bound to all addresses.
+ */
+void
+sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+ int fnd;
+
+ fnd = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return;
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (fnd && (inp->laddr_count < 2)) {
+ /* can't delete unless there are at LEAST 2 addresses */
+ return;
+ }
+ if (fnd) {
+		/*
+		 * Clean up any use of this address: go through our
+		 * associations and clear any last_used_address that
+		 * matches this one; for each assoc, see if a new
+		 * primary_destination is needed.
+		 */
+ struct sctp_tcb *stcb;
+
+ /* clean up "next_addr_touse" */
+ if (inp->next_addr_touse == laddr)
+ /* delete this address */
+ inp->next_addr_touse = NULL;
+
+ /* clean up "last_used_address" */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ struct sctp_nets *net;
+
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.last_used_address == laddr)
+ /* delete this address */
+ stcb->asoc.last_used_address = NULL;
+ /* Now spin through all the nets and purge any ref to laddr */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->ro._s_addr == laddr->ifa) {
+ /* Yep, purge src address selected */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(&net->ro);
+#else
+ sctp_rtentry_t *rt;
+
+ /* delete this address if cached */
+ rt = net->ro.ro_rt;
+ if (rt != NULL) {
+ RTFREE(rt);
+ net->ro.ro_rt = NULL;
+ }
+#endif
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } /* for each tcb */
+ /* remove it from the ep list */
+ sctp_remove_laddr(laddr);
+ inp->laddr_count--;
+ /* update inp_vflag flags */
+ sctp_update_ep_vflag(inp);
+ }
+ return;
+}
+
+/*
+ * Add the address to the TCB local address restricted list.
+ * This is a "pending" address list (e.g., addresses waiting for an
+ * ASCONF-ACK response) and cannot be used as a valid source address.
+ */
+void
+sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+ struct sctpladdr *list;
+
+	/*
+	 * Assumes the TCB is locked... and possibly the INP. We may need
+	 * to confirm/fix that if we need it and it is not the case.
+	 */
+ list = &stcb->asoc.sctp_restricted_addrs;
+
+#ifdef INET6
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			/* Can't bind a non-useable addr. */
+ return;
+ }
+ }
+#endif
+ /* does the address already exist? */
+ LIST_FOREACH(laddr, list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ return;
+ }
+ }
+
+ /* add to the list */
+ (void)sctp_insert_laddr(list, ifa, 0);
+ return;
+}
+
+/*
+ * Remove a local address from the TCB local address restricted list
+ */
+void
+sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+
+	/*
+	 * This is called by the asconf work. It is assumed that a) the TCB
+	 * is locked and b) the INP is locked. This is true in as much as I
+	 * can trace through the asconf entry code where I took these locks.
+	 * Again, the ASCONF code is a bit different in that it often does
+	 * lock the INP during its work. This must be so, since we don't
+	 * want other processes looking things up while what they are
+	 * looking up is changing :-D
+	 */
+
+ inp = stcb->sctp_ep;
+ /* if subset bound and don't allow ASCONF's, can't delete last */
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ if (stcb->sctp_ep->laddr_count < 2) {
+ /* can't delete last address */
+ return;
+ }
+ }
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
+ /* remove the address if it exists */
+ if (laddr->ifa == NULL)
+ continue;
+ if (laddr->ifa == ifa) {
+ sctp_remove_laddr(laddr);
+ return;
+ }
+ }
+
+ /* address not found! */
+ return;
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+/* sysctl */
+static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
+static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_MCORE_INPUT) && defined(SMP)
+struct sctp_mcore_ctrl *sctp_mcore_workers = NULL;
+int *sctp_cpuarry = NULL;
+
+void
+sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use)
+{
+	/* Queue a packet to the worker thread bound to the specified core */
+ struct sctp_mcore_queue *qent;
+ struct sctp_mcore_ctrl *wkq;
+ int need_wake = 0;
+
+ if (sctp_mcore_workers == NULL) {
+		/* Something went badly wrong during setup */
+ sctp_input_with_port(m, off, 0);
+ return;
+ }
+ SCTP_MALLOC(qent, struct sctp_mcore_queue *,
+ (sizeof(struct sctp_mcore_queue)),
+ SCTP_M_MCORE);
+ if (qent == NULL) {
+ /* This is trouble */
+ sctp_input_with_port(m, off, 0);
+ return;
+ }
+ qent->vn = curvnet;
+ qent->m = m;
+ qent->off = off;
+ qent->v6 = 0;
+ wkq = &sctp_mcore_workers[cpu_to_use];
+ SCTP_MCORE_QLOCK(wkq);
+
+ TAILQ_INSERT_TAIL(&wkq->que, qent, next);
+ if (wkq->running == 0) {
+ need_wake = 1;
+ }
+ SCTP_MCORE_QUNLOCK(wkq);
+ if (need_wake) {
+ wakeup(&wkq->running);
+ }
+}
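+/*
+ * Note the fallback above: if the worker array was never set up or the
+ * queue entry cannot be allocated, the packet is not dropped but processed
+ * inline on the current CPU via sctp_input_with_port().
+ */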
+
+static void
+sctp_mcore_thread(void *arg)
+{
+
+ struct sctp_mcore_ctrl *wkq;
+ struct sctp_mcore_queue *qent;
+
+ wkq = (struct sctp_mcore_ctrl *)arg;
+ struct mbuf *m;
+ int off, v6;
+
+ /* Wait for first tickle */
+ SCTP_MCORE_LOCK(wkq);
+ wkq->running = 0;
+ msleep(&wkq->running,
+ &wkq->core_mtx,
+ 0, "wait for pkt", 0);
+ SCTP_MCORE_UNLOCK(wkq);
+
+ /* Bind to our cpu */
+ thread_lock(curthread);
+ sched_bind(curthread, wkq->cpuid);
+ thread_unlock(curthread);
+
+	/* Now let's start working */
+ SCTP_MCORE_LOCK(wkq);
+ /* Now grab lock and go */
+ for (;;) {
+ SCTP_MCORE_QLOCK(wkq);
+ skip_sleep:
+ wkq->running = 1;
+ qent = TAILQ_FIRST(&wkq->que);
+ if (qent) {
+ TAILQ_REMOVE(&wkq->que, qent, next);
+ SCTP_MCORE_QUNLOCK(wkq);
+ CURVNET_SET(qent->vn);
+ m = qent->m;
+ off = qent->off;
+ v6 = qent->v6;
+ SCTP_FREE(qent, SCTP_M_MCORE);
+ if (v6 == 0) {
+ sctp_input_with_port(m, off, 0);
+ } else {
+ SCTP_PRINTF("V6 not yet supported\n");
+ sctp_m_freem(m);
+ }
+ CURVNET_RESTORE();
+ SCTP_MCORE_QLOCK(wkq);
+ }
+ wkq->running = 0;
+ if (!TAILQ_EMPTY(&wkq->que)) {
+ goto skip_sleep;
+ }
+ SCTP_MCORE_QUNLOCK(wkq);
+ msleep(&wkq->running,
+ &wkq->core_mtx,
+ 0, "wait for pkt", 0);
+ }
+}
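+/*
+ * The skip_sleep label above closes a classic lost-wakeup window: after a
+ * batch drains, the queue is re-checked while still holding the queue lock,
+ * so an entry added between the drain and the msleep() is not stranded.
+ */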
+
+static void
+sctp_startup_mcore_threads(void)
+{
+ int i, cpu;
+
+ if (mp_ncpus == 1)
+ return;
+
+ if (sctp_mcore_workers != NULL) {
+ /* Already been here in some previous
+ * vnet?
+ */
+ return;
+ }
+ SCTP_MALLOC(sctp_mcore_workers, struct sctp_mcore_ctrl *,
+ ((mp_maxid+1) * sizeof(struct sctp_mcore_ctrl)),
+ SCTP_M_MCORE);
+ if (sctp_mcore_workers == NULL) {
+		/* TSNH (this should not happen), I hope */
+ return;
+ }
+ memset(sctp_mcore_workers, 0 , ((mp_maxid+1) *
+ sizeof(struct sctp_mcore_ctrl)));
+ /* Init the structures */
+ for (i = 0; i<=mp_maxid; i++) {
+ TAILQ_INIT(&sctp_mcore_workers[i].que);
+ SCTP_MCORE_LOCK_INIT(&sctp_mcore_workers[i]);
+ SCTP_MCORE_QLOCK_INIT(&sctp_mcore_workers[i]);
+ sctp_mcore_workers[i].cpuid = i;
+ }
+ if (sctp_cpuarry == NULL) {
+ SCTP_MALLOC(sctp_cpuarry, int *,
+ (mp_ncpus * sizeof(int)),
+ SCTP_M_MCORE);
+ i = 0;
+ CPU_FOREACH(cpu) {
+ sctp_cpuarry[i] = cpu;
+ i++;
+ }
+ }
+ /* Now start them all */
+ CPU_FOREACH(cpu) {
+ (void)kproc_create(sctp_mcore_thread,
+ (void *)&sctp_mcore_workers[cpu],
+ &sctp_mcore_workers[cpu].thread_proc,
+ RFPROC,
+ SCTP_KTHREAD_PAGES,
+ SCTP_MCORE_NAME);
+ }
+}
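+/*
+ * The sctp_cpuarry table built above maps dense indices onto the CPU ids
+ * that CPU_FOREACH() yields, which may be sparse; callers can thus pick a
+ * worker with a simple modulo over mp_ncpus.
+ */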
+#endif
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_NOT_YET)
+static struct mbuf *
+sctp_netisr_hdlr(struct mbuf *m, uintptr_t source)
+{
+ struct ip *ip;
+ struct sctphdr *sh;
+ int offset;
+ uint32_t flowid, tag;
+
+	/*
+	 * No flow id was built by the lower layers; fix that up here so
+	 * that we create one.
+	 */
+ ip = mtod(m, struct ip *);
+ offset = (ip->ip_hl << 2) + sizeof(struct sctphdr);
+ if (SCTP_BUF_LEN(m) < offset) {
+ if ((m = m_pullup(m, offset)) == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return (NULL);
+ }
+ ip = mtod(m, struct ip *);
+ }
+ sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ tag = htonl(sh->v_tag);
+ flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
+ m->m_pkthdr.flowid = flowid;
+ /* FIX ME */
+ m->m_flags |= M_FLOWID;
+ return (m);
+}
+
+#endif
+#endif
+void
+#if defined(__Userspace__)
+sctp_pcb_init(int start_threads)
+#else
+sctp_pcb_init(void)
+#endif
+{
+	/*
+	 * SCTP initialization for the PCB structures; this should be
+	 * called from the sctp_init() function.
+	 */
+ int i;
+ struct timeval tv;
+
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) {
+		/* error: I was called twice */
+ return;
+ }
+ SCTP_BASE_VAR(sctp_pcb_initialized) = 1;
+
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if !defined(_WIN32)
+ pthread_mutexattr_init(&SCTP_BASE_VAR(mtx_attr));
+#ifdef INVARIANTS
+ pthread_mutexattr_settype(&SCTP_BASE_VAR(mtx_attr), PTHREAD_MUTEX_ERRORCHECK);
+#endif
+#endif
+#endif
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(_WIN32) && !defined(__Userspace__)
+ if (SCTP_BASE_SYSCTL(sctp_log) != NULL) {
+ memset(SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+ }
+#else
+ memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+#endif
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ SCTP_MALLOC(SCTP_BASE_STATS, struct sctpstat *,
+ ((mp_maxid+1) * sizeof(struct sctpstat)),
+ SCTP_M_MCORE);
+#endif
+#endif
+ (void)SCTP_GETTIME_TIMEVAL(&tv);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ memset(SCTP_BASE_STATS, 0, sizeof(struct sctpstat) * (mp_maxid+1));
+ SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t)tv.tv_sec;
+ SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t)tv.tv_usec;
+#else
+ memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat));
+ SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec;
+ SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec;
+#endif
+#else
+ memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat));
+ SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec;
+ SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec;
+#endif
+ /* init the empty list of (All) Endpoints */
+ LIST_INIT(&SCTP_BASE_INFO(listhead));
+#if defined(__APPLE__) && !defined(__Userspace__)
+ LIST_INIT(&SCTP_BASE_INFO(inplisthead));
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+ SCTP_BASE_INFO(sctbinfo).listhead = &SCTP_BASE_INFO(inplisthead);
+ SCTP_BASE_INFO(sctbinfo).mtx_grp_attr = lck_grp_attr_alloc_init();
+ lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
+ SCTP_BASE_INFO(sctbinfo).mtx_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
+ SCTP_BASE_INFO(sctbinfo).mtx_attr = lck_attr_alloc_init();
+ lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_attr);
+#else
+ SCTP_BASE_INFO(sctbinfo).ipi_listhead = &SCTP_BASE_INFO(inplisthead);
+ SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr = lck_grp_attr_alloc_init();
+ lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
+ SCTP_BASE_INFO(sctbinfo).ipi_lock_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
+ SCTP_BASE_INFO(sctbinfo).ipi_lock_attr = lck_attr_alloc_init();
+ lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
+#endif
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
+ SCTP_BASE_INFO(sctbinfo).ipi_gc = sctp_gc;
+ in_pcbinfo_attach(&SCTP_BASE_INFO(sctbinfo));
+#endif
+#endif
+
+
+ /* init the hash table of endpoints */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize));
+ TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize));
+ TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale));
+#endif
+ SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31),
+ &SCTP_BASE_INFO(hashasocmark));
+ SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
+ &SCTP_BASE_INFO(hashmark));
+ SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
+ &SCTP_BASE_INFO(hashtcpmark));
+ SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
+
+
+ SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
+ &SCTP_BASE_INFO(hashvrfmark));
+
+ SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
+ &SCTP_BASE_INFO(vrf_ifn_hashmark));
+ /* init the zones */
+	/*
+	 * FIX ME: Should check for NULL returns, but if it does fail we
+	 * are doomed to panic anyway... maybe add this later.
+	 */
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep",
+ sizeof(struct sctp_inpcb), maxsockets);
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc",
+ sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr",
+ sizeof(struct sctp_laddr),
+ (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr",
+ sizeof(struct sctp_nets),
+ (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk",
+ sizeof(struct sctp_tmit_chunk),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq",
+ sizeof(struct sctp_queued_to_read),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out",
+ sizeof(struct sctp_stream_queue_pending),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf",
+ sizeof(struct sctp_asconf),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack",
+ sizeof(struct sctp_asconf_ack),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+
+ /* Master Lock INIT for info structure */
+ SCTP_INP_INFO_LOCK_INIT();
+ SCTP_STATLOG_INIT_LOCK();
+
+ SCTP_IPI_COUNT_INIT();
+ SCTP_IPI_ADDR_INIT();
+#ifdef SCTP_PACKET_LOGGING
+ SCTP_IP_PKTLOG_INIT();
+#endif
+ LIST_INIT(&SCTP_BASE_INFO(addr_wq));
+
+ SCTP_WQ_ADDR_INIT();
+ /* not sure if we need all the counts */
+ SCTP_BASE_INFO(ipi_count_ep) = 0;
+ /* assoc/tcb zone info */
+ SCTP_BASE_INFO(ipi_count_asoc) = 0;
+ /* local addrlist zone info */
+ SCTP_BASE_INFO(ipi_count_laddr) = 0;
+ /* remote addrlist zone info */
+ SCTP_BASE_INFO(ipi_count_raddr) = 0;
+ /* chunk info */
+ SCTP_BASE_INFO(ipi_count_chunk) = 0;
+
+ /* socket queue zone info */
+ SCTP_BASE_INFO(ipi_count_readq) = 0;
+
+	/* stream out queue count */
+ SCTP_BASE_INFO(ipi_count_strmoq) = 0;
+
+ SCTP_BASE_INFO(ipi_free_strmoq) = 0;
+ SCTP_BASE_INFO(ipi_free_chunks) = 0;
+
+ SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer));
+
+ /* Init the TIMEWAIT list */
+ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+ LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]);
+ }
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(_WIN32)
+ InitializeConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+ (void)pthread_cond_init(&sctp_it_ctl.iterator_wakeup, NULL);
+#endif
+#endif
+ sctp_startup_iterator();
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_MCORE_INPUT) && defined(SMP)
+ sctp_startup_mcore_threads();
+#endif
+#endif
+
+	/*
+	 * INIT the default VRF, which for BSD is the only one; other OSes
+	 * may have more. But initially they must start with one and then
+	 * add VRFs as addresses are added.
+	 */
+ sctp_init_vrf_list(SCTP_DEFAULT_VRF);
+#if defined(__FreeBSD__) && !defined(__Userspace__) && defined(SCTP_NOT_YET)
+ if (ip_register_flow_handler(sctp_netisr_hdlr, IPPROTO_SCTP)) {
+ SCTP_PRINTF("***SCTP- Error can't register netisr handler***\n");
+ }
+#endif
+#if defined(_SCTP_NEEDS_CALLOUT_) || defined(_USER_SCTP_NEEDS_CALLOUT_)
+ /* allocate the lock for the callout/timer queue */
+ SCTP_TIMERQ_LOCK_INIT();
+ TAILQ_INIT(&SCTP_BASE_INFO(callqueue));
+#endif
+#if defined(__Userspace__)
+ mbuf_initialize(NULL);
+ atomic_init();
+#if defined(INET) || defined(INET6)
+ if (start_threads)
+ recv_thread_init();
+#endif
+#endif
+}
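+/*
+ * A hedged usage sketch for the userspace build (names assume the public
+ * usrsctp API; the port value is illustrative only):
+ *
+ *	usrsctp_init(9899, NULL, NULL);	// runs sctp_init(), which calls
+ *					// sctp_pcb_init(1) exactly once
+ *
+ * A second call is a no-op thanks to the sctp_pcb_initialized guard above.
+ */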
+
+/*
+ * Assumes that the SCTP_BASE_INFO() lock is NOT held.
+ */
+void
+sctp_pcb_finish(void)
+{
+ struct sctp_vrflist *vrf_bucket;
+ struct sctp_vrf *vrf, *nvrf;
+ struct sctp_ifn *ifn, *nifn;
+ struct sctp_ifa *ifa, *nifa;
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block, *prev_twait_block;
+ struct sctp_laddr *wi, *nwi;
+ int i;
+ struct sctp_iterator *it, *nit;
+
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+ SCTP_PRINTF("%s: race condition on teardown.\n", __func__);
+ return;
+ }
+ SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ /* Notify the iterator to exit. */
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_MUST_EXIT;
+ sctp_wakeup_iterator();
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
+ in_pcbinfo_detach(&SCTP_BASE_INFO(sctbinfo));
+#endif
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ do {
+ msleep(&sctp_it_ctl.iterator_flags,
+ sctp_it_ctl.ipi_iterator_wq_mtx,
+ 0, "waiting_for_work", 0);
+ } while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_EXITED) == 0);
+ thread_deallocate(sctp_it_ctl.thread_proc);
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+ if (sctp_it_ctl.iterator_thread_obj != NULL) {
+ NTSTATUS status = STATUS_SUCCESS;
+
+ KeSetEvent(&sctp_it_ctl.iterator_wakeup[1], IO_NO_INCREMENT, FALSE);
+ status = KeWaitForSingleObject(sctp_it_ctl.iterator_thread_obj,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
+ ObDereferenceObject(sctp_it_ctl.iterator_thread_obj);
+ }
+#endif
+#if defined(__Userspace__)
+ if (SCTP_BASE_VAR(iterator_thread_started)) {
+#if defined(_WIN32)
+ WaitForSingleObject(sctp_it_ctl.thread_proc, INFINITE);
+ CloseHandle(sctp_it_ctl.thread_proc);
+ sctp_it_ctl.thread_proc = NULL;
+#else
+ pthread_join(sctp_it_ctl.thread_proc, NULL);
+ sctp_it_ctl.thread_proc = 0;
+#endif
+ }
+#endif
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(_WIN32)
+ DeleteConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+ pthread_cond_destroy(&sctp_it_ctl.iterator_wakeup);
+ pthread_mutexattr_destroy(&SCTP_BASE_VAR(mtx_attr));
+#endif
+#endif
+	/* On FreeBSD the iterator thread never exits,
+	 * but we do clean up.
+	 * The only way FreeBSD reaches here is if we have VRFs,
+	 * but we still add the ifdef to make it compile on old versions.
+	 */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+retry:
+#endif
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+	/*
+	 * sctp_iterator_worker() might be working on an it entry without
+	 * holding the lock. We would not find it on the list either, and
+	 * would continue and free/destroy it. So, while holding the lock,
+	 * spin to avoid the race, since sctp_iterator_worker() will have
+	 * to wait to re-acquire it.
+	 */
+ if (sctp_it_ctl.iterator_running != 0 || sctp_it_ctl.cur_it != NULL) {
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+ SCTP_PRINTF("%s: Iterator running while we held the lock. Retry. "
+ "cur_it=%p\n", __func__, sctp_it_ctl.cur_it);
+ DELAY(10);
+ goto retry;
+ }
+#endif
+ TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (it->vn != curvnet) {
+ continue;
+ }
+#endif
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it,SCTP_M_ITER);
+ }
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_ITERATOR_LOCK();
+ if ((sctp_it_ctl.cur_it) &&
+ (sctp_it_ctl.cur_it->vn == curvnet)) {
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
+ }
+ SCTP_ITERATOR_UNLOCK();
+#endif
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ SCTP_IPI_ITERATOR_WQ_DESTROY();
+ SCTP_ITERATOR_LOCK_DESTROY();
+#endif
+ SCTP_OS_TIMER_STOP_DRAIN(&SCTP_BASE_INFO(addr_wq_timer.timer));
+ SCTP_WQ_ADDR_LOCK();
+ LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
+ LIST_REMOVE(wi, sctp_nxt_addr);
+ SCTP_DECR_LADDR_COUNT();
+ if (wi->action == SCTP_DEL_IP_ADDRESS) {
+ SCTP_FREE(wi->ifa, SCTP_M_IFA);
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi);
+ }
+ SCTP_WQ_ADDR_UNLOCK();
+
+ /*
+ * free the vrf/ifn/ifa lists and hashes (be sure address monitor
+ * is destroyed first).
+ */
+ SCTP_IPI_ADDR_WLOCK();
+ vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))];
+ LIST_FOREACH_SAFE(vrf, vrf_bucket, next_vrf, nvrf) {
+ LIST_FOREACH_SAFE(ifn, &vrf->ifnlist, next_ifn, nifn) {
+ LIST_FOREACH_SAFE(ifa, &ifn->ifalist, next_ifa, nifa) {
+ /* free the ifa */
+ LIST_REMOVE(ifa, next_bucket);
+ LIST_REMOVE(ifa, next_ifa);
+ SCTP_FREE(ifa, SCTP_M_IFA);
+ }
+ /* free the ifn */
+ LIST_REMOVE(ifn, next_bucket);
+ LIST_REMOVE(ifn, next_ifn);
+ SCTP_FREE(ifn, SCTP_M_IFN);
+ }
+ SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+ /* free the vrf */
+ LIST_REMOVE(vrf, next_vrf);
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ }
+ SCTP_IPI_ADDR_WUNLOCK();
+ /* free the vrf hashes */
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark));
+ SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark));
+
+ /* free the TIMEWAIT list elements malloc'd in the function
+ * sctp_add_vtag_to_timewait()...
+ */
+ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+ chain = &SCTP_BASE_INFO(vtag_timewait)[i];
+ if (!LIST_EMPTY(chain)) {
+ prev_twait_block = NULL;
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ if (prev_twait_block) {
+ SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
+ }
+ prev_twait_block = twait_block;
+ }
+ SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
+ }
+ }
+
+ /* free the locks and mutexes */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_TIMERQ_LOCK_DESTROY();
+#endif
+#ifdef SCTP_PACKET_LOGGING
+ SCTP_IP_PKTLOG_DESTROY();
+#endif
+ SCTP_IPI_ADDR_DESTROY();
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_IPI_COUNT_DESTROY();
+#endif
+ SCTP_STATLOG_DESTROY();
+ SCTP_INP_INFO_LOCK_DESTROY();
+
+ SCTP_WQ_ADDR_DESTROY();
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+ lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
+ lck_grp_free(SCTP_BASE_INFO(sctbinfo).mtx_grp);
+ lck_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_attr);
+#else
+ lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
+ lck_grp_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp);
+ lck_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
+#endif
+#endif
+#if defined(__Userspace__)
+ SCTP_TIMERQ_LOCK_DESTROY();
+ SCTP_ZONE_DESTROY(zone_mbuf);
+ SCTP_ZONE_DESTROY(zone_clust);
+ SCTP_ZONE_DESTROY(zone_ext_refcnt);
+#endif
+ /* Get rid of other stuff too. */
+ if (SCTP_BASE_INFO(sctp_asochash) != NULL)
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark));
+ if (SCTP_BASE_INFO(sctp_ephash) != NULL)
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
+ if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
+
+#if defined(_WIN32) || defined(__FreeBSD__) || defined(__Userspace__)
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack));
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE);
+#endif
+#endif
+}
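+/*
+ * Teardown above deliberately mirrors sctp_pcb_init() in reverse: the
+ * iterator thread is retired first, then the address work queue, the
+ * vrf/ifn/ifa tables and the time-wait lists, and only then the locks,
+ * hashes and zones, all guarded by the sctp_pcb_initialized flag.
+ */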
+
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
+ int offset, int limit,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sockaddr *altsa, uint16_t port)
+{
+	/*
+	 * Comb through the INIT, pulling addresses and loading them into
+	 * the nets structure in the asoc. The from address in the mbuf
+	 * should also be loaded (if it is not already). This routine can
+	 * be called with either an INIT or an INIT-ACK, as long as m
+	 * points to the IP packet and the offset points to the beginning
+	 * of the parameters.
+	 */
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net, *nnet, *net_tmp;
+ struct sctp_paramhdr *phdr, param_buf;
+ struct sctp_tcb *stcb_tmp;
+ uint16_t ptype, plen;
+ struct sockaddr *sa;
+ uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_random *p_random = NULL;
+ uint16_t random_len = 0;
+ uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_hmac_algo *hmacs = NULL;
+ uint16_t hmacs_len = 0;
+ uint8_t saw_asconf = 0;
+ uint8_t saw_asconf_ack = 0;
+ uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_chunk_list *chunks = NULL;
+ uint16_t num_chunks = 0;
+ sctp_key_t *new_key;
+ uint32_t keylen;
+ int got_random = 0, got_hmacs = 0, got_chklist = 0;
+ uint8_t peer_supports_ecn;
+ uint8_t peer_supports_prsctp;
+ uint8_t peer_supports_auth;
+ uint8_t peer_supports_asconf;
+ uint8_t peer_supports_asconf_ack;
+ uint8_t peer_supports_reconfig;
+ uint8_t peer_supports_nrsack;
+ uint8_t peer_supports_pktdrop;
+ uint8_t peer_supports_idata;
+#ifdef INET
+ struct sockaddr_in sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+
+ /* First get the destination address setup too. */
+#ifdef INET
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin.sin_len = sizeof(sin);
+#endif
+ sin.sin_port = stcb->rport;
+#endif
+#ifdef INET6
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ sin6.sin6_port = stcb->rport;
+#endif
+ if (altsa) {
+ sa = altsa;
+ } else {
+ sa = src;
+ }
+ peer_supports_idata = 0;
+ peer_supports_ecn = 0;
+ peer_supports_prsctp = 0;
+ peer_supports_auth = 0;
+	peer_supports_asconf = 0;
+	peer_supports_asconf_ack = 0;
+ peer_supports_reconfig = 0;
+ peer_supports_nrsack = 0;
+ peer_supports_pktdrop = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* mark all addresses that we have currently on the list */
+ net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ /* does the source address already exist? if so skip it */
+ inp = stcb->sctp_ep;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, dst, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+
+ if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
+ /* we must add the source address */
+ /* no scope set here since we have a tcb already. */
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) {
+ return (-1);
+ }
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
+ return (-2);
+ }
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (stcb->asoc.scope.conn_addr_legal) {
+ if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
+ return (-2);
+ }
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ } else {
+ if (net_tmp != NULL && stcb_tmp == stcb) {
+ net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
+ } else if (stcb_tmp != stcb) {
+ /* It belongs to another association? */
+ if (stcb_tmp)
+ SCTP_TCB_UNLOCK(stcb_tmp);
+ return (-3);
+ }
+ }
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-4);
+ }
+ /* now we must go through each of the params. */
+ phdr = sctp_get_next_param(m, offset, &param_buf, sizeof(param_buf));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ /*
+ * SCTP_PRINTF("ptype => %0x, plen => %d\n", (uint32_t)ptype,
+ * (int)plen);
+ */
+ if (offset + plen > limit) {
+ break;
+ }
+ if (plen < sizeof(struct sctp_paramhdr)) {
+ break;
+ }
+#ifdef INET
+ if (ptype == SCTP_IPV4_ADDRESS) {
+ if (stcb->asoc.scope.ipv4_addr_legal) {
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ /* ok get the v4 address and check/add */
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&p4_buf,
+ sizeof(p4_buf));
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ phdr == NULL) {
+ return (-5);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin.sin_addr.s_addr = p4->addr;
+ if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+ /* Skip multi-cast addresses */
+ goto next_param;
+ }
+ if ((sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+ (sin.sin_addr.s_addr == INADDR_ANY)) {
+ goto next_param;
+ }
+ sa = (struct sockaddr *)&sin;
+ inp = stcb->sctp_ep;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+ dst, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+
+ if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
+ inp == NULL) {
+ /* we must add the source address */
+ /*
+ * no scope set since we have a tcb
+ * already
+ */
+
+ /*
+ * we must validate the state again
+ * here
+ */
+ add_it_now:
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-7);
+ }
+ if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) {
+ return (-8);
+ }
+ } else if (stcb_tmp == stcb) {
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-10);
+ }
+ if (net != NULL) {
+ /* clear flag */
+ net->dest_state &=
+ ~SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ } else {
+ /*
+ * strange, address is in another
+ * assoc? straighten out locks.
+ */
+ if (stcb_tmp) {
+ if (SCTP_GET_STATE(stcb_tmp) == SCTP_STATE_COOKIE_WAIT) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ /* in setup state we abort this guy */
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "%s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ sctp_abort_an_association(stcb_tmp->sctp_ep,
+ stcb_tmp, op_err,
+ SCTP_SO_NOT_LOCKED);
+ goto add_it_now;
+ }
+ SCTP_TCB_UNLOCK(stcb_tmp);
+ }
+
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-12);
+ }
+ return (-13);
+ }
+ }
+ } else
+#endif
+#ifdef INET6
+ if (ptype == SCTP_IPV6_ADDRESS) {
+ if (stcb->asoc.scope.ipv6_addr_legal) {
+ /* ok get the v6 address and check/add */
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&p6_buf,
+ sizeof(p6_buf));
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ phdr == NULL) {
+ return (-14);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+ sizeof(p6->addr));
+ if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
+ /* Skip multi-cast addresses */
+ goto next_param;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+					/* Link-local makes no sense without scope */
+ goto next_param;
+ }
+ sa = (struct sockaddr *)&sin6;
+ inp = stcb->sctp_ep;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+ dst, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (stcb_tmp == NULL &&
+ (inp == stcb->sctp_ep || inp == NULL)) {
+ /*
+ * we must validate the state again
+ * here
+ */
+ add_it_now6:
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-16);
+ }
+ /*
+ * we must add the address, no scope
+ * set
+ */
+ if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) {
+ return (-17);
+ }
+ } else if (stcb_tmp == stcb) {
+ /*
+ * we must validate the state again
+ * here
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-19);
+ }
+ if (net != NULL) {
+ /* clear flag */
+ net->dest_state &=
+ ~SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ } else {
+ /*
+ * strange, address is in another
+ * assoc? straighten out locks.
+ */
+ if (stcb_tmp) {
+ if (SCTP_GET_STATE(stcb_tmp) == SCTP_STATE_COOKIE_WAIT) {
+ struct mbuf *op_err;
+ char msg[SCTP_DIAG_INFO_LEN];
+
+ /* in setup state we abort this guy */
+ SCTP_SNPRINTF(msg, sizeof(msg),
+ "%s:%d at %s", __FILE__, __LINE__, __func__);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ msg);
+ sctp_abort_an_association(stcb_tmp->sctp_ep,
+ stcb_tmp, op_err,
+ SCTP_SO_NOT_LOCKED);
+ goto add_it_now6;
+ }
+ SCTP_TCB_UNLOCK(stcb_tmp);
+ }
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-21);
+ }
+ return (-22);
+ }
+ }
+ } else
+#endif
+ if (ptype == SCTP_ECN_CAPABLE) {
+ peer_supports_ecn = 1;
+ } else if (ptype == SCTP_ULP_ADAPTATION) {
+ if (stcb->asoc.state != SCTP_STATE_OPEN) {
+ struct sctp_adaptation_layer_indication ai, *aip;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ai, sizeof(ai));
+ aip = (struct sctp_adaptation_layer_indication *)phdr;
+ if (aip) {
+ stcb->asoc.peers_adaptation = ntohl(aip->indication);
+ stcb->asoc.adaptation_needed = 1;
+ }
+ }
+ } else if (ptype == SCTP_SET_PRIM_ADDR) {
+ struct sctp_asconf_addr_param lstore, *fee;
+ int lptype;
+ struct sockaddr *lsa = NULL;
+#ifdef INET
+ struct sctp_asconf_addrv4_param *fii;
+#endif
+
+ if (stcb->asoc.asconf_supported == 0) {
+ return (-100);
+ }
+ if (plen > sizeof(lstore)) {
+ return (-23);
+ }
+ if (plen < sizeof(struct sctp_asconf_addrv4_param)) {
+ return (-101);
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&lstore,
+ plen);
+ if (phdr == NULL) {
+ return (-24);
+ }
+ fee = (struct sctp_asconf_addr_param *)phdr;
+ lptype = ntohs(fee->addrp.ph.param_type);
+ switch (lptype) {
+#ifdef INET
+ case SCTP_IPV4_ADDRESS:
+ if (plen !=
+ sizeof(struct sctp_asconf_addrv4_param)) {
+ SCTP_PRINTF("Sizeof setprim in init/init ack not %d but %d - ignored\n",
+ (int)sizeof(struct sctp_asconf_addrv4_param),
+ plen);
+ } else {
+ fii = (struct sctp_asconf_addrv4_param *)fee;
+ sin.sin_addr.s_addr = fii->addrp.addr;
+ lsa = (struct sockaddr *)&sin;
+ }
+ break;
+#endif
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ if (plen !=
+ sizeof(struct sctp_asconf_addr_param)) {
+ SCTP_PRINTF("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
+ (int)sizeof(struct sctp_asconf_addr_param),
+ plen);
+ } else {
+ memcpy(sin6.sin6_addr.s6_addr,
+ fee->addrp.addr,
+ sizeof(fee->addrp.addr));
+ lsa = (struct sockaddr *)&sin6;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ if (lsa) {
+ (void)sctp_set_primary_addr(stcb, sa, NULL);
+ }
+ } else if (ptype == SCTP_HAS_NAT_SUPPORT) {
+ stcb->asoc.peer_supports_nat = 1;
+ } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
+ /* Peer supports pr-sctp */
+ peer_supports_prsctp = 1;
+ } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+ /* A supported extension chunk */
+ struct sctp_supported_chunk_types_param *pr_supported;
+ uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+ int num_ent, i;
+
+ if (plen > sizeof(local_store)) {
+ return (-35);
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&local_store, plen);
+ if (phdr == NULL) {
+ return (-25);
+ }
+ pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+ num_ent = plen - sizeof(struct sctp_paramhdr);
+ for (i = 0; i < num_ent; i++) {
+ switch (pr_supported->chunk_types[i]) {
+ case SCTP_ASCONF:
+ peer_supports_asconf = 1;
+ break;
+ case SCTP_ASCONF_ACK:
+ peer_supports_asconf_ack = 1;
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ peer_supports_prsctp = 1;
+ break;
+ case SCTP_PACKET_DROPPED:
+ peer_supports_pktdrop = 1;
+ break;
+ case SCTP_NR_SELECTIVE_ACK:
+ peer_supports_nrsack = 1;
+ break;
+ case SCTP_STREAM_RESET:
+ peer_supports_reconfig = 1;
+ break;
+ case SCTP_AUTHENTICATION:
+ peer_supports_auth = 1;
+ break;
+ case SCTP_IDATA:
+ peer_supports_idata = 1;
+ break;
+ default:
+ /* one I have not learned yet */
+ break;
+
+ }
+ }
+ } else if (ptype == SCTP_RANDOM) {
+ if (plen > sizeof(random_store))
+ break;
+ if (got_random) {
+ /* already processed a RANDOM */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)random_store,
+ plen);
+ if (phdr == NULL)
+ return (-26);
+ p_random = (struct sctp_auth_random *)phdr;
+ random_len = plen - sizeof(*p_random);
+ /* enforce the random length */
+ if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
+ SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: invalid RANDOM len\n");
+ return (-27);
+ }
+ got_random = 1;
+ } else if (ptype == SCTP_HMAC_LIST) {
+ uint16_t num_hmacs;
+ uint16_t i;
+
+ if (plen > sizeof(hmacs_store))
+ break;
+ if (got_hmacs) {
+ /* already processed a HMAC list */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)hmacs_store,
+ plen);
+ if (phdr == NULL)
+ return (-28);
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ hmacs_len = plen - sizeof(*hmacs);
+ num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+ /* validate the hmac list */
+ if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+ return (-29);
+ }
+ if (stcb->asoc.peer_hmacs != NULL)
+ sctp_free_hmaclist(stcb->asoc.peer_hmacs);
+ stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
+ if (stcb->asoc.peer_hmacs != NULL) {
+ for (i = 0; i < num_hmacs; i++) {
+ (void)sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
+ ntohs(hmacs->hmac_ids[i]));
+ }
+ }
+ got_hmacs = 1;
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i;
+
+ if (plen > sizeof(chunks_store))
+ break;
+ if (got_chklist) {
+ /* already processed a Chunks list */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)chunks_store,
+ plen);
+ if (phdr == NULL)
+ return (-30);
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ if (stcb->asoc.peer_auth_chunks != NULL)
+ sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
+ else
+ stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
+ for (i = 0; i < num_chunks; i++) {
+ (void)sctp_auth_add_chunk(chunks->chunk_types[i],
+ stcb->asoc.peer_auth_chunks);
+ /* record asconf/asconf-ack if listed */
+ if (chunks->chunk_types[i] == SCTP_ASCONF)
+ saw_asconf = 1;
+ if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+ saw_asconf_ack = 1;
+
+ }
+ got_chklist = 1;
+ } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
+ (ptype == SCTP_STATE_COOKIE) ||
+ (ptype == SCTP_UNRECOG_PARAM) ||
+ (ptype == SCTP_COOKIE_PRESERVE) ||
+ (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
+ (ptype == SCTP_ADD_IP_ADDRESS) ||
+ (ptype == SCTP_DEL_IP_ADDRESS) ||
+ (ptype == SCTP_ERROR_CAUSE_IND) ||
+ (ptype == SCTP_SUCCESS_REPORT)) {
+ /* don't care */ ;
+ } else {
+ if ((ptype & 0x8000) == 0x0000) {
+				/*
+				 * Must stop processing the rest of the
+				 * params. Any report bits were handled
+				 * with the call to
+				 * sctp_arethere_unrecognized_parameters()
+				 * when the INIT or INIT-ACK was first seen.
+				 */
+ break;
+ }
+ }
+
+ next_param:
+ offset += SCTP_SIZE32(plen);
+ if (offset >= limit) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset, &param_buf,
+ sizeof(param_buf));
+ }
+ /* Now check to see if we need to purge any addresses */
+ TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) {
+ if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
+ SCTP_ADDR_NOT_IN_ASSOC) {
+ /* This address has been removed from the asoc */
+ /* remove and free it */
+ stcb->asoc.numnets--;
+ TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ if (net == stcb->asoc.primary_destination) {
+ stcb->asoc.primary_destination = NULL;
+ sctp_select_primary_destination(stcb);
+ }
+ }
+ }
+ if ((stcb->asoc.ecn_supported == 1) &&
+ (peer_supports_ecn == 0)) {
+ stcb->asoc.ecn_supported = 0;
+ }
+ if ((stcb->asoc.prsctp_supported == 1) &&
+ (peer_supports_prsctp == 0)) {
+ stcb->asoc.prsctp_supported = 0;
+ }
+ if ((stcb->asoc.auth_supported == 1) &&
+ ((peer_supports_auth == 0) ||
+ (got_random == 0) || (got_hmacs == 0))) {
+ stcb->asoc.auth_supported = 0;
+ }
+ if ((stcb->asoc.asconf_supported == 1) &&
+ ((peer_supports_asconf == 0) || (peer_supports_asconf_ack == 0) ||
+ (stcb->asoc.auth_supported == 0) ||
+ (saw_asconf == 0) || (saw_asconf_ack == 0))) {
+ stcb->asoc.asconf_supported = 0;
+ }
+ if ((stcb->asoc.reconfig_supported == 1) &&
+ (peer_supports_reconfig == 0)) {
+ stcb->asoc.reconfig_supported = 0;
+ }
+ if ((stcb->asoc.idata_supported == 1) &&
+ (peer_supports_idata == 0)) {
+ stcb->asoc.idata_supported = 0;
+ }
+ if ((stcb->asoc.nrsack_supported == 1) &&
+ (peer_supports_nrsack == 0)) {
+ stcb->asoc.nrsack_supported = 0;
+ }
+ if ((stcb->asoc.pktdrop_supported == 1) &&
+ (peer_supports_pktdrop == 0)){
+ stcb->asoc.pktdrop_supported = 0;
+ }
+ /* validate authentication required parameters */
+ if ((peer_supports_auth == 0) && (got_chklist == 1)) {
+ /* peer does not support auth but sent a chunks list? */
+ return (-31);
+ }
+ if ((peer_supports_asconf == 1) && (peer_supports_auth == 0)) {
+ /* peer supports asconf but not auth? */
+ return (-32);
+ } else if ((peer_supports_asconf == 1) &&
+ (peer_supports_auth == 1) &&
+ ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+ return (-33);
+ }
+ /* concatenate the full random key */
+ keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
+ if (chunks != NULL) {
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* copy in the RANDOM */
+ if (p_random != NULL) {
+ keylen = sizeof(*p_random) + random_len;
+ memcpy(new_key->key, p_random, keylen);
+ } else {
+ keylen = 0;
+ }
+ /* append in the AUTH chunks */
+ if (chunks != NULL) {
+ memcpy(new_key->key + keylen, chunks,
+ sizeof(*chunks) + num_chunks);
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ /* append in the HMACs */
+ if (hmacs != NULL) {
+ memcpy(new_key->key + keylen, hmacs,
+ sizeof(*hmacs) + hmacs_len);
+ }
+ } else {
+ /* failed to get memory for the key */
+ return (-34);
+ }
+ if (stcb->asoc.authinfo.peer_random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.peer_random);
+ stcb->asoc.authinfo.peer_random = new_key;
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+ return (0);
+}
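+
+/*
+ * The concatenation above follows RFC 4895: the peer's key material is
+ * the RANDOM, CHUNKS and HMAC-ALGO parameters, in that order, with their
+ * 4-byte parameter headers included and inter-parameter padding excluded.
+ * A worked size check (illustrative numbers only), assuming the required
+ * 32-byte random, a two-entry chunk list and two HMAC identifiers:
+ *
+ *   keylen = (4 + 32)   RANDOM:    header + 32 random bytes
+ *          + (4 + 2)    CHUNKS:    header + 2 chunk types
+ *          + (4 + 4)    HMAC-ALGO: header + two 16-bit HMAC IDs
+ *          = 50 bytes copied into new_key->key.
+ */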
+
+int
+sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
+ struct sctp_nets *net)
+{
+ /* make sure the requested primary address exists in the assoc */
+ if (net == NULL && sa)
+ net = sctp_findnet(stcb, sa);
+
+ if (net == NULL) {
+ /* didn't find the requested primary address! */
+ return (-1);
+ } else {
+ /* set the primary address */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+			/* address must be confirmed first, so queue the request */
+ net->dest_state |= SCTP_ADDR_REQ_PRIMARY;
+ return (0);
+ }
+ stcb->asoc.primary_destination = net;
+ if (!(net->dest_state & SCTP_ADDR_PF) && (stcb->asoc.alternate)) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net != stcb->asoc.primary_destination) {
+			/* The first one on the list is NOT the primary.
+			 * sctp_cmpaddr() is much more efficient if the
+			 * primary is first on the list, so make it so.
+			 */
+ TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+ }
+ return (0);
+ }
+}
+
+int
+sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now)
+{
+	/*
+	 * This function serves two purposes. It checks whether a TAG can
+	 * be re-used, returning 1 if it is OK to use and 0 if it is not.
+	 * As a secondary function it purges out old tags that can be
+	 * removed.
+	 */
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ struct sctpasochead *head;
+ struct sctp_tcb *stcb;
+ int i;
+
+ SCTP_INP_INFO_RLOCK();
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+ SCTP_BASE_INFO(hashasocmark))];
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ /* We choose not to lock anything here. TCB's can't be
+ * removed since we have the read lock, so they can't
+ * be freed on us, same thing for the INP. I may
+ * be wrong with this assumption, but we will go
+ * with it for now :-)
+ */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ continue;
+ }
+ if (stcb->asoc.my_vtag == tag) {
+ /* candidate */
+ if (stcb->rport != rport) {
+ continue;
+ }
+ if (stcb->sctp_ep->sctp_lport != lport) {
+ continue;
+ }
+			/* It's a tag already in use */
+ SCTP_INP_INFO_RUNLOCK();
+ return (0);
+ }
+ }
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+	/* Now what about timed wait? */
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ /*
+ * Block(s) are present, lets see if we have this tag in the
+ * list
+ */
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if (twait_block->vtag_block[i].v_tag == 0) {
+ /* not used */
+ continue;
+ } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire <
+ now->tv_sec) {
+ /* Audit expires this guy */
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
+ } else if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
+ /* Bad tag, sorry :< */
+ SCTP_INP_INFO_RUNLOCK();
+ return (0);
+ }
+ }
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (1);
+}
+
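+/*
+ * A minimal caller sketch (not the library's actual selection routine,
+ * which is sctp_select_a_tag()): keep drawing candidate tags until
+ * sctp_is_vtag_good() accepts one for this port pair. The helper name
+ * pick_usable_vtag is hypothetical and sctp_select_initial_TSN() is only
+ * used here as a convenient 32-bit random source.
+ */
+#if 0	/* illustration only */
+static uint32_t
+pick_usable_vtag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport)
+{
+	uint32_t tag;
+	struct timeval now;
+
+	(void)SCTP_GETTIME_TIMEVAL(&now);
+	do {
+		tag = sctp_select_initial_TSN(&inp->sctp_ep);
+	} while ((tag == 0) ||
+	         (sctp_is_vtag_good(tag, lport, rport, &now) == 0));
+	return (tag);
+}
+#endif
+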
+static void
+sctp_drain_mbufs(struct sctp_tcb *stcb)
+{
+ /*
+ * We must hunt this association for MBUF's past the cumack (i.e.
+ * out of order data that we can renege on).
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *nchk;
+ uint32_t cumulative_tsn_p1;
+ struct sctp_queued_to_read *control, *ncontrol;
+ int cnt, strmat;
+ uint32_t gap, i;
+ int fnd = 0;
+
+ /* We look for anything larger than the cum-ack + 1 */
+
+ asoc = &stcb->asoc;
+ if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
+ /* none we can reneg on. */
+ return;
+ }
+ SCTP_STAT_INCR(sctps_protocol_drains_done);
+ cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
+ cnt = 0;
+	/* OK that was fun, now we will drain all the inbound streams. */
+ for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
+ TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].inqueue, next_instrm, ncontrol) {
+#ifdef INVARIANTS
+			if (control->on_strm_q != SCTP_ON_ORDERED) {
+ panic("Huh control: %p on_q: %d -- not ordered?",
+ control, control->on_strm_q);
+ }
+#endif
+ if (SCTP_TSN_GT(control->sinfo_tsn, cumulative_tsn_p1)) {
+ /* Yep it is above cum-ack */
+ cnt++;
+ SCTP_CALC_TSN_TO_GAP(gap, control->sinfo_tsn, asoc->mapping_array_base_tsn);
+ KASSERT(control->length > 0, ("control has zero length"));
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (control->on_read_q) {
+ TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
+ control->on_read_q = 0;
+ }
+ TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, control, next_instrm);
+ control->on_strm_q = 0;
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ sctp_free_remote_addr(control->whoFrom);
+				/* Now free its reassembly queue */
+ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+ cnt++;
+ SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
+ KASSERT(chk->send_size > 0, ("chunk has zero length"));
+ if (asoc->size_on_reasm_queue >= chk->send_size) {
+ asoc->size_on_reasm_queue -= chk->send_size;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
+#else
+ asoc->size_on_reasm_queue = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ sctp_free_a_readq(stcb, control);
+ }
+ }
+ TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].uno_inqueue, next_instrm, ncontrol) {
+#ifdef INVARIANTS
+			if (control->on_strm_q != SCTP_ON_UNORDERED) {
+ panic("Huh control: %p on_q: %d -- not unordered?",
+ control, control->on_strm_q);
+ }
+#endif
+ if (SCTP_TSN_GT(control->sinfo_tsn, cumulative_tsn_p1)) {
+ /* Yep it is above cum-ack */
+ cnt++;
+ SCTP_CALC_TSN_TO_GAP(gap, control->sinfo_tsn, asoc->mapping_array_base_tsn);
+ KASSERT(control->length > 0, ("control has zero length"));
+ if (asoc->size_on_all_streams >= control->length) {
+ asoc->size_on_all_streams -= control->length;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+ asoc->size_on_all_streams = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (control->on_read_q) {
+ TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
+ control->on_read_q = 0;
+ }
+ TAILQ_REMOVE(&asoc->strmin[strmat].uno_inqueue, control, next_instrm);
+ control->on_strm_q = 0;
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ sctp_free_remote_addr(control->whoFrom);
+				/* Now free its reassembly queue */
+ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+ cnt++;
+ SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
+ KASSERT(chk->send_size > 0, ("chunk has zero length"));
+ if (asoc->size_on_reasm_queue >= chk->send_size) {
+ asoc->size_on_reasm_queue -= chk->send_size;
+ } else {
+#ifdef INVARIANTS
+ panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
+#else
+ asoc->size_on_reasm_queue = 0;
+#endif
+ }
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ sctp_free_a_readq(stcb, control);
+ }
+ }
+ }
+ if (cnt) {
+ /* We must back down to see what the new highest is */
+ for (i = asoc->highest_tsn_inside_map; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
+ SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ asoc->highest_tsn_inside_map = i;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+ }
+
+ /*
+ * Question, should we go through the delivery queue? The only
+ * reason things are on here is the app not reading OR a p-d-api up.
+ * An attacker COULD send enough in to initiate the PD-API and then
+ * send a bunch of stuff to other streams... these would wind up on
+ * the delivery queue.. and then we would not get to them. But in
+ * order to do this I then have to back-track and un-deliver
+ * sequence numbers in streams.. el-yucko. I think for now we will
+ * NOT look at the delivery queue and leave it to be something to
+ * consider later. An alternative would be to abort the P-D-API with
+ * a notification and then deliver the data.... Or another method
+ * might be to keep track of how many times the situation occurs and
+ * if we see a possible attack underway just abort the association.
+ */
+#ifdef SCTP_DEBUG
+ SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt);
+#endif
+ /*
+ * Now do we need to find a new
+ * asoc->highest_tsn_inside_map?
+ */
+ asoc->last_revoke_count = cnt;
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_11);
+ /*sa_ignore NO_NULL_CHK*/
+ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED);
+ }
+ /*
+ * Another issue, in un-setting the TSN's in the mapping array we
+ * DID NOT adjust the highest_tsn marker. This will cause one of two
+ * things to occur. It may cause us to do extra work in checking for
+ * our mapping array movement. More importantly it may cause us to
+ * SACK every datagram. This may not be a bad thing though since we
+	 * will recover once we get our cum-ack above, and all this stuff we
+	 * dumped will be recovered.
+ */
+}
+
+void
+sctp_drain(void)
+{
+ /*
+ * We must walk the PCB lists for ALL associations here. The system
+ * is LOW on MBUF's and needs help. This is where reneging will
+ * occur. We really hope this does NOT happen!
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ VNET_ITERATOR_DECL(vnet_iter);
+#else
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+
+ SCTP_STAT_INCR(sctps_protocol_drain_calls);
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ return;
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_STAT_INCR(sctps_protocol_drain_calls);
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+#ifdef VIMAGE
+ continue;
+#else
+ return;
+#endif
+ }
+#endif
+ SCTP_INP_INFO_RLOCK();
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ /* For each endpoint */
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /* For each association */
+ SCTP_TCB_LOCK(stcb);
+ sctp_drain_mbufs(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+#endif
+}
+
+/*
+ * Start a new iterator. It iterates through all endpoints and associations
+ * based on the pcb_state flags and asoc_state. "af" (mandatory) is executed
+ * for all matching assocs and "ef" (optional) is executed when the iterator
+ * completes. "inpf" (optional) is executed for each new endpoint as it is
+ * being iterated through. "inpe" (optional) is called when the inp completes
+ * its way through all the stcbs.
+ */
+int
+sctp_initiate_iterator(inp_func inpf,
+ asoc_func af,
+ inp_func inpe,
+ uint32_t pcb_state,
+ uint32_t pcb_features,
+ uint32_t asoc_state,
+ void *argp,
+ uint32_t argi,
+ end_func ef,
+ struct sctp_inpcb *s_inp,
+ uint8_t chunk_output_off)
+{
+ struct sctp_iterator *it = NULL;
+
+ if (af == NULL) {
+ return (-1);
+ }
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+ SCTP_PRINTF("%s: abort on initialize being %d\n", __func__,
+ SCTP_BASE_VAR(sctp_pcb_initialized));
+ return (-1);
+ }
+ SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
+ SCTP_M_ITER);
+ if (it == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+ return (-1);
+ }
+ memset(it, 0, sizeof(*it));
+ it->function_assoc = af;
+ it->function_inp = inpf;
+ if (inpf)
+ it->done_current_ep = 0;
+ else
+ it->done_current_ep = 1;
+ it->function_atend = ef;
+ it->pointer = argp;
+ it->val = argi;
+ it->pcb_flags = pcb_state;
+ it->pcb_features = pcb_features;
+ it->asoc_state = asoc_state;
+ it->function_inp_end = inpe;
+ it->no_chunk_output = chunk_output_off;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ it->vn = curvnet;
+#endif
+ if (s_inp) {
+ /* Assume lock is held here */
+ it->inp = s_inp;
+ SCTP_INP_INCR_REF(it->inp);
+ it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead));
+ if (it->inp) {
+ SCTP_INP_INCR_REF(it->inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
+	}
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+ SCTP_PRINTF("%s: rollback on initialize being %d it=%p\n", __func__,
+ SCTP_BASE_VAR(sctp_pcb_initialized), it);
+ SCTP_FREE(it, SCTP_M_ITER);
+ return (-1);
+ }
+ TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ if (sctp_it_ctl.iterator_running == 0) {
+ sctp_wakeup_iterator();
+ }
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+ /* sa_ignore MEMLEAK {memory is put on the tailq for the iterator} */
+ return (0);
+}
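+
+/*
+ * A hedged usage sketch for the iterator (not upstream code): run a
+ * callback on every association of every endpoint. The asoc_func and
+ * end_func typedefs come from sctp_structs.h; count_assocs_cb, on_done
+ * and start_count are hypothetical names; SCTP_PCB_ANY_FLAGS,
+ * SCTP_PCB_ANY_FEATURES and SCTP_ASOC_ANY_STATE are the wildcard
+ * selectors from sctp_constants.h.
+ */
+#if 0	/* illustration only */
+static uint32_t assoc_count;
+
+static void
+count_assocs_cb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+    uint32_t val)
+{
+	(void)inp;
+	(void)stcb;
+	(void)val;
+	(*(uint32_t *)ptr)++;
+}
+
+static void
+on_done(void *ptr, uint32_t val)
+{
+	(void)val;
+	SCTP_PRINTF("iterator visited %u assocs\n", *(uint32_t *)ptr);
+}
+
+static void
+start_count(void)
+{
+	(void)sctp_initiate_iterator(NULL, count_assocs_cb, NULL,
+	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
+	    &assoc_count, 0, on_done, NULL, 0);
+}
+#endif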
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.h
new file mode 100644
index 000000000..3b0ea3e1f
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_pcb.h
@@ -0,0 +1,879 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.h 362106 2020-06-12 16:31:13Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_PCB_H_
+#define _NETINET_SCTP_PCB_H_
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+#include <netinet/sctp_sysctl.h>
+
+LIST_HEAD(sctppcbhead, sctp_inpcb);
+LIST_HEAD(sctpasochead, sctp_tcb);
+LIST_HEAD(sctpladdr, sctp_laddr);
+LIST_HEAD(sctpvtaghead, sctp_tagblock);
+LIST_HEAD(sctp_vrflist, sctp_vrf);
+LIST_HEAD(sctp_ifnlist, sctp_ifn);
+LIST_HEAD(sctp_ifalist, sctp_ifa);
+TAILQ_HEAD(sctp_readhead, sctp_queued_to_read);
+TAILQ_HEAD(sctp_streamhead, sctp_stream_queue_pending);
+
+#include <netinet/sctp_structs.h>
+#include <netinet/sctp_auth.h>
+
+#define SCTP_PCBHASH_ALLADDR(port, mask) (port & mask)
+#define SCTP_PCBHASH_ASOC(tag, mask) (tag & mask)
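+
+/*
+ * Both masks come from power-of-two sized hash tables, so a lookup is a
+ * single bitwise AND. An illustrative helper (hypothetical name) that
+ * mirrors the vtag bucket lookup done in sctp_is_vtag_good() in
+ * sctp_pcb.c:
+ *
+ *	static struct sctpasochead *
+ *	vtag_bucket(uint32_t tag)
+ *	{
+ *		return (&SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+ *		    SCTP_BASE_INFO(hashasocmark))]);
+ *	}
+ */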
+
+struct sctp_vrf {
+ LIST_ENTRY (sctp_vrf) next_vrf;
+ struct sctp_ifalist *vrf_addr_hash;
+ struct sctp_ifnlist ifnlist;
+ uint32_t vrf_id;
+ uint32_t tbl_id_v4; /* default v4 table id */
+ uint32_t tbl_id_v6; /* default v6 table id */
+ uint32_t total_ifa_count;
+ u_long vrf_addr_hashmark;
+ uint32_t refcount;
+};
+
+struct sctp_ifn {
+ struct sctp_ifalist ifalist;
+ struct sctp_vrf *vrf;
+ LIST_ENTRY(sctp_ifn) next_ifn;
+ LIST_ENTRY(sctp_ifn) next_bucket;
+ void *ifn_p; /* never access without appropriate lock */
+ uint32_t ifn_mtu;
+ uint32_t ifn_type;
+ uint32_t ifn_index; /* shorthand way to look at ifn for reference */
+	uint32_t refcount;	/* number of references held; should be >= ifa_count */
+	uint32_t ifa_count;	/* IFA's we hold (in our list, ifalist) */
+ uint32_t num_v6; /* number of v6 addresses */
+ uint32_t num_v4; /* number of v4 addresses */
+ uint32_t registered_af; /* registered address family for i/f events */
+ char ifn_name[SCTP_IFNAMSIZ];
+};
+
+/* SCTP local IFA flags */
+#define SCTP_ADDR_VALID 0x00000001 /* it's up and active */
+#define SCTP_BEING_DELETED 0x00000002 /* being deleted; freed when
+				* refcount reaches 0. Note that it is
+				* pulled from the ifn list and ifa_p is
+				* nulled right away, but it cannot be
+				* freed until the last *net pointing to
+				* it is deleted.
+				*/
+#define SCTP_ADDR_DEFER_USE 0x00000004 /* Hold off using this one */
+#define SCTP_ADDR_IFA_UNUSEABLE 0x00000008
+
+struct sctp_ifa {
+ LIST_ENTRY(sctp_ifa) next_ifa;
+ LIST_ENTRY(sctp_ifa) next_bucket;
+ struct sctp_ifn *ifn_p; /* back pointer to parent ifn */
+	void *ifa; /* pointer to ifa; to update its flags
+	            * we MUST hold the appropriate locks.
+	            * This is for V6.
+	            */
+ union sctp_sockstore address;
+ uint32_t refcount; /* number of folks referring to this */
+ uint32_t flags;
+ uint32_t localifa_flags;
+ uint32_t vrf_id; /* vrf_id of this addr (for deleting) */
+ uint8_t src_is_loop;
+ uint8_t src_is_priv;
+ uint8_t src_is_glob;
+ uint8_t resv;
+};
+
+struct sctp_laddr {
+ LIST_ENTRY(sctp_laddr) sctp_nxt_addr; /* next in list */
+ struct sctp_ifa *ifa;
+	uint32_t action; /* Used during asconf and adding;
+	                  * if non-zero, src-addr selection will
+	                  * not consider this address.
+	                  */
+ struct timeval start_time; /* time when this address was created */
+};
+
+struct sctp_block_entry {
+ int error;
+};
+
+struct sctp_timewait {
+ uint32_t tv_sec_at_expire; /* the seconds from boot to expire */
+ uint32_t v_tag; /* the vtag that can not be reused */
+ uint16_t lport; /* the local port used in vtag */
+ uint16_t rport; /* the remote port used in vtag */
+};
+
+struct sctp_tagblock {
+ LIST_ENTRY(sctp_tagblock) sctp_nxt_tagblock;
+ struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK];
+};
+
+
+struct sctp_epinfo {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef INET
+ struct socket *udp4_tun_socket;
+#endif
+#ifdef INET6
+ struct socket *udp6_tun_socket;
+#endif
+#endif
+ struct sctpasochead *sctp_asochash;
+ u_long hashasocmark;
+
+ struct sctppcbhead *sctp_ephash;
+ u_long hashmark;
+
+ /*-
+ * The TCP model represents a substantial overhead in that we get an
+ * additional hash table to keep explicit connections in. The
+ * listening TCP endpoint will exist in the usual ephash above and
+ * accept only INIT's. It will be incapable of sending off an INIT.
+ * When a dg arrives we must look in the normal ephash. If we find a
+ * TCP endpoint that will tell us to go to the specific endpoint
+ * hash and re-hash to find the right assoc/socket. If we find a UDP
+ * model socket we then must complete the lookup. If this fails,
+	 * i.e. no association can be found, then we must continue to see if
+ * a sctp_peeloff()'d socket is in the tcpephash (a spun off socket
+ * acts like a TCP model connected socket).
+ */
+ struct sctppcbhead *sctp_tcpephash;
+ u_long hashtcpmark;
+ uint32_t hashtblsize;
+
+ struct sctp_vrflist *sctp_vrfhash;
+ u_long hashvrfmark;
+
+ struct sctp_ifnlist *vrf_ifn_hash;
+ u_long vrf_ifn_hashmark;
+
+ struct sctppcbhead listhead;
+ struct sctpladdr addr_wq;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct inpcbhead inplisthead;
+ struct inpcbinfo sctbinfo;
+#endif
+ /* ep zone info */
+ sctp_zone_t ipi_zone_ep;
+ sctp_zone_t ipi_zone_asoc;
+ sctp_zone_t ipi_zone_laddr;
+ sctp_zone_t ipi_zone_net;
+ sctp_zone_t ipi_zone_chunk;
+ sctp_zone_t ipi_zone_readq;
+ sctp_zone_t ipi_zone_strmoq;
+ sctp_zone_t ipi_zone_asconf;
+ sctp_zone_t ipi_zone_asconf_ack;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct rwlock ipi_ep_mtx;
+ struct mtx ipi_iterator_wq_mtx;
+ struct rwlock ipi_addr_mtx;
+ struct mtx ipi_pktlog_mtx;
+ struct mtx wq_addr_mtx;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+ userland_mutex_t ipi_ep_mtx;
+ userland_mutex_t ipi_addr_mtx;
+ userland_mutex_t ipi_count_mtx;
+ userland_mutex_t ipi_pktlog_mtx;
+ userland_mutex_t wq_addr_mtx;
+#elif defined(__APPLE__) && !defined(__Userspace__)
+#ifdef _KERN_LOCKS_H_
+ lck_mtx_t *ipi_addr_mtx;
+ lck_mtx_t *ipi_count_mtx;
+ lck_mtx_t *ipi_pktlog_mtx;
+ lck_mtx_t *logging_mtx;
+ lck_mtx_t *wq_addr_mtx;
+#else
+ void *ipi_count_mtx;
+ void *logging_mtx;
+#endif /* _KERN_LOCKS_H_ */
+#elif defined(_WIN32) && !defined(__Userspace__)
+ struct rwlock ipi_ep_lock;
+ struct rwlock ipi_addr_lock;
+ struct spinlock ipi_pktlog_mtx;
+ struct rwlock wq_addr_mtx;
+#elif defined(__Userspace__)
+#endif
+ uint32_t ipi_count_ep;
+
+ /* assoc/tcb zone info */
+ uint32_t ipi_count_asoc;
+
+ /* local addrlist zone info */
+ uint32_t ipi_count_laddr;
+
+ /* remote addrlist zone info */
+ uint32_t ipi_count_raddr;
+
+ /* chunk structure list for output */
+ uint32_t ipi_count_chunk;
+
+ /* socket queue zone info */
+ uint32_t ipi_count_readq;
+
+ /* socket queue zone info */
+ uint32_t ipi_count_strmoq;
+
+ /* Number of vrfs */
+ uint32_t ipi_count_vrfs;
+
+ /* Number of ifns */
+ uint32_t ipi_count_ifns;
+
+ /* Number of ifas */
+ uint32_t ipi_count_ifas;
+
+ /* system wide number of free chunks hanging around */
+ uint32_t ipi_free_chunks;
+ uint32_t ipi_free_strmoq;
+
+ struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE];
+
+ /* address work queue handling */
+ struct sctp_timer addr_wq_timer;
+
+#if defined(_SCTP_NEEDS_CALLOUT_) || defined(_USER_SCTP_NEEDS_CALLOUT_)
+ struct calloutlist callqueue;
+#endif
+};
+
+
+struct sctp_base_info {
+ /* All static structures that
+ * anchor the system must be here.
+ */
+ struct sctp_epinfo sctppcbinfo;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ struct sctpstat *sctpstat;
+#else
+ struct sctpstat sctpstat;
+#endif
+#else
+ struct sctpstat sctpstat;
+#endif
+ struct sctp_sysctl sctpsysctl;
+ uint8_t first_time;
+ char sctp_pcb_initialized;
+#if defined(SCTP_PACKET_LOGGING)
+ int packet_log_writers;
+ int packet_log_end;
+ uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ eventhandler_tag eh_tag;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ int sctp_main_timer_ticks;
+#endif
+#if defined(__Userspace__)
+ userland_mutex_t timer_mtx;
+ userland_thread_t timer_thread;
+ int timer_thread_should_exit;
+ int iterator_thread_started;
+ int timer_thread_started;
+#if !defined(_WIN32)
+ pthread_mutexattr_t mtx_attr;
+#if defined(INET) || defined(INET6)
+ int userspace_route;
+ userland_thread_t recvthreadroute;
+#endif
+#endif
+#ifdef INET
+#if defined(_WIN32) && !defined(__MINGW32__)
+ SOCKET userspace_rawsctp;
+ SOCKET userspace_udpsctp;
+#else
+ int userspace_rawsctp;
+ int userspace_udpsctp;
+#endif
+ userland_thread_t recvthreadraw;
+ userland_thread_t recvthreadudp;
+#endif
+#ifdef INET6
+#if defined(_WIN32) && !defined(__MINGW32__)
+ SOCKET userspace_rawsctp6;
+ SOCKET userspace_udpsctp6;
+#else
+ int userspace_rawsctp6;
+ int userspace_udpsctp6;
+#endif
+ userland_thread_t recvthreadraw6;
+ userland_thread_t recvthreadudp6;
+#endif
+ int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df);
+ void (*debug_printf)(const char *format, ...);
+ int crc32c_offloaded;
+#endif
+};
+
+/*-
+ * Here we have all the relevant information for each SCTP entity created. We
+ * will need to modify this as appropriate. We also need to figure out how to
+ * access /dev/random.
+ */
+struct sctp_pcb {
+ unsigned int time_of_secret_change; /* number of seconds from
+ * timeval.tv_sec */
+ uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS];
+ unsigned int size_of_a_cookie;
+
+ uint32_t sctp_timeoutticks[SCTP_NUM_TMRS];
+ uint32_t sctp_minrto;
+ uint32_t sctp_maxrto;
+ uint32_t initial_rto;
+ uint32_t initial_init_rto_max;
+
+ unsigned int sctp_sack_freq;
+ uint32_t sctp_sws_sender;
+ uint32_t sctp_sws_receiver;
+
+ uint32_t sctp_default_cc_module;
+ uint32_t sctp_default_ss_module;
+ /* authentication related fields */
+ struct sctp_keyhead shared_keys;
+ sctp_auth_chklist_t *local_auth_chunks;
+ sctp_hmaclist_t *local_hmacs;
+ uint16_t default_keyid;
+ uint32_t default_mtu;
+
+ /* various thresholds */
+ /* Max times I will init at a guy */
+ uint16_t max_init_times;
+
+ /* Max times I will send before we consider someone dead */
+ uint16_t max_send_times;
+
+ uint16_t def_net_failure;
+
+ uint16_t def_net_pf_threshold;
+
+	/* number of streams to pre-open on an association */
+ uint16_t pre_open_stream_count;
+ uint16_t max_open_streams_intome;
+
+ /* random number generator */
+ uint32_t random_counter;
+ uint8_t random_numbers[SCTP_SIGNATURE_ALOC_SIZE];
+ uint8_t random_store[SCTP_SIGNATURE_ALOC_SIZE];
+
+ /*
+ * This timer is kept running per endpoint. When it fires it will
+	 * change the secret key. The default is once an hour.
+ */
+ struct sctp_timer signature_change;
+
+ uint32_t def_cookie_life;
+ /* defaults to 0 */
+ uint32_t auto_close_time;
+ uint32_t initial_sequence_debug;
+ uint32_t adaptation_layer_indicator;
+ uint8_t adaptation_layer_indicator_provided;
+ uint32_t store_at;
+ uint32_t max_burst;
+ uint32_t fr_max_burst;
+#ifdef INET6
+ uint32_t default_flowlabel;
+#endif
+ uint8_t default_dscp;
+ char current_secret_number;
+ char last_secret_number;
+ uint16_t port; /* remote UDP encapsulation port */
+};
+
+#ifndef SCTP_ALIGNMENT
+#define SCTP_ALIGNMENT 32
+#endif
+
+#ifndef SCTP_ALIGNM1
+#define SCTP_ALIGNM1 (SCTP_ALIGNMENT-1)
+#endif
+
+#define sctp_lport ip_inp.inp.inp_lport
+
+struct sctp_pcbtsn_rlog {
+ uint32_t vtag;
+ uint16_t strm;
+ uint16_t seq;
+ uint16_t sz;
+ uint16_t flgs;
+};
+#define SCTP_READ_LOG_SIZE 135 /* we choose the number to make a pcb a page */
+
+
+struct sctp_inpcb {
+ /*-
+ * put an inpcb in front of it all, kind of a waste but we need to
+ * for compatibility with all the other stuff.
+ */
+ union {
+ struct inpcb inp;
+ char align[(sizeof(struct inpcb) + SCTP_ALIGNM1) &
+ ~SCTP_ALIGNM1];
+ } ip_inp;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ /* leave some space in case i386 inpcb is bigger than ppc */
+ uint8_t padding[128];
+#endif
+
+ /* Socket buffer lock protects read_queue and of course sb_cc */
+ struct sctp_readhead read_queue;
+
+ LIST_ENTRY(sctp_inpcb) sctp_list; /* lists all endpoints */
+ /* hash of all endpoints for model */
+ LIST_ENTRY(sctp_inpcb) sctp_hash;
+ /* count of local addresses bound, 0 if bound all */
+ int laddr_count;
+
+ /* list of addrs in use by the EP, NULL if bound-all */
+ struct sctpladdr sctp_addr_list;
+ /* used for source address selection rotation when we are subset bound */
+ struct sctp_laddr *next_addr_touse;
+
+ /* back pointer to our socket */
+ struct socket *sctp_socket;
+ uint64_t sctp_features; /* Feature flags */
+ uint32_t sctp_flags; /* INP state flag set */
+ uint32_t sctp_mobility_features; /* Mobility Feature flags */
+ struct sctp_pcb sctp_ep;/* SCTP ep data */
+ /* head of the hash of all associations */
+ struct sctpasochead *sctp_tcbhash;
+ u_long sctp_hashmark;
+ /* head of the list of all associations */
+ struct sctpasochead sctp_asoc_list;
+#ifdef SCTP_TRACK_FREED_ASOCS
+ struct sctpasochead sctp_asoc_free_list;
+#endif
+ struct sctp_iterator *inp_starting_point_for_iterator;
+ uint32_t sctp_frag_point;
+ uint32_t partial_delivery_point;
+ uint32_t sctp_context;
+ uint32_t max_cwnd;
+ uint8_t local_strreset_support;
+ uint32_t sctp_cmt_on_off;
+ uint8_t ecn_supported;
+ uint8_t prsctp_supported;
+ uint8_t auth_supported;
+ uint8_t idata_supported;
+ uint8_t asconf_supported;
+ uint8_t reconfig_supported;
+ uint8_t nrsack_supported;
+ uint8_t pktdrop_supported;
+ struct sctp_nonpad_sndrcvinfo def_send;
+	/*-
+	 * These three (pkt, pkt_last and control) are here
+	 * for the sosend_dgram routine. However, I don't
+	 * think anyone in the current FreeBSD kernel calls
+	 * this, so they are candidates, along with sctp_sendm,
+	 * for de-supporting.
+	 */
+ struct mbuf *pkt, *pkt_last;
+ struct mbuf *control;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct mtx inp_mtx;
+ struct mtx inp_create_mtx;
+ struct mtx inp_rdata_mtx;
+ int32_t refcount;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+ userland_mutex_t inp_mtx;
+ userland_mutex_t inp_create_mtx;
+ userland_mutex_t inp_rdata_mtx;
+ int32_t refcount;
+#elif defined(__APPLE__) && !defined(__Userspace__)
+#if defined(SCTP_APPLE_RWLOCK)
+ lck_rw_t *inp_mtx;
+#else
+ lck_mtx_t *inp_mtx;
+#endif
+ lck_mtx_t *inp_create_mtx;
+ lck_mtx_t *inp_rdata_mtx;
+#elif defined(_WIN32) && !defined(__Userspace__)
+ struct rwlock inp_lock;
+ struct spinlock inp_create_lock;
+ struct spinlock inp_rdata_lock;
+ int32_t refcount;
+#elif defined(__Userspace__)
+ int32_t refcount;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ int32_t refcount;
+
+ uint32_t lock_caller1;
+ uint32_t lock_caller2;
+ uint32_t lock_caller3;
+ uint32_t unlock_caller1;
+ uint32_t unlock_caller2;
+ uint32_t unlock_caller3;
+ uint32_t getlock_caller1;
+ uint32_t getlock_caller2;
+ uint32_t getlock_caller3;
+ uint32_t gen_count;
+ uint32_t lock_gen_count;
+ uint32_t unlock_gen_count;
+ uint32_t getlock_gen_count;
+
+ uint32_t i_am_here_file;
+ uint32_t i_am_here_line;
+#endif
+ uint32_t def_vrf_id;
+ uint16_t fibnum;
+#ifdef SCTP_MVRF
+ uint32_t *m_vrf_ids;
+ uint32_t num_vrfs;
+ uint32_t vrf_size;
+#endif
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ uint32_t last_abort_code;
+ uint32_t total_nospaces;
+ struct sctpasochead *sctp_asocidhash;
+ u_long hashasocidmark;
+ uint32_t sctp_associd_counter;
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ struct sctp_pcbtsn_rlog readlog[SCTP_READ_LOG_SIZE];
+ uint32_t readlog_index;
+#endif
+#if defined(__Userspace__)
+ void *ulp_info;
+ int (*recv_callback)(struct socket *, union sctp_sockstore, void *, size_t,
+ struct sctp_rcvinfo, int, void *);
+ uint32_t send_sb_threshold;
+ int (*send_callback)(struct socket *, uint32_t);
+#endif
+};
+
+#if defined(__Userspace__)
+int register_recv_cb (struct socket *,
+ int (*)(struct socket *, union sctp_sockstore, void *, size_t,
+ struct sctp_rcvinfo, int, void *));
+int register_send_cb (struct socket *, uint32_t, int (*)(struct socket *, uint32_t));
+int register_ulp_info (struct socket *, void *);
+int retrieve_ulp_info (struct socket *, void **);
+
+#endif
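+
+/*
+ * A minimal sketch of the userspace callback plumbing these fields and
+ * registration functions back (application-side code, not part of this
+ * header). The public usrsctp_socket() wrapper in usrsctp.h takes the
+ * same receive/send callback signatures; on_receive and
+ * open_callback_socket are hypothetical names. The data buffer handed to
+ * the receive callback is owned by the application and must be freed by
+ * it.
+ */
+#if 0	/* illustration only */
+#include <stdlib.h>
+#include <usrsctp.h>
+
+static int
+on_receive(struct socket *so, union sctp_sockstore addr, void *data,
+    size_t datalen, struct sctp_rcvinfo rcv, int flags, void *ulp_info)
+{
+	if (data != NULL) {
+		/* consume datalen bytes from stream rcv.rcv_sid, then free */
+		free(data);
+	}
+	return (1);
+}
+
+static struct socket *
+open_callback_socket(void)
+{
+	return (usrsctp_socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP,
+	    on_receive, NULL /* send_cb */, 0 /* sb_threshold */,
+	    NULL /* ulp_info */));
+}
+#endif
+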
+struct sctp_tcb {
+ struct socket *sctp_socket; /* back pointer to socket */
+ struct sctp_inpcb *sctp_ep; /* back pointer to ep */
+ LIST_ENTRY(sctp_tcb) sctp_tcbhash; /* next link in hash
+ * table */
+ LIST_ENTRY(sctp_tcb) sctp_tcblist; /* list of all of the
+ * TCB's */
+ LIST_ENTRY(sctp_tcb) sctp_tcbasocidhash; /* next link in asocid
+ * hash table
+ */
+ LIST_ENTRY(sctp_tcb) sctp_asocs; /* vtag hash list */
+ struct sctp_block_entry *block_entry; /* pointer locked by socket
+ * send buffer */
+ struct sctp_association asoc;
+ /*-
+ * freed_by_sorcv_sincelast is protected by the sockbuf_lock NOT the
+ * tcb_lock. Its special in this way to help avoid extra mutex calls
+ * in the reading of data.
+ */
+ uint32_t freed_by_sorcv_sincelast;
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ int freed_from_where;
+ uint16_t rport; /* remote port in network format */
+ uint16_t resv;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct mtx tcb_mtx;
+ struct mtx tcb_send_mtx;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+ userland_mutex_t tcb_mtx;
+ userland_mutex_t tcb_send_mtx;
+#elif defined(__APPLE__) && !defined(__Userspace__)
+ lck_mtx_t* tcb_mtx;
+ lck_mtx_t* tcb_send_mtx;
+#elif defined(_WIN32) && !defined(__Userspace__)
+ struct spinlock tcb_lock;
+ struct spinlock tcb_send_lock;
+#elif defined(__Userspace__)
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ uint32_t caller1;
+ uint32_t caller2;
+ uint32_t caller3;
+#endif
+};
+
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+
+#include <netinet/sctp_lock_bsd.h>
+
+#elif defined(__APPLE__) && !defined(__Userspace__)
+/*
+ * Apple MacOS X 10.4 "Tiger"
+ */
+
+#include <netinet/sctp_lock_apple_fg.h>
+
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+
+#include <netinet/sctp_process_lock.h>
+
+#elif defined(_WIN32) && !defined(__Userspace__)
+
+#include <netinet/sctp_lock_windows.h>
+
+#elif defined(__Userspace__)
+
+#include <netinet/sctp_lock_userspace.h>
+
+#else
+/*
+ * Pre-5.x FreeBSD, and others.
+ */
+#include <netinet/sctp_lock_empty.h>
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/* Attention Julian, this is the extern that
+ * goes with the base info. sctp_pcb.c has
+ * the real definition.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+VNET_DECLARE(struct sctp_base_info, system_base_info);
+#else
+extern struct sctp_base_info system_base_info;
+#endif
+
+#ifdef INET6
+int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b);
+#endif
+
+void sctp_fill_pcbinfo(struct sctp_pcbinfo *);
+
+struct sctp_ifn *
+sctp_find_ifn(void *ifn, uint32_t ifn_index);
+
+struct sctp_vrf *sctp_allocate_vrf(int vrfid);
+struct sctp_vrf *sctp_find_vrf(uint32_t vrfid);
+void sctp_free_vrf(struct sctp_vrf *vrf);
+
+/*-
+ * Change address state, can be used if
+ * O/S supports telling transports about
+ * changes to IFA/IFN's (link layer triggers).
+ * If a ifn goes down, we will do src-addr-selection
+ * and NOT use that, as a source address. This does
+ * not stop the routing system from routing out
+ * that interface, but we won't put it as a source.
+ */
+void sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index);
+void sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index);
+
+struct sctp_ifa *
+sctp_add_addr_to_vrf(uint32_t vrfid,
+ void *ifn, uint32_t ifn_index, uint32_t ifn_type,
+ const char *if_name,
+ void *ifa, struct sockaddr *addr, uint32_t ifa_flags,
+ int dynamic_add);
+
+void sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu);
+
+void sctp_free_ifn(struct sctp_ifn *sctp_ifnp);
+void sctp_free_ifa(struct sctp_ifa *sctp_ifap);
+
+
+void sctp_del_addr_from_vrf(uint32_t vrfid, struct sockaddr *addr,
+ uint32_t ifn_index, const char *if_name);
+
+
+
+struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *);
+
+struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int, uint32_t);
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+int sctp_inpcb_bind(struct socket *, struct sockaddr *,
+                    struct sctp_ifa *, struct thread *);
+#elif defined(_WIN32) && !defined(__Userspace__)
+int sctp_inpcb_bind(struct socket *, struct sockaddr *,
+                    struct sctp_ifa *, PKTHREAD);
+#else
+/* struct proc is a dummy for __Userspace__ */
+int sctp_inpcb_bind(struct socket *, struct sockaddr *,
+ struct sctp_ifa *, struct proc *);
+#endif
+
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *, int,
+ struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb **,
+ struct sctp_nets **, uint32_t vrf_id);
+
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *,
+ struct sockaddr *, struct sctp_inpcb **, struct sctp_nets **, int, uint32_t);
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *, struct sctp_inpcb *,
+ struct sctp_tcb *);
+
+/*-
+ * For this call (ep_addr), the "to" is the destination endpoint address of
+ * the peer (relative to outbound). The "from" field is only used if the TCP
+ * model is enabled and helps distinguish among the subset-bound
+ * (non-boundall) endpoints. The TCP model MAY change the actual ep field;
+ * this is why it is passed.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **,
+ struct sockaddr *, struct sctp_nets **, struct sockaddr *,
+ struct sctp_tcb *);
+
+struct sctp_tcb *
+sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *,
+ sctp_assoc_t, int);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *, int, struct sockaddr *,
+ struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **, uint32_t vrf_id);
+
+int sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id);
+
+int sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id);
+
+void sctp_inpcb_free(struct sctp_inpcb *, int, int);
+
+#define SCTP_DONT_INITIALIZE_AUTH_PARAMS 0
+#define SCTP_INITIALIZE_AUTH_PARAMS 1
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+ int *, uint32_t, uint32_t, uint16_t, uint16_t, struct thread *,
+ int);
+#elif defined(_WIN32) && !defined(__Userspace__)
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+ int *, uint32_t, uint32_t, uint16_t, uint16_t, PKTHREAD, int);
+#else
+/* proc will be NULL for __Userspace__ */
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+ int *, uint32_t, uint32_t, uint16_t, uint16_t, struct proc *,
+ int);
+#endif
+
+int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int);
+
+
+void sctp_delete_from_timewait(uint32_t, uint16_t, uint16_t);
+
+int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport);
+
+void
+sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport);
+
+void sctp_add_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *, uint32_t);
+
+void sctp_del_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *);
+
+int sctp_add_remote_addr(struct sctp_tcb *, struct sockaddr *, struct sctp_nets **, uint16_t, int, int);
+
+void sctp_remove_net(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_del_remote_addr(struct sctp_tcb *, struct sockaddr *);
+
+#if defined(__Userspace__)
+void sctp_pcb_init(int);
+#else
+void sctp_pcb_init(void);
+#endif
+
+void sctp_pcb_finish(void);
+
+void sctp_add_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+void sctp_del_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sockaddr *, struct sockaddr *, struct sockaddr *, uint16_t);
+
+int
+sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *,
+ struct sctp_nets *);
+
+int sctp_is_vtag_good(uint32_t, uint16_t lport, uint16_t rport, struct timeval *);
+
+/* void sctp_drain(void); */
+
+int sctp_destination_is_reachable(struct sctp_tcb *, struct sockaddr *);
+
+int sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp);
+
+void sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh);
+
+/*-
+ * Null in last arg inpcb indicate run on ALL ep's. Specific inp in last arg
+ * indicates run on ONLY assoc's of the specified endpoint.
+ */
+int
+sctp_initiate_iterator(inp_func inpf,
+ asoc_func af,
+ inp_func inpe,
+ uint32_t, uint32_t,
+ uint32_t, void *,
+ uint32_t,
+ end_func ef,
+ struct sctp_inpcb *,
+ uint8_t co_off);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_MCORE_INPUT) && defined(SMP)
+void
+sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use);
+
+#endif
+#endif
+
+#endif /* _KERNEL || __Userspace__ */
+#endif /* _NETINET_SCTP_PCB_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.c
new file mode 100644
index 000000000..8581f515f
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.c
@@ -0,0 +1,303 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.c 362054 2020-06-11 13:34:09Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_peeloff.h>
+#include <netinet/sctp_auth.h>
+
+int
+sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ uint32_t state;
+
+ if (head == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EBADF);
+ return (EBADF);
+ }
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+ return (EFAULT);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EOPNOTSUPP);
+ return (EOPNOTSUPP);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOENT);
+ return (ENOENT);
+ }
+ state = SCTP_GET_STATE(stcb);
+ if ((state == SCTP_STATE_EMPTY) ||
+ (state == SCTP_STATE_INUSE)) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ /* We are clear to peel this one off */
+ return (0);
+}
+
+int
+sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
+{
+ struct sctp_inpcb *inp, *n_inp;
+ struct sctp_tcb *stcb;
+ uint32_t state;
+
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+ return (EFAULT);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ return (ENOTCONN);
+ }
+
+ state = SCTP_GET_STATE(stcb);
+ if ((state == SCTP_STATE_EMPTY) ||
+ (state == SCTP_STATE_INUSE)) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ return (ENOTCONN);
+ }
+
+ n_inp = (struct sctp_inpcb *)so->so_pcb;
+ n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
+ (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+ n_inp->sctp_socket = so;
+ n_inp->sctp_features = inp->sctp_features;
+ n_inp->sctp_mobility_features = inp->sctp_mobility_features;
+ n_inp->sctp_frag_point = inp->sctp_frag_point;
+ n_inp->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+ n_inp->ecn_supported = inp->ecn_supported;
+ n_inp->prsctp_supported = inp->prsctp_supported;
+ n_inp->auth_supported = inp->auth_supported;
+ n_inp->asconf_supported = inp->asconf_supported;
+ n_inp->reconfig_supported = inp->reconfig_supported;
+ n_inp->nrsack_supported = inp->nrsack_supported;
+ n_inp->pktdrop_supported = inp->pktdrop_supported;
+ n_inp->partial_delivery_point = inp->partial_delivery_point;
+ n_inp->sctp_context = inp->sctp_context;
+ n_inp->max_cwnd = inp->max_cwnd;
+ n_inp->local_strreset_support = inp->local_strreset_support;
+ n_inp->inp_starting_point_for_iterator = NULL;
+ /* copy in the authentication parameters from the original endpoint */
+ if (n_inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+ n_inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (n_inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+ n_inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &n_inp->sctp_ep.shared_keys);
+#if defined(__Userspace__)
+ n_inp->ulp_info = inp->ulp_info;
+ n_inp->recv_callback = inp->recv_callback;
+ n_inp->send_callback = inp->send_callback;
+ n_inp->send_sb_threshold = inp->send_sb_threshold;
+#endif
+ /*
+ * Now we must move it from one hash table to another and get the
+ * stcb in the right place.
+ */
+ sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
+#else
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+
+ return (0);
+}
+
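+/*
+ * A hedged userspace usage sketch (application-side code, not part of
+ * this file): the public usrsctp_peeloff() wrapper declared in usrsctp.h
+ * drives the sctp_can_peel_off()/sctp_do_peeloff() pair above and
+ * returns a new one-to-one socket for the given association. peel_one is
+ * a hypothetical helper name.
+ */
+#if 0	/* illustration only */
+#include <usrsctp.h>
+
+static struct socket *
+peel_one(struct socket *one_to_many, sctp_assoc_t id)
+{
+	struct socket *one_to_one;
+
+	one_to_one = usrsctp_peeloff(one_to_many, id);
+	if (one_to_one == NULL) {
+		/* errno carries EBADF/EOPNOTSUPP/ENOTCONN/... from the
+		 * checks performed above */
+	}
+	return (one_to_one);
+}
+#endif
+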
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+struct socket *
+sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
+{
+ struct socket *newso;
+ struct sctp_inpcb *inp, *n_inp;
+ struct sctp_tcb *stcb;
+
+ SCTPDBG(SCTP_DEBUG_PEEL1, "SCTP peel-off called\n");
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+ *error = EFAULT;
+ return (NULL);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ *error = ENOTCONN;
+ return (NULL);
+ }
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_SET(head->so_vnet);
+#endif
+ newso = sonewconn(head, SS_ISCONNECTED
+#if defined(__APPLE__) && !defined(__Userspace__)
+ , NULL
+#endif
+ );
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_RESTORE();
+#endif
+ if (newso == NULL) {
+ SCTPDBG(SCTP_DEBUG_PEEL1, "sctp_peeloff:sonewconn failed\n");
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOMEM);
+ *error = ENOMEM;
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		return (NULL);
+	}
+#if defined(__APPLE__) && !defined(__Userspace__)
+ else {
+ SCTP_SOCKET_LOCK(newso, 1);
+ }
+#endif
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ n_inp = (struct sctp_inpcb *)newso->so_pcb;
+ SOCK_LOCK(head);
+ n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
+ (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+ n_inp->sctp_features = inp->sctp_features;
+ n_inp->sctp_frag_point = inp->sctp_frag_point;
+ n_inp->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+ n_inp->ecn_supported = inp->ecn_supported;
+ n_inp->prsctp_supported = inp->prsctp_supported;
+ n_inp->auth_supported = inp->auth_supported;
+ n_inp->asconf_supported = inp->asconf_supported;
+ n_inp->reconfig_supported = inp->reconfig_supported;
+ n_inp->nrsack_supported = inp->nrsack_supported;
+ n_inp->pktdrop_supported = inp->pktdrop_supported;
+ n_inp->partial_delivery_point = inp->partial_delivery_point;
+ n_inp->sctp_context = inp->sctp_context;
+ n_inp->max_cwnd = inp->max_cwnd;
+ n_inp->local_strreset_support = inp->local_strreset_support;
+ n_inp->inp_starting_point_for_iterator = NULL;
+#if defined(__Userspace__)
+ n_inp->ulp_info = inp->ulp_info;
+ n_inp->recv_callback = inp->recv_callback;
+ n_inp->send_callback = inp->send_callback;
+ n_inp->send_sb_threshold = inp->send_sb_threshold;
+#endif
+
+ /* copy in the authentication parameters from the original endpoint */
+ if (n_inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+ n_inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (n_inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+ n_inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &n_inp->sctp_ep.shared_keys);
+
+ n_inp->sctp_socket = newso;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_feature_off(n_inp, SCTP_PCB_FLAGS_AUTOCLOSE);
+ n_inp->sctp_ep.auto_close_time = 0;
+ sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, n_inp, stcb, NULL,
+ SCTP_FROM_SCTP_PEELOFF + SCTP_LOC_1);
+ }
+ /* Turn off any non-blocking semantic. */
+ SOCK_LOCK(newso);
+ SCTP_CLEAR_SO_NBIO(newso);
+ newso->so_state |= SS_ISCONNECTED;
+ SOCK_UNLOCK(newso);
+ /* We remove it right away */
+
+#ifdef SCTP_LOCK_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
+ sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+ }
+#endif
+ TAILQ_REMOVE(&head->so_comp, newso, so_list);
+ head->so_qlen--;
+ SOCK_UNLOCK(head);
+ /*
+ * Now we must move it from one hash table to another and get the
+ * stcb in the right place.
+ */
+ sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ /*
+	 * And now the final hack. We move data on the pending side, i.e.
+	 * head, to the new socket buffer. Let the GRUBBING begin :-0
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
+#else
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (newso);
+}
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.h
new file mode 100644
index 000000000..7e1c5ecee
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_peeloff.h
@@ -0,0 +1,70 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.h 309607 2016-12-06 10:21:25Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_PEELOFF_H_
+#define _NETINET_SCTP_PEELOFF_H_
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+/* socket option peeloff */
+struct sctp_peeloff_opt {
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ int s;
+#else
+ HANDLE s;
+#endif
+ sctp_assoc_t assoc_id;
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ int new_sd;
+#else
+ HANDLE new_sd;
+#endif
+};
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+#if defined(_KERNEL)
+int sctp_can_peel_off(struct socket *, sctp_assoc_t);
+int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+struct socket *sctp_get_peeloff(struct socket *, sctp_assoc_t, int *);
+int sctp_peeloff_option(struct proc *p, struct sctp_peeloff_opt *peeloff);
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+#endif /* _KERNEL */
+#if defined(__Userspace__)
+int sctp_can_peel_off(struct socket *, sctp_assoc_t);
+int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
+#endif /* __Userspace__ */
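+
+/*
+ * Editor's sketch, not part of the import: in the userspace build,
+ * applications normally reach this code through usrsctp's public
+ * usrsctp_peeloff() wrapper rather than calling sctp_do_peeloff()
+ * directly. The helper name below is hypothetical; kept under #if 0
+ * so it is never compiled.
+ */
+#if 0
+static struct socket *
+example_branch_off(struct socket *one_to_many, sctp_assoc_t id)
+{
+	/* Returns a new one-to-one socket owning association "id",
+	 * or NULL on failure. */
+	return (usrsctp_peeloff(one_to_many, id));
+}
+#endif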
+#endif /* _NETINET_SCTP_PEELOFF_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_process_lock.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_process_lock.h
new file mode 100644
index 000000000..ff0e05048
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_process_lock.h
@@ -0,0 +1,677 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __sctp_process_lock_h__
+#define __sctp_process_lock_h__
+
+/*
+ * We still need to define the following atomic functions or
+ * their equivalent:
+ * - atomic_add_int(&foo, val) - atomically add the value
+ * - atomic_fetchadd_int(&foo, val) - does the same as atomic_add_int,
+ *                                    but the previous value is returned.
+ * - atomic_subtract_int(&foo, val) - can be made from atomic_add_int()
+ *
+ * - atomic_cmpset_int(&foo, value, newvalue) - Sets newvalue in foo
+ *                                              if and only if foo
+ *                                              equals value. Returns
+ *                                              non-zero on success.
+ */
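+
+/*
+ * Editor's sketch, not part of the import: one way the operations
+ * listed above could be expressed with the GCC/Clang __atomic
+ * builtins, assuming a compiler that provides them. The example_*
+ * names are hypothetical; the port's real definitions live in its
+ * platform headers. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static int
+example_atomic_fetchadd_int(volatile int *p, int val)
+{
+	/* Atomically perform *p += val and return the old value. */
+	return (__atomic_fetch_add(p, val, __ATOMIC_SEQ_CST));
+}
+
+static void
+example_atomic_subtract_int(volatile int *p, int val)
+{
+	/* Built from the add primitive, as the comment above notes. */
+	(void)__atomic_fetch_add(p, -val, __ATOMIC_SEQ_CST);
+}
+
+static int
+example_atomic_cmpset_int(volatile int *p, int expected, int newval)
+{
+	/* Store newval iff *p == expected; non-zero here means success
+	 * (FreeBSD's atomic_cmpset convention). */
+	return (__atomic_compare_exchange_n(p, &expected, newval, 0,
+	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
+}
+#endif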
+
+#ifdef SCTP_PER_SOCKET_LOCKING
+/*
+ * per socket level locking
+ */
+
+#if defined(_WIN32)
+/* Lock for INFO stuff */
+#define SCTP_INP_INFO_LOCK_INIT()
+#define SCTP_INP_INFO_RLOCK()
+#define SCTP_INP_INFO_RUNLOCK()
+#define SCTP_INP_INFO_WLOCK()
+#define SCTP_INP_INFO_WUNLOCK()
+#define SCTP_INP_INFO_LOCK_DESTROY()
+#define SCTP_IPI_COUNT_INIT()
+#define SCTP_IPI_COUNT_DESTROY()
+#else
+#define SCTP_INP_INFO_LOCK_INIT()
+#define SCTP_INP_INFO_RLOCK()
+#define SCTP_INP_INFO_RUNLOCK()
+#define SCTP_INP_INFO_WLOCK()
+#define SCTP_INP_INFO_WUNLOCK()
+#define SCTP_INP_INFO_LOCK_DESTROY()
+#define SCTP_IPI_COUNT_INIT()
+#define SCTP_IPI_COUNT_DESTROY()
+#endif
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_SEND_LOCK(_tcb)
+#define SCTP_TCB_SEND_UNLOCK(_tcb)
+
+/* Lock for INP */
+#define SCTP_INP_LOCK_INIT(_inp)
+#define SCTP_INP_LOCK_DESTROY(_inp)
+
+#define SCTP_INP_RLOCK(_inp)
+#define SCTP_INP_RUNLOCK(_inp)
+#define SCTP_INP_WLOCK(_inp)
+#define SCTP_INP_WUNLOCK(_inp)
+#define SCTP_INP_RLOCK_ASSERT(_inp)
+#define SCTP_INP_WLOCK_ASSERT(_inp)
+#define SCTP_INP_INCR_REF(_inp)
+#define SCTP_INP_DECR_REF(_inp)
+
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
+#define SCTP_ASOC_CREATE_LOCK(_inp)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp)
+
+#define SCTP_INP_READ_INIT(_inp)
+#define SCTP_INP_READ_DESTROY(_inp)
+#define SCTP_INP_READ_LOCK(_inp)
+#define SCTP_INP_READ_UNLOCK(_inp)
+
+/* Lock for TCB */
+#define SCTP_TCB_LOCK_INIT(_tcb)
+#define SCTP_TCB_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_LOCK(_tcb)
+#define SCTP_TCB_TRYLOCK(_tcb) 1
+#define SCTP_TCB_UNLOCK(_tcb)
+#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+
+#else
+/*
+ * per tcb level locking
+ */
+#define SCTP_IPI_COUNT_INIT()
+
+#if defined(_WIN32)
+#define SCTP_WQ_ADDR_INIT() \
+ InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_DESTROY() \
+ DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_LOCK() \
+ EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_UNLOCK() \
+ LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_LOCK_ASSERT()
+
+#define SCTP_INP_INFO_LOCK_INIT() \
+ InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_LOCK_DESTROY() \
+ DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RLOCK() \
+ EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_TRYLOCK() \
+ TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WLOCK() \
+ EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RUNLOCK() \
+ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WUNLOCK() \
+ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+
+#define SCTP_IP_PKTLOG_INIT() \
+ InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_DESTROY() \
+ DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_LOCK() \
+ EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_UNLOCK() \
+ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+
+/*
+ * The INP locks we use to lock an SCTP endpoint. For example, if we
+ * want to change something at the endpoint level, such as random_store
+ * or the cookie secrets, we lock at the INP level.
+ */
+#define SCTP_INP_READ_INIT(_inp) \
+ InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_DESTROY(_inp) \
+ DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_LOCK(_inp) \
+ EnterCriticalSection(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_UNLOCK(_inp) \
+ LeaveCriticalSection(&(_inp)->inp_rdata_mtx)
+
+#define SCTP_INP_LOCK_INIT(_inp) \
+ InitializeCriticalSection(&(_inp)->inp_mtx)
+#define SCTP_INP_LOCK_DESTROY(_inp) \
+ DeleteCriticalSection(&(_inp)->inp_mtx)
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
+ EnterCriticalSection(&(_inp)->inp_mtx); \
+} while (0)
+#define SCTP_INP_WLOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
+ EnterCriticalSection(&(_inp)->inp_mtx); \
+} while (0)
+#else
+#define SCTP_INP_RLOCK(_inp) \
+ EnterCriticalSection(&(_inp)->inp_mtx)
+#define SCTP_INP_WLOCK(_inp) \
+ EnterCriticalSection(&(_inp)->inp_mtx)
+#endif
+#define SCTP_INP_RLOCK_ASSERT(_tcb)
+#define SCTP_INP_WLOCK_ASSERT(_tcb)
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
+ InitializeCriticalSection(&(_tcb)->tcb_send_mtx)
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
+ DeleteCriticalSection(&(_tcb)->tcb_send_mtx)
+#define SCTP_TCB_SEND_LOCK(_tcb) \
+ EnterCriticalSection(&(_tcb)->tcb_send_mtx)
+#define SCTP_TCB_SEND_UNLOCK(_tcb) \
+ LeaveCriticalSection(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
+#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
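+
+/*
+ * Editor's sketch, not part of the import: the usage pattern the
+ * endpoint-level comment above describes, with a hypothetical helper.
+ * Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_update_endpoint(struct sctp_inpcb *inp)
+{
+	SCTP_INP_WLOCK(inp);
+	/* ... mutate endpoint-level state, e.g. cookie secrets ... */
+	SCTP_INP_WUNLOCK(inp);
+}
+#endif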
+
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
+ InitializeCriticalSection(&(_inp)->inp_create_mtx)
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
+ DeleteCriticalSection(&(_inp)->inp_create_mtx)
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
+ EnterCriticalSection(&(_inp)->inp_create_mtx); \
+} while (0)
+#else
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ EnterCriticalSection(&(_inp)->inp_create_mtx)
+#endif
+
+#define SCTP_INP_RUNLOCK(_inp) \
+ LeaveCriticalSection(&(_inp)->inp_mtx)
+#define SCTP_INP_WUNLOCK(_inp) \
+ LeaveCriticalSection(&(_inp)->inp_mtx)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
+ LeaveCriticalSection(&(_inp)->inp_create_mtx)
+
+/*
+ * For the majority of things (once we have found the association) we will
+ * lock the actual association mutex. This will protect all the association
+ * level queues and streams and such. We will need to lock the socket layer
+ * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
+ * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
+ */
+
+#define SCTP_TCB_LOCK_INIT(_tcb) \
+ InitializeCriticalSection(&(_tcb)->tcb_mtx)
+#define SCTP_TCB_LOCK_DESTROY(_tcb) \
+ DeleteCriticalSection(&(_tcb)->tcb_mtx)
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
+ EnterCriticalSection(&(_tcb)->tcb_mtx); \
+} while (0)
+#else
+#define SCTP_TCB_LOCK(_tcb) \
+ EnterCriticalSection(&(_tcb)->tcb_mtx)
+#endif
+#define SCTP_TCB_TRYLOCK(_tcb) ((TryEnterCriticalSection(&(_tcb)->tcb_mtx)))
+#define SCTP_TCB_UNLOCK(_tcb) \
+ LeaveCriticalSection(&(_tcb)->tcb_mtx)
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+
+#else /* all Userspaces except Windows */
+#define SCTP_WQ_ADDR_INIT() \
+ (void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_WQ_ADDR_DESTROY() \
+ (void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
+#ifdef INVARIANTS
+#define SCTP_WQ_ADDR_LOCK() \
+ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx already locked", __func__))
+#define SCTP_WQ_ADDR_UNLOCK() \
+ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx not locked", __func__))
+#else
+#define SCTP_WQ_ADDR_LOCK() \
+ (void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_UNLOCK() \
+ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
+#endif
+#define SCTP_WQ_ADDR_LOCK_ASSERT() \
+ KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s: wq_addr_mtx not locked", __func__))
+
+#define SCTP_INP_INFO_LOCK_INIT() \
+ (void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_INP_INFO_LOCK_DESTROY() \
+ (void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
+#ifdef INVARIANTS
+#define SCTP_INP_INFO_RLOCK() \
+ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
+#define SCTP_INP_INFO_WLOCK() \
+ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
+#define SCTP_INP_INFO_RUNLOCK() \
+ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
+#define SCTP_INP_INFO_WUNLOCK() \
+ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
+#else
+#define SCTP_INP_INFO_RLOCK() \
+ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WLOCK() \
+ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RUNLOCK() \
+ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WUNLOCK() \
+ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#endif
+#define SCTP_INP_INFO_TRYLOCK() \
+ (!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx))))
+
+#define SCTP_IP_PKTLOG_INIT() \
+ (void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_IP_PKTLOG_DESTROY() \
+ (void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#ifdef INVARIANTS
+#define SCTP_IP_PKTLOG_LOCK() \
+ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx already locked", __func__))
+#define SCTP_IP_PKTLOG_UNLOCK() \
+ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx not locked", __func__))
+#else
+#define SCTP_IP_PKTLOG_LOCK() \
+ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_UNLOCK() \
+ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#endif
+
+
+/*
+ * The INP locks we use to lock an SCTP endpoint. For example, if we
+ * want to change something at the endpoint level, such as random_store
+ * or the cookie secrets, we lock at the INP level.
+ */
+#define SCTP_INP_READ_INIT(_inp) \
+ (void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_INP_READ_DESTROY(_inp) \
+ (void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
+#ifdef INVARIANTS
+#define SCTP_INP_READ_LOCK(_inp) \
+ KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx already locked", __func__))
+#define SCTP_INP_READ_UNLOCK(_inp) \
+ KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx not locked", __func__))
+#else
+#define SCTP_INP_READ_LOCK(_inp) \
+ (void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_UNLOCK(_inp) \
+ (void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
+#endif
+
+#define SCTP_INP_LOCK_INIT(_inp) \
+ (void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_INP_LOCK_DESTROY(_inp) \
+ (void)pthread_mutex_destroy(&(_inp)->inp_mtx)
+#ifdef INVARIANTS
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
+	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__)); \
+} while (0)
+#define SCTP_INP_WLOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
+	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__)); \
+} while (0)
+#else
+#define SCTP_INP_RLOCK(_inp) \
+ KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
+#define SCTP_INP_WLOCK(_inp) \
+ KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
+#endif
+#define SCTP_INP_RUNLOCK(_inp) \
+ KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
+#define SCTP_INP_WUNLOCK(_inp) \
+ KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
+#else
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
+ (void)pthread_mutex_lock(&(_inp)->inp_mtx); \
+} while (0)
+#define SCTP_INP_WLOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
+ (void)pthread_mutex_lock(&(_inp)->inp_mtx); \
+} while (0)
+#else
+#define SCTP_INP_RLOCK(_inp) \
+ (void)pthread_mutex_lock(&(_inp)->inp_mtx)
+#define SCTP_INP_WLOCK(_inp) \
+ (void)pthread_mutex_lock(&(_inp)->inp_mtx)
+#endif
+#define SCTP_INP_RUNLOCK(_inp) \
+ (void)pthread_mutex_unlock(&(_inp)->inp_mtx)
+#define SCTP_INP_WUNLOCK(_inp) \
+ (void)pthread_mutex_unlock(&(_inp)->inp_mtx)
+#endif
+#define SCTP_INP_RLOCK_ASSERT(_inp) \
+ KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__))
+#define SCTP_INP_WLOCK_ASSERT(_inp) \
+ KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__))
+#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
+#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
+ (void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
+ (void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)
+#ifdef INVARIANTS
+#define SCTP_TCB_SEND_LOCK(_tcb) \
+ KASSERT(pthread_mutex_lock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx already locked", __func__))
+#define SCTP_TCB_SEND_UNLOCK(_tcb) \
+ KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx not locked", __func__))
+#else
+#define SCTP_TCB_SEND_LOCK(_tcb) \
+ (void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx)
+#define SCTP_TCB_SEND_UNLOCK(_tcb) \
+ (void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)
+#endif
+
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
+ (void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
+ (void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
+#ifdef INVARIANTS
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
+	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__)); \
+} while (0)
+#else
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__))
+#endif
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
+ KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx not locked", __func__))
+#else
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
+ (void)pthread_mutex_lock(&(_inp)->inp_create_mtx); \
+} while (0)
+#else
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ (void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
+#endif
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
+ (void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
+#endif
+/*
+ * For the majority of things (once we have found the association) we will
+ * lock the actual association mutex. This will protect all the association
+ * level queues and streams and such. We will need to lock the socket layer
+ * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
+ * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
+ */
+
+#define SCTP_TCB_LOCK_INIT(_tcb) \
+ (void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_TCB_LOCK_DESTROY(_tcb) \
+ (void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
+#ifdef INVARIANTS
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
+	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__)); \
+} while (0)
+#else
+#define SCTP_TCB_LOCK(_tcb) \
+ KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__))
+#endif
+#define SCTP_TCB_UNLOCK(_tcb) \
+ KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx not locked", __func__))
+#else
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb) do { \
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
+ sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
+ (void)pthread_mutex_lock(&(_tcb)->tcb_mtx); \
+} while (0)
+#else
+#define SCTP_TCB_LOCK(_tcb) \
+ (void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
+#endif
+#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
+#endif
+#define SCTP_TCB_LOCK_ASSERT(_tcb) \
+ KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s: tcb_mtx not locked", __func__))
+#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
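+
+/*
+ * Editor's sketch, not part of the import: the nesting described
+ * above - the association (TCB) lock is held and the receive socket
+ * buffer is additionally locked while data is pushed into it.
+ * Hypothetical helper; kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_deliver_to_socket(struct sctp_tcb *stcb, struct socket *so)
+{
+	SCTP_TCB_LOCK(stcb);
+	SOCKBUF_LOCK(&so->so_rcv);
+	/* ... append received data to so->so_rcv ... */
+	SOCKBUF_UNLOCK(&so->so_rcv);
+	SCTP_TCB_UNLOCK(stcb);
+}
+#endif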
+#endif
+
+#endif /* SCTP_PER_SOCKET_LOCKING */
+
+
+/*
+ * common locks
+ */
+
+/* copied over to compile */
+#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
+#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+/* socket locks */
+
+#if defined(_WIN32)
+#define SOCKBUF_LOCK_ASSERT(_so_buf)
+#define SOCKBUF_LOCK(_so_buf) \
+ EnterCriticalSection(&(_so_buf)->sb_mtx)
+#define SOCKBUF_UNLOCK(_so_buf) \
+ LeaveCriticalSection(&(_so_buf)->sb_mtx)
+#define SOCK_LOCK(_so) \
+ SOCKBUF_LOCK(&(_so)->so_rcv)
+#define SOCK_UNLOCK(_so) \
+ SOCKBUF_UNLOCK(&(_so)->so_rcv)
+#else
+#define SOCKBUF_LOCK_ASSERT(_so_buf) \
+ KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s: socket buffer not locked", __func__))
+#ifdef INVARIANTS
+#define SOCKBUF_LOCK(_so_buf) \
+ KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx already locked", __func__))
+#define SOCKBUF_UNLOCK(_so_buf) \
+ KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx not locked", __func__))
+#else
+#define SOCKBUF_LOCK(_so_buf) \
+ pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
+#define SOCKBUF_UNLOCK(_so_buf) \
+ pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
+#endif
+#define SOCK_LOCK(_so) \
+ SOCKBUF_LOCK(&(_so)->so_rcv)
+#define SOCK_UNLOCK(_so) \
+ SOCKBUF_UNLOCK(&(_so)->so_rcv)
+#endif
+
+#define SCTP_STATLOG_INIT_LOCK()
+#define SCTP_STATLOG_LOCK()
+#define SCTP_STATLOG_UNLOCK()
+#define SCTP_STATLOG_DESTROY()
+
+#if defined(_WIN32)
+/* address list locks */
+#define SCTP_IPI_ADDR_INIT() \
+ InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_DESTROY() \
+ DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_RLOCK() \
+ EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_RUNLOCK() \
+ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_WLOCK() \
+ EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_WUNLOCK() \
+ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_LOCK_ASSERT()
+#define SCTP_IPI_ADDR_WLOCK_ASSERT()
+
+
+/* iterator locks */
+#define SCTP_ITERATOR_LOCK_INIT() \
+ InitializeCriticalSection(&sctp_it_ctl.it_mtx)
+#define SCTP_ITERATOR_LOCK_DESTROY() \
+ DeleteCriticalSection(&sctp_it_ctl.it_mtx)
+#define SCTP_ITERATOR_LOCK() \
+ EnterCriticalSection(&sctp_it_ctl.it_mtx)
+#define SCTP_ITERATOR_UNLOCK() \
+ LeaveCriticalSection(&sctp_it_ctl.it_mtx)
+
+#define SCTP_IPI_ITERATOR_WQ_INIT() \
+ InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
+ DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#define SCTP_IPI_ITERATOR_WQ_LOCK() \
+ EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
+ LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+#else
+/* address list locks */
+#define SCTP_IPI_ADDR_INIT() \
+ (void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_IPI_ADDR_DESTROY() \
+ (void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
+#ifdef INVARIANTS
+#define SCTP_IPI_ADDR_RLOCK() \
+ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
+#define SCTP_IPI_ADDR_RUNLOCK() \
+ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
+#define SCTP_IPI_ADDR_WLOCK() \
+ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
+#define SCTP_IPI_ADDR_WUNLOCK() \
+ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
+#define SCTP_IPI_ADDR_LOCK_ASSERT() \
+ KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_addr_mtx)) == EBUSY, ("%s: ipi_addr_mtx not locked", __func__))
+#define SCTP_IPI_ADDR_WLOCK_ASSERT() \
+ KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_addr_mtx)) == EBUSY, ("%s: ipi_addr_mtx not locked", __func__))
+#else
+#define SCTP_IPI_ADDR_RLOCK() \
+ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_RUNLOCK() \
+ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_WLOCK() \
+ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_WUNLOCK() \
+ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_LOCK_ASSERT()
+#define SCTP_IPI_ADDR_WLOCK_ASSERT()
+#endif
+
+/* iterator locks */
+#define SCTP_ITERATOR_LOCK_INIT() \
+ (void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_ITERATOR_LOCK_DESTROY() \
+ (void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
+#ifdef INVARIANTS
+#define SCTP_ITERATOR_LOCK() \
+ KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx already locked", __func__))
+#define SCTP_ITERATOR_UNLOCK() \
+ KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx not locked", __func__))
+#else
+#define SCTP_ITERATOR_LOCK() \
+ (void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
+#define SCTP_ITERATOR_UNLOCK() \
+ (void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
+#endif
+
+#define SCTP_IPI_ITERATOR_WQ_INIT() \
+ (void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
+ (void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#ifdef INVARIANTS
+#define SCTP_IPI_ITERATOR_WQ_LOCK() \
+ KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx already locked", __func__))
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
+ KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx not locked", __func__))
+#else
+#define SCTP_IPI_ITERATOR_WQ_LOCK() \
+ (void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
+ (void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#endif
+#endif
+
+#define SCTP_INCR_EP_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)
+
+#define SCTP_DECR_EP_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)
+
+#define SCTP_INCR_ASOC_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)
+
+#define SCTP_DECR_ASOC_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)
+
+#define SCTP_INCR_LADDR_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)
+
+#define SCTP_DECR_LADDR_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)
+
+#define SCTP_INCR_RADDR_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)
+
+#define SCTP_DECR_RADDR_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)
+
+#define SCTP_INCR_CHK_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)
+
+#define SCTP_DECR_CHK_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)
+
+#define SCTP_INCR_READQ_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)
+
+#define SCTP_DECR_READQ_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)
+
+#define SCTP_INCR_STRMOQ_COUNT() \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
+
+#define SCTP_DECR_STRMOQ_COUNT() \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.c
new file mode 100644
index 000000000..db0e7533f
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.c
@@ -0,0 +1,329 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2013, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2013, by Lally Singh. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <netinet/sctp_sha1.h>
+
+#if defined(SCTP_USE_NSS_SHA1)
+/* A SHA-1 Digest is 160 bits, or 20 bytes */
+#define SHA_DIGEST_LENGTH (20)
+
+void
+sctp_sha1_init(struct sctp_sha1_context *ctx)
+{
+ ctx->pk11_ctx = PK11_CreateDigestContext(SEC_OID_SHA1);
+ PK11_DigestBegin(ctx->pk11_ctx);
+}
+
+void
+sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz)
+{
+ PK11_DigestOp(ctx->pk11_ctx, ptr, siz);
+}
+
+void
+sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx)
+{
+ unsigned int output_len = 0;
+
+ PK11_DigestFinal(ctx->pk11_ctx, digest, &output_len, SHA_DIGEST_LENGTH);
+ PK11_DestroyContext(ctx->pk11_ctx, PR_TRUE);
+}
+
+#elif defined(SCTP_USE_OPENSSL_SHA1)
+
+void
+sctp_sha1_init(struct sctp_sha1_context *ctx)
+{
+ SHA1_Init(&ctx->sha_ctx);
+}
+
+void
+sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz)
+{
+ SHA1_Update(&ctx->sha_ctx, ptr, (unsigned long)siz);
+}
+
+void
+sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx)
+{
+ SHA1_Final(digest, &ctx->sha_ctx);
+}
+
+#else
+
+#include <string.h>
+#if defined(_WIN32) && defined(__Userspace__)
+#include <winsock2.h>
+#elif !(defined(_WIN32) && !defined(__Userspace__))
+#include <arpa/inet.h>
+#endif
+
+#define F1(B,C,D) (((B & C) | ((~B) & D))) /* 0 <= t <= 19 */
+#define F2(B,C,D) (B ^ C ^ D) /* 20 <= t <= 39 */
+#define F3(B,C,D) ((B & C) | (B & D) | (C & D)) /* 40 <= t <= 59 */
+#define F4(B,C,D) (B ^ C ^ D)				/* 60 <= t <= 79 */
+
+/* circular shift */
+#define CSHIFT(A,B) ((B << A) | (B >> (32-A)))
+
+#define K1 0x5a827999 /* 0 <= t <= 19 */
+#define K2 0x6ed9eba1 /* 20 <= t <= 39 */
+#define K3 0x8f1bbcdc /* 40 <= t <= 59 */
+#define K4 0xca62c1d6 /* 60 <= t <= 79 */
+
+#define H0INIT 0x67452301
+#define H1INIT 0xefcdab89
+#define H2INIT 0x98badcfe
+#define H3INIT 0x10325476
+#define H4INIT 0xc3d2e1f0
+
+void
+sctp_sha1_init(struct sctp_sha1_context *ctx)
+{
+ /* Init the SHA-1 context structure */
+ ctx->A = 0;
+ ctx->B = 0;
+ ctx->C = 0;
+ ctx->D = 0;
+ ctx->E = 0;
+ ctx->H0 = H0INIT;
+ ctx->H1 = H1INIT;
+ ctx->H2 = H2INIT;
+ ctx->H3 = H3INIT;
+ ctx->H4 = H4INIT;
+ ctx->TEMP = 0;
+ memset(ctx->words, 0, sizeof(ctx->words));
+ ctx->how_many_in_block = 0;
+ ctx->running_total = 0;
+}
+
+static void
+sctp_sha1_process_a_block(struct sctp_sha1_context *ctx, unsigned int *block)
+{
+ int i;
+
+ /* init the W0-W15 to the block of words being hashed. */
+ /* step a) */
+ for (i = 0; i < 16; i++) {
+ ctx->words[i] = ntohl(block[i]);
+ }
+ /* now init the rest based on the SHA-1 formula, step b) */
+ for (i = 16; i < 80; i++) {
+ ctx->words[i] = CSHIFT(1, ((ctx->words[(i - 3)]) ^
+ (ctx->words[(i - 8)]) ^
+ (ctx->words[(i - 14)]) ^
+ (ctx->words[(i - 16)])));
+ }
+ /* step c) */
+ ctx->A = ctx->H0;
+ ctx->B = ctx->H1;
+ ctx->C = ctx->H2;
+ ctx->D = ctx->H3;
+ ctx->E = ctx->H4;
+
+ /* step d) */
+ for (i = 0; i < 80; i++) {
+ if (i < 20) {
+ ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+ (F1(ctx->B, ctx->C, ctx->D)) +
+ (ctx->E) +
+ ctx->words[i] +
+ K1);
+ } else if (i < 40) {
+ ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+ (F2(ctx->B, ctx->C, ctx->D)) +
+ (ctx->E) +
+ (ctx->words[i]) +
+ K2);
+ } else if (i < 60) {
+ ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+ (F3(ctx->B, ctx->C, ctx->D)) +
+ (ctx->E) +
+ (ctx->words[i]) +
+ K3);
+ } else {
+ ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+ (F4(ctx->B, ctx->C, ctx->D)) +
+ (ctx->E) +
+ (ctx->words[i]) +
+ K4);
+ }
+ ctx->E = ctx->D;
+ ctx->D = ctx->C;
+ ctx->C = CSHIFT(30, ctx->B);
+ ctx->B = ctx->A;
+ ctx->A = ctx->TEMP;
+ }
+ /* step e) */
+ ctx->H0 = (ctx->H0) + (ctx->A);
+ ctx->H1 = (ctx->H1) + (ctx->B);
+ ctx->H2 = (ctx->H2) + (ctx->C);
+ ctx->H3 = (ctx->H3) + (ctx->D);
+ ctx->H4 = (ctx->H4) + (ctx->E);
+}
+
+void
+sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz)
+{
+ unsigned int number_left, left_to_fill;
+
+ number_left = siz;
+ while (number_left > 0) {
+ left_to_fill = sizeof(ctx->sha_block) - ctx->how_many_in_block;
+ if (left_to_fill > number_left) {
+ /* can only partially fill up this one */
+ memcpy(&ctx->sha_block[ctx->how_many_in_block],
+ ptr, number_left);
+ ctx->how_many_in_block += number_left;
+ ctx->running_total += number_left;
+ break;
+ } else {
+ /* block is now full, process it */
+ memcpy(&ctx->sha_block[ctx->how_many_in_block],
+ ptr, left_to_fill);
+ sctp_sha1_process_a_block(ctx,
+ (unsigned int *)ctx->sha_block);
+ number_left -= left_to_fill;
+ ctx->running_total += left_to_fill;
+ ctx->how_many_in_block = 0;
+ ptr = (const unsigned char *)(ptr + left_to_fill);
+ }
+ }
+}
+
+void
+sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx)
+{
+ /*
+	 * If anything is left in the block, fill it with padding and
+	 * process it, then transfer the digest to the caller's pointer.
+	 * Special rules apply to the last block: we must append a single
+	 * 1 bit after the message and then pad with 0's, and the total
+	 * size in bits is encoded as a 64-bit number at the end. If the
+	 * last buffer holds more than 55 octets, the 64-bit length plus
+	 * the 10000000 pad octet no longer fit: we append the 10000000
+	 * pad, fill the rest of the block with 0's, and then run one more
+	 * all-zero block carrying just the 64-bit size at its end. The
+	 * 64-bit integer must be in network byte order.
+ */
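+	/*
+	 * Editor's worked example, not part of the import: for the
+	 * 3-octet message "abc" (24 bits) the single final block is
+	 *   0x61 0x62 0x63 0x80 0x00 ... 0x00 0x00 0x00 0x00 0x18
+	 * i.e. the message, the 0x80 (10000000) pad octet, zeros, and
+	 * the bit count 24 = 0x18 in the last octets. Note that this
+	 * implementation stores the length as a 32-bit value at offset
+	 * 60 and leaves octets 56-59 zero, which equals the 64-bit
+	 * big-endian length for any input shorter than 2^32 bits.
+	 */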
+ int left_to_fill;
+ unsigned int i, *ptr;
+
+ if (ctx->how_many_in_block > 55) {
+ /*
+ * special case, we need to process two blocks here. One for
+ * the current stuff plus possibly the pad. The other for
+ * the size.
+ */
+ left_to_fill = sizeof(ctx->sha_block) - ctx->how_many_in_block;
+ if (left_to_fill == 0) {
+ /* Should not really happen but I am paranoid */
+ sctp_sha1_process_a_block(ctx,
+ (unsigned int *)ctx->sha_block);
+ /* init last block, a bit different than the rest */
+ ctx->sha_block[0] = '\x80';
+ for (i = 1; i < sizeof(ctx->sha_block); i++) {
+ ctx->sha_block[i] = 0x0;
+ }
+ } else if (left_to_fill == 1) {
+ ctx->sha_block[ctx->how_many_in_block] = '\x80';
+ sctp_sha1_process_a_block(ctx,
+ (unsigned int *)ctx->sha_block);
+ /* init last block */
+ memset(ctx->sha_block, 0, sizeof(ctx->sha_block));
+ } else {
+ ctx->sha_block[ctx->how_many_in_block] = '\x80';
+ for (i = (ctx->how_many_in_block + 1);
+ i < sizeof(ctx->sha_block);
+ i++) {
+ ctx->sha_block[i] = 0x0;
+ }
+ sctp_sha1_process_a_block(ctx,
+ (unsigned int *)ctx->sha_block);
+ /* init last block */
+ memset(ctx->sha_block, 0, sizeof(ctx->sha_block));
+ }
+ /* This is in bits so multiply by 8 */
+ ctx->running_total *= 8;
+ ptr = (unsigned int *)&ctx->sha_block[60];
+ *ptr = htonl(ctx->running_total);
+ sctp_sha1_process_a_block(ctx, (unsigned int *)ctx->sha_block);
+ } else {
+ /*
+ * easy case, we just pad this message to size - end with 0
+ * add the magic 0x80 to the next word and then put the
+ * network byte order size in the last spot and process the
+ * block.
+ */
+ ctx->sha_block[ctx->how_many_in_block] = '\x80';
+ for (i = (ctx->how_many_in_block + 1);
+ i < sizeof(ctx->sha_block);
+ i++) {
+ ctx->sha_block[i] = 0x0;
+ }
+ /* get last int spot */
+ ctx->running_total *= 8;
+ ptr = (unsigned int *)&ctx->sha_block[60];
+ *ptr = htonl(ctx->running_total);
+ sctp_sha1_process_a_block(ctx, (unsigned int *)ctx->sha_block);
+ }
+ /* transfer the digest back to the user */
+ digest[3] = (ctx->H0 & 0xff);
+ digest[2] = ((ctx->H0 >> 8) & 0xff);
+ digest[1] = ((ctx->H0 >> 16) & 0xff);
+ digest[0] = ((ctx->H0 >> 24) & 0xff);
+
+ digest[7] = (ctx->H1 & 0xff);
+ digest[6] = ((ctx->H1 >> 8) & 0xff);
+ digest[5] = ((ctx->H1 >> 16) & 0xff);
+ digest[4] = ((ctx->H1 >> 24) & 0xff);
+
+ digest[11] = (ctx->H2 & 0xff);
+ digest[10] = ((ctx->H2 >> 8) & 0xff);
+ digest[9] = ((ctx->H2 >> 16) & 0xff);
+ digest[8] = ((ctx->H2 >> 24) & 0xff);
+
+ digest[15] = (ctx->H3 & 0xff);
+ digest[14] = ((ctx->H3 >> 8) & 0xff);
+ digest[13] = ((ctx->H3 >> 16) & 0xff);
+ digest[12] = ((ctx->H3 >> 24) & 0xff);
+
+ digest[19] = (ctx->H4 & 0xff);
+ digest[18] = ((ctx->H4 >> 8) & 0xff);
+ digest[17] = ((ctx->H4 >> 16) & 0xff);
+ digest[16] = ((ctx->H4 >> 24) & 0xff);
+}
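+
+/*
+ * Editor's sketch, not part of the import: how the three-call API is
+ * driven. "abc" is the classic SHA-1 test vector; its digest is
+ * a9993e364706816aba3e25717850c26c9cd0d89d. Kept under #if 0 so it is
+ * never compiled.
+ */
+#if 0
+static void
+example_sha1_abc(void)
+{
+	struct sctp_sha1_context ctx;
+	unsigned char digest[20];
+
+	sctp_sha1_init(&ctx);
+	sctp_sha1_update(&ctx, (const unsigned char *)"abc", 3);
+	sctp_sha1_final(digest, &ctx);
+	/* digest now holds a9 99 3e 36 ... d0 d8 9d */
+}
+#endif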
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.h
new file mode 100644
index 000000000..d535ee463
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sha1.h
@@ -0,0 +1,90 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+
+#ifndef __NETINET_SCTP_SHA1_H__
+#define __NETINET_SCTP_SHA1_H__
+
+#include <sys/types.h>
+#if defined(SCTP_USE_NSS_SHA1)
+#include <pk11pub.h>
+#elif defined(SCTP_USE_OPENSSL_SHA1)
+#include <openssl/sha.h>
+#endif
+
+struct sctp_sha1_context {
+#if defined(SCTP_USE_NSS_SHA1)
+ struct PK11Context *pk11_ctx;
+#elif defined(SCTP_USE_OPENSSL_SHA1)
+ SHA_CTX sha_ctx;
+#else
+ unsigned int A;
+ unsigned int B;
+ unsigned int C;
+ unsigned int D;
+ unsigned int E;
+ unsigned int H0;
+ unsigned int H1;
+ unsigned int H2;
+ unsigned int H3;
+ unsigned int H4;
+ unsigned int words[80];
+ unsigned int TEMP;
+ /* block I am collecting to process */
+ char sha_block[64];
+ /* collected so far */
+ int how_many_in_block;
+ unsigned int running_total;
+#endif
+};
+
+#if (defined(__APPLE__) && !defined(__Userspace__) && defined(KERNEL))
+#ifndef _KERNEL
+#define _KERNEL
+#endif
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+void sctp_sha1_init(struct sctp_sha1_context *);
+void sctp_sha1_update(struct sctp_sha1_context *, const unsigned char *, unsigned int);
+void sctp_sha1_final(unsigned char *, struct sctp_sha1_context *);
+
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_ss_functions.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_ss_functions.c
new file mode 100644
index 000000000..fad42467c
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_ss_functions.c
@@ -0,0 +1,1130 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_ss_functions.c 362173 2020-06-14 09:50:00Z tuexen $");
+#endif
+
+#include <netinet/sctp_pcb.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_os_userspace.h>
+#endif
+
+/*
+ * Default simple round-robin algorithm.
+ * Just iterates the streams in the order they appear.
+ */
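+/*
+ * Editor's note, not part of the import: with streams 3, 1 and 2
+ * added to the wheel in that order and all holding data, successive
+ * selects return 3, 1, 2, 3, 1, 2, ... The wheel keeps arrival order;
+ * the "real" round-robin scheduler further below instead keeps it
+ * sorted by stream id.
+ */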
+
+static void
+sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_stream_out *,
+ struct sctp_stream_queue_pending *, int);
+
+static void
+sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_stream_out *,
+ struct sctp_stream_queue_pending *, int);
+
+static void
+sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int holds_lock)
+{
+ uint16_t i;
+
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ asoc->ss_data.locked_on_sending = NULL;
+ asoc->ss_data.last_out_stream = NULL;
+ TAILQ_INIT(&asoc->ss_data.out.wheel);
+ /*
+ * If there is data in the stream queues already,
+ * the scheduler of an existing association has
+ * been changed. We need to add all stream queues
+ * to the wheel.
+ */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc,
+ &stcb->asoc.strmout[i],
+ NULL, 1);
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int clear_values SCTP_UNUSED, int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+ struct sctp_stream_out *strq;
+
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+ strq->ss_params.rr.next_spoke.tqe_next = NULL;
+ strq->ss_params.rr.next_spoke.tqe_prev = NULL;
+ }
+ asoc->ss_data.last_out_stream = NULL;
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+ if (with_strq != NULL) {
+ if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+ stcb->asoc.ss_data.locked_on_sending = strq;
+ }
+ if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+ stcb->asoc.ss_data.last_out_stream = strq;
+ }
+ }
+ strq->ss_params.rr.next_spoke.tqe_next = NULL;
+ strq->ss_params.rr.next_spoke.tqe_prev = NULL;
+ return;
+}
+
+static void
+sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq,
+ struct sctp_stream_queue_pending *sp SCTP_UNUSED, int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* Add to wheel if not already on it and stream queue not empty */
+ if (!TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.rr.next_spoke.tqe_next == NULL) &&
+ (strq->ss_params.rr.next_spoke.tqe_prev == NULL)) {
+ TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
+ strq, ss_params.rr.next_spoke);
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static int
+sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
+{
+ if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+static void
+sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq,
+ struct sctp_stream_queue_pending *sp SCTP_UNUSED, int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* Remove from wheel if stream queue is empty and actually is on the wheel */
+ if (TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.rr.next_spoke.tqe_next != NULL ||
+ strq->ss_params.rr.next_spoke.tqe_prev != NULL)) {
+ if (asoc->ss_data.last_out_stream == strq) {
+ asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
+ sctpwheel_listhead,
+ ss_params.rr.next_spoke);
+ if (asoc->ss_data.last_out_stream == NULL) {
+ asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
+ sctpwheel_listhead);
+ }
+ if (asoc->ss_data.last_out_stream == strq) {
+ asoc->ss_data.last_out_stream = NULL;
+ }
+ }
+ TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+ strq->ss_params.rr.next_spoke.tqe_next = NULL;
+ strq->ss_params.rr.next_spoke.tqe_prev = NULL;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+
+static struct sctp_stream_out *
+sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+ struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq, *strqt;
+
+ if (asoc->ss_data.locked_on_sending) {
+ return (asoc->ss_data.locked_on_sending);
+ }
+ strqt = asoc->ss_data.last_out_stream;
+default_again:
+ /* Find the next stream to use */
+ if (strqt == NULL) {
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ } else {
+ strq = TAILQ_NEXT(strqt, ss_params.rr.next_spoke);
+ if (strq == NULL) {
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ }
+ }
+
+	/* If CMT is off, we must validate that
+	 * the stream in question has its first
+	 * item pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+ if (net != NULL && strq != NULL &&
+ SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+ if (TAILQ_FIRST(&strq->outqueue) &&
+ TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+ TAILQ_FIRST(&strq->outqueue)->net != net) {
+ if (strq == asoc->ss_data.last_out_stream) {
+ return (NULL);
+ } else {
+ strqt = strq;
+ goto default_again;
+ }
+ }
+ }
+ return (strq);
+}
+
+static void
+sctp_ss_default_scheduled(struct sctp_tcb *stcb,
+ struct sctp_nets *net SCTP_UNUSED,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq,
+ int moved_how_much SCTP_UNUSED)
+{
+ struct sctp_stream_queue_pending *sp;
+
+ asoc->ss_data.last_out_stream = strq;
+ if (stcb->asoc.idata_supported == 0) {
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if ((sp != NULL) && (sp->some_taken == 1)) {
+ stcb->asoc.ss_data.locked_on_sending = strq;
+ } else {
+ stcb->asoc.ss_data.locked_on_sending = NULL;
+ }
+ } else {
+ stcb->asoc.ss_data.locked_on_sending = NULL;
+ }
+ return;
+}
+
+static void
+sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
+ struct sctp_association *asoc SCTP_UNUSED)
+{
+ /* Nothing to be done here */
+ return;
+}
+
+static int
+sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
+ struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
+{
+ /* Nothing to be done here */
+ return (-1);
+}
+
+static int
+sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
+ struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
+{
+ /* Nothing to be done here */
+ return (-1);
+}
+
+static int
+sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq;
+ struct sctp_stream_queue_pending *sp;
+
+ if (asoc->stream_queue_cnt != 1) {
+ return (0);
+ }
+ strq = asoc->ss_data.locked_on_sending;
+ if (strq == NULL) {
+ return (0);
+ }
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp == NULL) {
+ return (0);
+ }
+ return (!sp->msg_is_complete);
+}
+
+/*
+ * Real round-robin algorithm.
+ * Always iterates the streams in ascending order.
+ */
+static void
+sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq,
+ struct sctp_stream_queue_pending *sp SCTP_UNUSED, int holds_lock)
+{
+ struct sctp_stream_out *strqt;
+
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ if (!TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.rr.next_spoke.tqe_next == NULL) &&
+ (strq->ss_params.rr.next_spoke.tqe_prev == NULL)) {
+ if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+ TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+ } else {
+ strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ while (strqt != NULL && (strqt->sid < strq->sid)) {
+ strqt = TAILQ_NEXT(strqt, ss_params.rr.next_spoke);
+ }
+ if (strqt != NULL) {
+ TAILQ_INSERT_BEFORE(strqt, strq, ss_params.rr.next_spoke);
+ } else {
+ TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+ }
+ }
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+/*
+ * Real round-robin per packet algorithm.
+ * Always iterates the streams in ascending order and
+ * only fills a packet with messages from the same stream.
+ */
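+/*
+ * Editor's note, not part of the import: select() here simply returns
+ * last_out_stream, so the packet being built keeps drawing from one
+ * stream; only packet_done() below advances the wheel to the next
+ * stream in ascending order.
+ */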
+static struct sctp_stream_out *
+sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
+ struct sctp_association *asoc)
+{
+ return (asoc->ss_data.last_out_stream);
+}
+
+static void
+sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+ struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq, *strqt;
+
+ strqt = asoc->ss_data.last_out_stream;
+rrp_again:
+ /* Find the next stream to use */
+ if (strqt == NULL) {
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ } else {
+ strq = TAILQ_NEXT(strqt, ss_params.rr.next_spoke);
+ if (strq == NULL) {
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ }
+ }
+
+	/* If CMT is off, we must validate that
+	 * the stream in question has its first
+	 * item pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+ if (net != NULL && strq != NULL &&
+ SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+ if (TAILQ_FIRST(&strq->outqueue) &&
+ TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+ TAILQ_FIRST(&strq->outqueue)->net != net) {
+ if (strq == asoc->ss_data.last_out_stream) {
+ strq = NULL;
+ } else {
+ strqt = strq;
+ goto rrp_again;
+ }
+ }
+ }
+ asoc->ss_data.last_out_stream = strq;
+ return;
+}
+
+
+/*
+ * Priority algorithm.
+ * Always prefers streams based on their priority id.
+ */
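+/*
+ * Editor's note, not part of the import: the wheel is kept sorted by
+ * ascending priority value and selection starts from its head, so a
+ * numerically lower value wins. E.g. with stream 0 at priority 1 and
+ * stream 5 at priority 4, stream 0 is drained first; stream 5 is only
+ * scheduled while no priority-1 stream has data queued.
+ */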
+static void
+sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int clear_values, int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+ struct sctp_stream_out *strq;
+
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ if (clear_values) {
+ strq->ss_params.prio.priority = 0;
+ }
+ TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+ strq->ss_params.prio.next_spoke.tqe_next = NULL;
+ strq->ss_params.prio.next_spoke.tqe_prev = NULL;
+
+ }
+ asoc->ss_data.last_out_stream = NULL;
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+ if (with_strq != NULL) {
+ if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+ stcb->asoc.ss_data.locked_on_sending = strq;
+ }
+ if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+ stcb->asoc.ss_data.last_out_stream = strq;
+ }
+ }
+ strq->ss_params.prio.next_spoke.tqe_next = NULL;
+ strq->ss_params.prio.next_spoke.tqe_prev = NULL;
+ if (with_strq != NULL) {
+ strq->ss_params.prio.priority = with_strq->ss_params.prio.priority;
+ } else {
+ strq->ss_params.prio.priority = 0;
+ }
+ return;
+}
+
+static void
+sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+ int holds_lock)
+{
+ struct sctp_stream_out *strqt;
+
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* Add to wheel if not already on it and stream queue not empty */
+ if (!TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.prio.next_spoke.tqe_next == NULL) &&
+ (strq->ss_params.prio.next_spoke.tqe_prev == NULL)) {
+ if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+ TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+ } else {
+ strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ while (strqt != NULL && strqt->ss_params.prio.priority < strq->ss_params.prio.priority) {
+ strqt = TAILQ_NEXT(strqt, ss_params.prio.next_spoke);
+ }
+ if (strqt != NULL) {
+ TAILQ_INSERT_BEFORE(strqt, strq, ss_params.prio.next_spoke);
+ } else {
+ TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+ }
+ }
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+ int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* Remove from wheel if stream queue is empty and actually is on the wheel */
+ if (TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.prio.next_spoke.tqe_next != NULL ||
+ strq->ss_params.prio.next_spoke.tqe_prev != NULL)) {
+ if (asoc->ss_data.last_out_stream == strq) {
+ asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, sctpwheel_listhead,
+ ss_params.prio.next_spoke);
+ if (asoc->ss_data.last_out_stream == NULL) {
+ asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
+ sctpwheel_listhead);
+ }
+ if (asoc->ss_data.last_out_stream == strq) {
+ asoc->ss_data.last_out_stream = NULL;
+ }
+ }
+ TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+ strq->ss_params.prio.next_spoke.tqe_next = NULL;
+ strq->ss_params.prio.next_spoke.tqe_prev = NULL;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static struct sctp_stream_out*
+sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+ struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq, *strqt, *strqn;
+
+ if (asoc->ss_data.locked_on_sending) {
+ return (asoc->ss_data.locked_on_sending);
+ }
+ strqt = asoc->ss_data.last_out_stream;
+prio_again:
+ /* Find the next stream to use */
+ if (strqt == NULL) {
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ } else {
+ strqn = TAILQ_NEXT(strqt, ss_params.prio.next_spoke);
+ if (strqn != NULL &&
+ strqn->ss_params.prio.priority == strqt->ss_params.prio.priority) {
+ strq = strqn;
+ } else {
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ }
+ }
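+	/* Note: the wheel is sorted by ascending priority value, so
+	 * restarting at TAILQ_FIRST() returns to the most preferred band,
+	 * while advancing only while the priority matches gives a
+	 * round-robin among streams of equal priority.
+	 */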
+
+	/* If CMT is off, we must validate that
+	 * the stream in question has its first
+	 * item pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+ if (net != NULL && strq != NULL &&
+ SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+ if (TAILQ_FIRST(&strq->outqueue) &&
+ TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+ TAILQ_FIRST(&strq->outqueue)->net != net) {
+ if (strq == asoc->ss_data.last_out_stream) {
+ return (NULL);
+ } else {
+ strqt = strq;
+ goto prio_again;
+ }
+ }
+ }
+ return (strq);
+}
+
+static int
+sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
+ struct sctp_stream_out *strq, uint16_t *value)
+{
+ if (strq == NULL) {
+ return (-1);
+ }
+ *value = strq->ss_params.prio.priority;
+ return (1);
+}
+
+static int
+sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, uint16_t value)
+{
+ if (strq == NULL) {
+ return (-1);
+ }
+ strq->ss_params.prio.priority = value;
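+	/* Re-queue the stream so it sits at the wheel position that
+	 * matches its new priority value.
+	 */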
+ sctp_ss_prio_remove(stcb, asoc, strq, NULL, 1);
+ sctp_ss_prio_add(stcb, asoc, strq, NULL, 1);
+ return (1);
+}
+
+/*
+ * Fair bandwidth algorithm.
+ * Maintains an equal throughput per stream.
+ */
+static void
+sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int clear_values, int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+ struct sctp_stream_out *strq;
+
+ strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ if (clear_values) {
+ strq->ss_params.fb.rounds = -1;
+ }
+ TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.fb.next_spoke);
+ strq->ss_params.fb.next_spoke.tqe_next = NULL;
+ strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+ }
+ asoc->ss_data.last_out_stream = NULL;
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+ if (with_strq != NULL) {
+ if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+ stcb->asoc.ss_data.locked_on_sending = strq;
+ }
+ if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+ stcb->asoc.ss_data.last_out_stream = strq;
+ }
+ }
+ strq->ss_params.fb.next_spoke.tqe_next = NULL;
+ strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+ if (with_strq != NULL) {
+ strq->ss_params.fb.rounds = with_strq->ss_params.fb.rounds;
+ } else {
+ strq->ss_params.fb.rounds = -1;
+ }
+ return;
+}
+
+static void
+sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+ int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ if (!TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.fb.next_spoke.tqe_next == NULL) &&
+ (strq->ss_params.fb.next_spoke.tqe_prev == NULL)) {
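+		/* "rounds" caches the size of the stream's first queued
+		 * message; the select routine favors the smallest value, so
+		 * streams sending less data are scheduled first.
+		 */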
+ if (strq->ss_params.fb.rounds < 0)
+ strq->ss_params.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
+ TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.fb.next_spoke);
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+ int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* Remove from wheel if stream queue is empty and actually is on the wheel */
+ if (TAILQ_EMPTY(&strq->outqueue) &&
+ (strq->ss_params.fb.next_spoke.tqe_next != NULL ||
+ strq->ss_params.fb.next_spoke.tqe_prev != NULL)) {
+ if (asoc->ss_data.last_out_stream == strq) {
+ asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, sctpwheel_listhead,
+ ss_params.fb.next_spoke);
+ if (asoc->ss_data.last_out_stream == NULL) {
+ asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
+ sctpwheel_listhead);
+ }
+ if (asoc->ss_data.last_out_stream == strq) {
+ asoc->ss_data.last_out_stream = NULL;
+ }
+ }
+ TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.fb.next_spoke);
+ strq->ss_params.fb.next_spoke.tqe_next = NULL;
+ strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static struct sctp_stream_out*
+sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+ struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq = NULL, *strqt;
+
+ if (asoc->ss_data.locked_on_sending) {
+ return (asoc->ss_data.locked_on_sending);
+ }
+ if (asoc->ss_data.last_out_stream == NULL ||
+ TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
+ strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ } else {
+ strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.fb.next_spoke);
+ }
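+	/* Walk once around the wheel and remember the eligible stream with
+	 * the smallest rounds value; the loop stops when the walk returns to
+	 * the chosen stream, or when it falls off the end with nothing
+	 * chosen (including an empty wheel).
+	 */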
+ do {
+ if ((strqt != NULL) &&
+ ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
+ (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
+ (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
+ (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
+ TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
+ if ((strqt->ss_params.fb.rounds >= 0) && (strq == NULL ||
+ strqt->ss_params.fb.rounds < strq->ss_params.fb.rounds)) {
+ strq = strqt;
+ }
+ }
+ if (strqt != NULL) {
+ strqt = TAILQ_NEXT(strqt, ss_params.fb.next_spoke);
+ } else {
+ strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+ }
+ } while (strqt != strq);
+ return (strq);
+}
+
+static void
+sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
+ struct sctp_association *asoc, struct sctp_stream_out *strq,
+ int moved_how_much SCTP_UNUSED)
+{
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_stream_out *strqt;
+ int subtract;
+
+ if (stcb->asoc.idata_supported == 0) {
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if ((sp != NULL) && (sp->some_taken == 1)) {
+ stcb->asoc.ss_data.locked_on_sending = strq;
+ } else {
+ stcb->asoc.ss_data.locked_on_sending = NULL;
+ }
+ } else {
+ stcb->asoc.ss_data.locked_on_sending = NULL;
+ }
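+	/* Deduct the scheduled stream's rounds value from every stream on
+	 * the wheel, so the smallest remaining value still identifies the
+	 * stream that has sent the least data.
+	 */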
+ subtract = strq->ss_params.fb.rounds;
+ TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.fb.next_spoke) {
+ strqt->ss_params.fb.rounds -= subtract;
+ if (strqt->ss_params.fb.rounds < 0)
+ strqt->ss_params.fb.rounds = 0;
+ }
+ if (TAILQ_FIRST(&strq->outqueue)) {
+ strq->ss_params.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
+ } else {
+ strq->ss_params.fb.rounds = -1;
+ }
+ asoc->ss_data.last_out_stream = strq;
+ return;
+}
+
+/*
+ * First-come, first-serve algorithm.
+ * Maintains the order provided by the application.
+ */
+static void
+sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq SCTP_UNUSED,
+ struct sctp_stream_queue_pending *sp, int holds_lock);
+
+static void
+sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int holds_lock)
+{
+ uint32_t x, n = 0, add_more = 1;
+ struct sctp_stream_queue_pending *sp;
+ uint16_t i;
+
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ TAILQ_INIT(&asoc->ss_data.out.list);
+	/*
+	 * If there is data in the stream queues already,
+	 * the scheduler of an existing association has
+	 * been changed. All we can do is cycle through
+	 * the stream queues and add everything to the
+	 * FCFS queue, taking the n-th message of every
+	 * stream on the n-th pass.
+	 */
+ while (add_more) {
+ add_more = 0;
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
+ x = 0;
+			/* Find the n-th message in the current stream queue */
+ while (sp != NULL && x < n) {
+ sp = TAILQ_NEXT(sp, next);
+ x++;
+ }
+ if (sp != NULL) {
+ sctp_ss_fcfs_add(stcb, &stcb->asoc, &stcb->asoc.strmout[i], sp, 1);
+ add_more = 1;
+ }
+ }
+ n++;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static void
+sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int clear_values, int holds_lock)
+{
+ struct sctp_stream_queue_pending *sp;
+
+ if (clear_values) {
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
+ sp = TAILQ_FIRST(&asoc->ss_data.out.list);
+ TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
+ sp->ss_next.tqe_next = NULL;
+ sp->ss_next.tqe_prev = NULL;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ }
+ return;
+}
+
+static void
+sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+ if (with_strq != NULL) {
+ if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+ stcb->asoc.ss_data.locked_on_sending = strq;
+ }
+ if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+ stcb->asoc.ss_data.last_out_stream = strq;
+ }
+ }
+ strq->ss_params.fb.next_spoke.tqe_next = NULL;
+ strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+ return;
+}
+
+static void
+sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp,
+ int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ if (sp && (sp->ss_next.tqe_next == NULL) &&
+ (sp->ss_next.tqe_prev == NULL)) {
+ TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+static int
+sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
+{
+ if (TAILQ_EMPTY(&asoc->ss_data.out.list)) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+static void
+sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp,
+ int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ if (sp &&
+ ((sp->ss_next.tqe_next != NULL) ||
+ (sp->ss_next.tqe_prev != NULL))) {
+ TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
+ sp->ss_next.tqe_next = NULL;
+ sp->ss_next.tqe_prev = NULL;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ return;
+}
+
+
+static struct sctp_stream_out *
+sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+ struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq;
+ struct sctp_stream_queue_pending *sp;
+
+ if (asoc->ss_data.locked_on_sending) {
+ return (asoc->ss_data.locked_on_sending);
+ }
+ sp = TAILQ_FIRST(&asoc->ss_data.out.list);
+default_again:
+ if (sp != NULL) {
+ strq = &asoc->strmout[sp->sid];
+ } else {
+ strq = NULL;
+ }
+
+	/*
+	 * If CMT is off, we must validate that
+	 * the stream in question has its first
+	 * item pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+ if (net != NULL && strq != NULL &&
+ SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+ if (TAILQ_FIRST(&strq->outqueue) &&
+ TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+ TAILQ_FIRST(&strq->outqueue)->net != net) {
+ sp = TAILQ_NEXT(sp, ss_next);
+ goto default_again;
+ }
+ }
+ return (strq);
+}
+
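+/*
+ * Dispatch table with one entry per scheduler, indexed by the SCTP_SS_*
+ * constants named in the comments below. On Windows the entries are
+ * positional, presumably because older MSVC versions lack C99 designated
+ * initializers, so their order must match struct sctp_ss_functions exactly.
+ */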
+const struct sctp_ss_functions sctp_ss_functions[] = {
+/* SCTP_SS_DEFAULT */
+{
+#if defined(_WIN32)
+ sctp_ss_default_init,
+ sctp_ss_default_clear,
+ sctp_ss_default_init_stream,
+ sctp_ss_default_add,
+ sctp_ss_default_is_empty,
+ sctp_ss_default_remove,
+ sctp_ss_default_select,
+ sctp_ss_default_scheduled,
+ sctp_ss_default_packet_done,
+ sctp_ss_default_get_value,
+ sctp_ss_default_set_value,
+ sctp_ss_default_is_user_msgs_incomplete
+#else
+ .sctp_ss_init = sctp_ss_default_init,
+ .sctp_ss_clear = sctp_ss_default_clear,
+ .sctp_ss_init_stream = sctp_ss_default_init_stream,
+ .sctp_ss_add_to_stream = sctp_ss_default_add,
+ .sctp_ss_is_empty = sctp_ss_default_is_empty,
+ .sctp_ss_remove_from_stream = sctp_ss_default_remove,
+ .sctp_ss_select_stream = sctp_ss_default_select,
+ .sctp_ss_scheduled = sctp_ss_default_scheduled,
+ .sctp_ss_packet_done = sctp_ss_default_packet_done,
+ .sctp_ss_get_value = sctp_ss_default_get_value,
+ .sctp_ss_set_value = sctp_ss_default_set_value,
+ .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_ROUND_ROBIN */
+{
+#if defined(_WIN32)
+ sctp_ss_default_init,
+ sctp_ss_default_clear,
+ sctp_ss_default_init_stream,
+ sctp_ss_rr_add,
+ sctp_ss_default_is_empty,
+ sctp_ss_default_remove,
+ sctp_ss_default_select,
+ sctp_ss_default_scheduled,
+ sctp_ss_default_packet_done,
+ sctp_ss_default_get_value,
+ sctp_ss_default_set_value,
+ sctp_ss_default_is_user_msgs_incomplete
+#else
+ .sctp_ss_init = sctp_ss_default_init,
+ .sctp_ss_clear = sctp_ss_default_clear,
+ .sctp_ss_init_stream = sctp_ss_default_init_stream,
+ .sctp_ss_add_to_stream = sctp_ss_rr_add,
+ .sctp_ss_is_empty = sctp_ss_default_is_empty,
+ .sctp_ss_remove_from_stream = sctp_ss_default_remove,
+ .sctp_ss_select_stream = sctp_ss_default_select,
+ .sctp_ss_scheduled = sctp_ss_default_scheduled,
+ .sctp_ss_packet_done = sctp_ss_default_packet_done,
+ .sctp_ss_get_value = sctp_ss_default_get_value,
+ .sctp_ss_set_value = sctp_ss_default_set_value,
+ .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_ROUND_ROBIN_PACKET */
+{
+#if defined(_WIN32)
+ sctp_ss_default_init,
+ sctp_ss_default_clear,
+ sctp_ss_default_init_stream,
+ sctp_ss_rr_add,
+ sctp_ss_default_is_empty,
+ sctp_ss_default_remove,
+ sctp_ss_rrp_select,
+ sctp_ss_default_scheduled,
+ sctp_ss_rrp_packet_done,
+ sctp_ss_default_get_value,
+ sctp_ss_default_set_value,
+ sctp_ss_default_is_user_msgs_incomplete
+#else
+ .sctp_ss_init = sctp_ss_default_init,
+ .sctp_ss_clear = sctp_ss_default_clear,
+ .sctp_ss_init_stream = sctp_ss_default_init_stream,
+ .sctp_ss_add_to_stream = sctp_ss_rr_add,
+ .sctp_ss_is_empty = sctp_ss_default_is_empty,
+ .sctp_ss_remove_from_stream = sctp_ss_default_remove,
+ .sctp_ss_select_stream = sctp_ss_rrp_select,
+ .sctp_ss_scheduled = sctp_ss_default_scheduled,
+ .sctp_ss_packet_done = sctp_ss_rrp_packet_done,
+ .sctp_ss_get_value = sctp_ss_default_get_value,
+ .sctp_ss_set_value = sctp_ss_default_set_value,
+ .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_PRIORITY */
+{
+#if defined(_WIN32)
+ sctp_ss_default_init,
+ sctp_ss_prio_clear,
+ sctp_ss_prio_init_stream,
+ sctp_ss_prio_add,
+ sctp_ss_default_is_empty,
+ sctp_ss_prio_remove,
+ sctp_ss_prio_select,
+ sctp_ss_default_scheduled,
+ sctp_ss_default_packet_done,
+ sctp_ss_prio_get_value,
+ sctp_ss_prio_set_value,
+ sctp_ss_default_is_user_msgs_incomplete
+#else
+ .sctp_ss_init = sctp_ss_default_init,
+ .sctp_ss_clear = sctp_ss_prio_clear,
+ .sctp_ss_init_stream = sctp_ss_prio_init_stream,
+ .sctp_ss_add_to_stream = sctp_ss_prio_add,
+ .sctp_ss_is_empty = sctp_ss_default_is_empty,
+ .sctp_ss_remove_from_stream = sctp_ss_prio_remove,
+ .sctp_ss_select_stream = sctp_ss_prio_select,
+ .sctp_ss_scheduled = sctp_ss_default_scheduled,
+ .sctp_ss_packet_done = sctp_ss_default_packet_done,
+ .sctp_ss_get_value = sctp_ss_prio_get_value,
+ .sctp_ss_set_value = sctp_ss_prio_set_value,
+ .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_FAIR_BANDWITH */
+{
+#if defined(_WIN32)
+ sctp_ss_default_init,
+ sctp_ss_fb_clear,
+ sctp_ss_fb_init_stream,
+ sctp_ss_fb_add,
+ sctp_ss_default_is_empty,
+ sctp_ss_fb_remove,
+ sctp_ss_fb_select,
+ sctp_ss_fb_scheduled,
+ sctp_ss_default_packet_done,
+ sctp_ss_default_get_value,
+ sctp_ss_default_set_value,
+ sctp_ss_default_is_user_msgs_incomplete
+#else
+ .sctp_ss_init = sctp_ss_default_init,
+ .sctp_ss_clear = sctp_ss_fb_clear,
+ .sctp_ss_init_stream = sctp_ss_fb_init_stream,
+ .sctp_ss_add_to_stream = sctp_ss_fb_add,
+ .sctp_ss_is_empty = sctp_ss_default_is_empty,
+ .sctp_ss_remove_from_stream = sctp_ss_fb_remove,
+ .sctp_ss_select_stream = sctp_ss_fb_select,
+ .sctp_ss_scheduled = sctp_ss_fb_scheduled,
+ .sctp_ss_packet_done = sctp_ss_default_packet_done,
+ .sctp_ss_get_value = sctp_ss_default_get_value,
+ .sctp_ss_set_value = sctp_ss_default_set_value,
+ .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_FIRST_COME */
+{
+#if defined(_WIN32)
+ sctp_ss_fcfs_init,
+ sctp_ss_fcfs_clear,
+ sctp_ss_fcfs_init_stream,
+ sctp_ss_fcfs_add,
+ sctp_ss_fcfs_is_empty,
+ sctp_ss_fcfs_remove,
+ sctp_ss_fcfs_select,
+ sctp_ss_default_scheduled,
+ sctp_ss_default_packet_done,
+ sctp_ss_default_get_value,
+ sctp_ss_default_set_value,
+ sctp_ss_default_is_user_msgs_incomplete
+#else
+ .sctp_ss_init = sctp_ss_fcfs_init,
+ .sctp_ss_clear = sctp_ss_fcfs_clear,
+ .sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
+ .sctp_ss_add_to_stream = sctp_ss_fcfs_add,
+ .sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
+ .sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
+ .sctp_ss_select_stream = sctp_ss_fcfs_select,
+ .sctp_ss_scheduled = sctp_ss_default_scheduled,
+ .sctp_ss_packet_done = sctp_ss_default_packet_done,
+ .sctp_ss_get_value = sctp_ss_default_get_value,
+ .sctp_ss_set_value = sctp_ss_default_set_value,
+ .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+}
+};
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_structs.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_structs.h
new file mode 100644
index 000000000..3943b7566
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_structs.h
@@ -0,0 +1,1296 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 362106 2020-06-12 16:31:13Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_STRUCTS_H_
+#define _NETINET_SCTP_STRUCTS_H_
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_auth.h>
+
+struct sctp_timer {
+ sctp_os_timer_t timer;
+
+ int type;
+ /*
+ * Depending on the timer type these will be setup and cast with the
+ * appropriate entity.
+ */
+ void *ep;
+ void *tcb;
+ void *net;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ void *vnet;
+#endif
+
+ /* for sanity checking */
+ void *self;
+ uint32_t ticks;
+ uint32_t stopped_from;
+};
+
+
+struct sctp_foo_stuff {
+ struct sctp_inpcb *inp;
+ uint32_t lineno;
+ uint32_t ticks;
+ int updown;
+};
+
+
+/*
+ * This is the information we track on each interface that we know about from
+ * the distant end.
+ */
+TAILQ_HEAD(sctpnetlisthead, sctp_nets);
+
+struct sctp_stream_reset_list {
+ TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
+ uint32_t seq;
+ uint32_t tsn;
+ uint32_t number_entries;
+ uint16_t list_of_streams[];
+};
+
+TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);
+
+/*
+ * Users of the iterator need to malloc an iterator with a call to
+ * sctp_initiate_iterator(inp_func, assoc_func, inp_func, pcb_flags, pcb_features,
+ * asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
+ *
+ * Use the following defines if you don't care what pcb flags are on the EP
+ * and/or you don't care what state the association is in.
+ *
+ * Note that if you specify an INP as the last argument then ONLY each
+ * association of that single INP will be executed upon. Note that the pcb
+ * flags STILL apply: if the inp you specify has different pcb_flags than
+ * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
+ * ensure the inp you specify gets treated.
+ */
+#define SCTP_PCB_ANY_FLAGS 0x00000000
+#define SCTP_PCB_ANY_FEATURES 0x00000000
+#define SCTP_ASOC_ANY_STATE 0x00000000
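+
+/*
+ * A minimal usage sketch following the argument order documented above
+ * (the callback names and argument values here are hypothetical):
+ *
+ *	sctp_initiate_iterator(my_inp_func, my_asoc_func, my_inp_end_func,
+ *	                       SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
+ *	                       SCTP_ASOC_ANY_STATE, my_arg, 0,
+ *	                       my_end_func, NULL);
+ */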
+
+typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
+ uint32_t val);
+typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
+typedef void (*end_func) (void *ptr, uint32_t val);
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SCTP_MCORE_INPUT) && defined(SMP)
+/* what's on the mcore control struct */
+struct sctp_mcore_queue {
+ TAILQ_ENTRY(sctp_mcore_queue) next;
+ struct vnet *vn;
+ struct mbuf *m;
+ int off;
+ int v6;
+};
+
+TAILQ_HEAD(sctp_mcore_qhead, sctp_mcore_queue);
+
+struct sctp_mcore_ctrl {
+ SCTP_PROCESS_STRUCT thread_proc;
+ struct sctp_mcore_qhead que;
+ struct mtx core_mtx;
+ struct mtx que_mtx;
+ int running;
+ int cpuid;
+};
+#endif
+#endif
+
+struct sctp_iterator {
+ TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct vnet *vn;
+#endif
+ struct sctp_timer tmr;
+ struct sctp_inpcb *inp; /* current endpoint */
+	struct sctp_tcb *stcb;	/* current assoc */
+ struct sctp_inpcb *next_inp; /* special hook to skip to */
+ asoc_func function_assoc; /* per assoc function */
+ inp_func function_inp; /* per endpoint function */
+ inp_func function_inp_end; /* end INP function */
+ end_func function_atend; /* iterator completion function */
+ void *pointer; /* pointer for apply func to use */
+ uint32_t val; /* value for apply func to use */
+ uint32_t pcb_flags; /* endpoint flags being checked */
+ uint32_t pcb_features; /* endpoint features being checked */
+ uint32_t asoc_state; /* assoc state being checked */
+ uint32_t iterator_flags;
+ uint8_t no_chunk_output;
+ uint8_t done_current_ep;
+};
+/* iterator_flags values */
+#define SCTP_ITERATOR_DO_ALL_INP 0x00000001
+#define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002
+
+
+TAILQ_HEAD(sctpiterators, sctp_iterator);
+
+struct sctp_copy_all {
+ struct sctp_inpcb *inp; /* ep */
+ struct mbuf *m;
+ struct sctp_sndrcvinfo sndrcv;
+ ssize_t sndlen;
+ int cnt_sent;
+ int cnt_failed;
+};
+
+struct sctp_asconf_iterator {
+ struct sctpladdr list_of_work;
+ int cnt;
+};
+
+struct iterator_control {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct mtx ipi_iterator_wq_mtx;
+ struct mtx it_mtx;
+#elif defined(__APPLE__) && !defined(__Userspace__)
+ lck_mtx_t *ipi_iterator_wq_mtx;
+ lck_mtx_t *it_mtx;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(__Userspace__)
+ userland_mutex_t ipi_iterator_wq_mtx;
+ userland_mutex_t it_mtx;
+ userland_cond_t iterator_wakeup;
+#else
+ pthread_mutex_t ipi_iterator_wq_mtx;
+ pthread_mutex_t it_mtx;
+ pthread_cond_t iterator_wakeup;
+#endif
+#elif defined(_WIN32) && !defined(__Userspace__)
+ struct spinlock it_lock;
+ struct spinlock ipi_iterator_wq_lock;
+ KEVENT iterator_wakeup[2];
+ PFILE_OBJECT iterator_thread_obj;
+#else
+ void *it_mtx;
+#endif
+#if !(defined(_WIN32) && !defined(__Userspace__))
+#if !defined(__Userspace__)
+ SCTP_PROCESS_STRUCT thread_proc;
+#else
+ userland_thread_t thread_proc;
+#endif
+#endif
+ struct sctpiterators iteratorhead;
+ struct sctp_iterator *cur_it;
+ uint32_t iterator_running;
+ uint32_t iterator_flags;
+};
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+#define SCTP_ITERATOR_MUST_EXIT 0x00000001
+#define SCTP_ITERATOR_EXITED 0x00000002
+#endif
+#define SCTP_ITERATOR_STOP_CUR_IT 0x00000004
+#define SCTP_ITERATOR_STOP_CUR_INP 0x00000008
+
+struct sctp_net_route {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct nhop_object *ro_nh;
+ struct llentry *ro_lle;
+ char *ro_prepend;
+ uint16_t ro_plen;
+ uint16_t ro_flags;
+ uint16_t ro_mtu;
+ uint16_t spare;
+#else
+ sctp_rtentry_t *ro_rt;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN)
+ struct llentry *ro_lle;
+#endif
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
+ struct ifaddr *ro_srcia;
+#endif
+#if !defined(APPLE_LEOPARD)
+ uint32_t ro_flags;
+#endif
+#endif
+ union sctp_sockstore _l_addr; /* remote peer addr */
+ struct sctp_ifa *_s_addr; /* our selected src addr */
+};
+
+struct htcp {
+ uint16_t alpha; /* Fixed point arith, << 7 */
+ uint8_t beta; /* Fixed point arith, << 7 */
+	uint8_t modeswitch;	/* Delay modeswitch until we have had at least one congestion event */
+ uint32_t last_cong; /* Time since last congestion event end */
+ uint32_t undo_last_cong;
+ uint16_t bytes_acked;
+ uint32_t bytecount;
+ uint32_t minRTT;
+ uint32_t maxRTT;
+
+ uint32_t undo_maxRTT;
+ uint32_t undo_old_maxB;
+
+ /* Bandwidth estimation */
+ uint32_t minB;
+ uint32_t maxB;
+ uint32_t old_maxB;
+ uint32_t Bi;
+ uint32_t lasttime;
+};
+
+struct rtcc_cc {
+ struct timeval tls; /* The time we started the sending */
+ uint64_t lbw; /* Our last estimated bw */
+ uint64_t lbw_rtt; /* RTT at bw estimate */
+ uint64_t bw_bytes; /* The total bytes since this sending began */
+ uint64_t bw_tot_time; /* The total time since sending began */
+ uint64_t new_tot_time; /* temp holding the new value */
+ uint64_t bw_bytes_at_last_rttc; /* What bw_bytes was at last rtt calc */
+ uint32_t cwnd_at_bw_set; /* Cwnd at last bw saved - lbw */
+ uint32_t vol_reduce; /* cnt of voluntary reductions */
+ uint16_t steady_step; /* The number required to be in steady state*/
+ uint16_t step_cnt; /* The current number */
+ uint8_t ret_from_eq; /* When all things are equal what do I return 0/1 - 1 no cc advance */
+ uint8_t use_dccc_ecn; /* Flag to enable DCCC ECN */
+	uint8_t tls_needs_set;	/* Flag to indicate we need to set tls; 0 or 1 means set at send, 2 means not */
+ uint8_t last_step_state; /* Last state if steady state stepdown is on */
+ uint8_t rtt_set_this_sack; /* Flag saying this sack had RTT calc on it */
+ uint8_t last_inst_ind; /* Last saved inst indication */
+};
+
+
+struct sctp_nets {
+ TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */
+
+ /*
+	 * Things in the top half might be split out into a common
+ * structure shared by all.
+ */
+ struct sctp_timer pmtu_timer;
+ struct sctp_timer hb_timer;
+
+ /*
+ * The following two in combination equate to a route entry for v6
+ * or v4.
+ */
+ struct sctp_net_route ro;
+
+ /* mtu discovered so far */
+ uint32_t mtu;
+ uint32_t ssthresh; /* not sure about this one for split */
+ uint32_t last_cwr_tsn;
+ uint32_t cwr_window_tsn;
+ uint32_t ecn_ce_pkt_cnt;
+ uint32_t lost_cnt;
+ /* smoothed average things for RTT and RTO itself */
+ int lastsa;
+ int lastsv;
+ uint64_t rtt; /* last measured rtt value in us */
+ uint32_t RTO;
+
+ /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
+ struct sctp_timer rxt_timer;
+
+ /* last time in seconds I sent to it */
+ struct timeval last_sent_time;
+ union cc_control_data {
+ struct htcp htcp_ca; /* JRS - struct used in HTCP algorithm */
+ struct rtcc_cc rtcc; /* rtcc module cc stuff */
+ } cc_mod;
+ int ref_count;
+
+ /* Congestion stats per destination */
+ /*
+ * flight size variables and such, sorry Vern, I could not avoid
+ * this if I wanted performance :>
+ */
+ uint32_t flight_size;
+ uint32_t cwnd; /* actual cwnd */
+ uint32_t prev_cwnd; /* cwnd before any processing */
+ uint32_t ecn_prev_cwnd; /* ECN prev cwnd at first ecn_echo seen in new window */
+ uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */
+	/* tracking variables to avoid the alloc/free in sack processing */
+ unsigned int net_ack;
+ unsigned int net_ack2;
+
+ /*
+ * JRS - 5/8/07 - Variable to track last time
+ * a destination was active for CMT PF
+ */
+ uint32_t last_active;
+
+ /*
+ * CMT variables (iyengar@cis.udel.edu)
+ */
+ uint32_t this_sack_highest_newack; /* tracks highest TSN newly
+ * acked for a given dest in
+ * the current SACK. Used in
+ * SFR and HTNA algos */
+ uint32_t pseudo_cumack; /* CMT CUC algorithm. Maintains next expected
+ * pseudo-cumack for this destination */
+ uint32_t rtx_pseudo_cumack; /* CMT CUC algorithm. Maintains next
+ * expected pseudo-cumack for this
+ * destination */
+
+ /* CMT fast recovery variables */
+ uint32_t fast_recovery_tsn;
+ uint32_t heartbeat_random1;
+ uint32_t heartbeat_random2;
+#ifdef INET6
+ uint32_t flowlabel;
+#endif
+ uint8_t dscp;
+
+ struct timeval start_time; /* time when this net was created */
+	uint32_t marked_retrans;	/* number of DATA chunks marked for
+ timer based retransmissions */
+ uint32_t marked_fastretrans;
+ uint32_t heart_beat_delay; /* Heart Beat delay in ms */
+
+ /* if this guy is ok or not ... status */
+ uint16_t dest_state;
+ /* number of timeouts to consider the destination unreachable */
+ uint16_t failure_threshold;
+ /* number of timeouts to consider the destination potentially failed */
+ uint16_t pf_threshold;
+ /* error stats on the destination */
+ uint16_t error_count;
+ /* UDP port number in case of UDP tunneling */
+ uint16_t port;
+
+ uint8_t fast_retran_loss_recovery;
+ uint8_t will_exit_fast_recovery;
+ /* Flags that probably can be combined into dest_state */
+ uint8_t fast_retran_ip; /* fast retransmit in progress */
+ uint8_t hb_responded;
+ uint8_t saw_newack; /* CMT's SFR algorithm flag */
+ uint8_t src_addr_selected; /* if we split we move */
+ uint8_t indx_of_eligible_next_to_use;
+	uint8_t addr_is_local;	/* it's a local address (if known) could move
+ * in split */
+
+ /*
+ * CMT variables (iyengar@cis.udel.edu)
+ */
+ uint8_t find_pseudo_cumack; /* CMT CUC algorithm. Flag used to
+ * find a new pseudocumack. This flag
+ * is set after a new pseudo-cumack
+ * has been received and indicates
+ * that the sender should find the
+ * next pseudo-cumack expected for
+ * this destination */
+ uint8_t find_rtx_pseudo_cumack; /* CMT CUCv2 algorithm. Flag used to
+ * find a new rtx-pseudocumack. This
+ * flag is set after a new
+ * rtx-pseudo-cumack has been received
+ * and indicates that the sender
+ * should find the next
+ * rtx-pseudo-cumack expected for this
+ * destination */
+ uint8_t new_pseudo_cumack; /* CMT CUC algorithm. Flag used to
+ * indicate if a new pseudo-cumack or
+ * rtx-pseudo-cumack has been received */
+ uint8_t window_probe; /* Doing a window probe? */
+ uint8_t RTO_measured; /* Have we done the first measure */
+ uint8_t last_hs_used; /* index into the last HS table entry we used */
+ uint8_t lan_type;
+ uint8_t rto_needed;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint32_t flowid;
+ uint8_t flowtype;
+#endif
+};
+
+
+struct sctp_data_chunkrec {
+ uint32_t tsn; /* the TSN of this transmit */
+ uint32_t mid; /* the message identifier of this transmit */
+ uint16_t sid; /* the stream number of this guy */
+ uint32_t ppid;
+ uint32_t context; /* from send */
+ uint32_t cwnd_at_send;
+ /*
+ * part of the Highest sacked algorithm to be able to stroke counts
+ * on ones that are FR'd.
+ */
+ uint32_t fast_retran_tsn; /* sending_seq at the time of FR */
+ struct timeval timetodrop; /* time we drop it from queue */
+ uint32_t fsn; /* Fragment Sequence Number */
+ uint8_t doing_fast_retransmit;
+ uint8_t rcv_flags; /* flags pulled from data chunk on inbound for
+ * outbound holds sending flags for PR-SCTP.
+ */
+ uint8_t state_flags;
+ uint8_t chunk_was_revoked;
+ uint8_t fwd_tsn_cnt;
+};
+
+TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);
+
+/* The lower byte is used to enumerate PR_SCTP policies */
+#define CHUNK_FLAGS_PR_SCTP_TTL SCTP_PR_SCTP_TTL
+#define CHUNK_FLAGS_PR_SCTP_BUF SCTP_PR_SCTP_BUF
+#define CHUNK_FLAGS_PR_SCTP_RTX SCTP_PR_SCTP_RTX
+
+/* The upper byte is used as a bit mask */
+#define CHUNK_FLAGS_FRAGMENT_OK 0x0100
+
+struct chk_id {
+ uint8_t id;
+ uint8_t can_take_data;
+};
+
+
+struct sctp_tmit_chunk {
+ union {
+ struct sctp_data_chunkrec data;
+ struct chk_id chunk_id;
+ } rec;
+ struct sctp_association *asoc; /* bp to asoc this belongs to */
+ struct timeval sent_rcv_time; /* filled in if RTT being calculated */
+ struct mbuf *data; /* pointer to mbuf chain of data */
+ struct mbuf *last_mbuf; /* pointer to last mbuf in chain */
+ struct sctp_nets *whoTo;
+ TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */
+ int32_t sent; /* the send status */
+ uint16_t snd_count; /* number of times I sent */
+ uint16_t flags; /* flags, such as FRAGMENT_OK */
+ uint16_t send_size;
+ uint16_t book_size;
+ uint16_t mbcnt;
+ uint16_t auth_keyid;
+ uint8_t holds_key_ref; /* flag if auth keyid refcount is held */
+ uint8_t pad_inplace;
+ uint8_t do_rtt;
+ uint8_t book_size_scale;
+ uint8_t no_fr_allowed;
+ uint8_t copy_by_ref;
+ uint8_t window_probe;
+};
+
+struct sctp_queued_to_read {	/* sinfo structure plus more */
+ uint16_t sinfo_stream; /* off the wire */
+ uint16_t sinfo_flags; /* SCTP_UNORDERED from wire use SCTP_EOF for
+ * EOR */
+ uint32_t sinfo_ppid; /* off the wire */
+ uint32_t sinfo_context; /* pick this up from assoc def context? */
+ uint32_t sinfo_timetolive; /* not used by kernel */
+ uint32_t sinfo_tsn; /* Use this in reassembly as first TSN */
+ uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */
+ sctp_assoc_t sinfo_assoc_id; /* our assoc id */
+ /* Non sinfo stuff */
+ uint32_t mid; /* Fragment Index */
+ uint32_t length; /* length of data */
+ uint32_t held_length; /* length held in sb */
+ uint32_t top_fsn; /* Highest FSN in queue */
+ uint32_t fsn_included; /* Highest FSN in *data portion */
+ struct sctp_nets *whoFrom; /* where it came from */
+ struct mbuf *data; /* front of the mbuf chain of data with
+ * PKT_HDR */
+ struct mbuf *tail_mbuf; /* used for multi-part data */
+ struct mbuf *aux_data; /* used to hold/cache control if o/s does not take it from us */
+ struct sctp_tcb *stcb; /* assoc, used for window update */
+ TAILQ_ENTRY(sctp_queued_to_read) next;
+ TAILQ_ENTRY(sctp_queued_to_read) next_instrm;
+ struct sctpchunk_listhead reasm;
+ uint16_t port_from;
+ uint16_t spec_flags; /* Flags to hold the notification field */
+ uint8_t do_not_ref_stcb;
+ uint8_t end_added;
+ uint8_t pdapi_aborted;
+ uint8_t pdapi_started;
+ uint8_t some_taken;
+ uint8_t last_frag_seen;
+ uint8_t first_frag_seen;
+ uint8_t on_read_q;
+ uint8_t on_strm_q;
+};
+
+#define SCTP_ON_ORDERED 1
+#define SCTP_ON_UNORDERED 2
+
+/* This data structure will be on the outbound
+ * stream queues. Data will be pulled off from
+ * the front of the mbuf data and chunk-ified
+ * by the output routines. We will custom
+ * fit every chunk we pull to the send/sent
+ * queue to make up the next full packet
+ * if we can. An entry cannot be removed
+ * from the stream_out queue until
+ * the msg_is_complete flag is set. This
+ * means at times data/tail_mbuf MIGHT
+ * be NULL. If that occurs, it happens
+ * for one of two reasons. Either the user
+ * is blocked on a send() call and has not
+ * awoken to copy more data down... OR
+ * the user is in the explicit MSG_EOR mode
+ * and wrote some data, but has not completed
+ * sending.
+ */
+struct sctp_stream_queue_pending {
+ struct mbuf *data;
+ struct mbuf *tail_mbuf;
+ struct timeval ts;
+ struct sctp_nets *net;
+ TAILQ_ENTRY (sctp_stream_queue_pending) next;
+ TAILQ_ENTRY (sctp_stream_queue_pending) ss_next;
+ uint32_t fsn;
+ uint32_t length;
+ uint32_t timetolive;
+ uint32_t ppid;
+ uint32_t context;
+ uint16_t sinfo_flags;
+ uint16_t sid;
+ uint16_t act_flags;
+ uint16_t auth_keyid;
+ uint8_t holds_key_ref;
+ uint8_t msg_is_complete;
+ uint8_t some_taken;
+ uint8_t sender_all_done;
+ uint8_t put_last_out;
+ uint8_t discard_rest;
+};
+
+/*
+ * this struct contains info that is used to track inbound stream data and
+ * help with ordering.
+ */
+TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
+struct sctp_stream_in {
+ struct sctp_readhead inqueue;
+ struct sctp_readhead uno_inqueue;
+ uint32_t last_mid_delivered; /* used for re-order */
+ uint16_t sid;
+ uint8_t delivery_started;
+ uint8_t pd_api_started;
+};
+
+TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
+TAILQ_HEAD(sctplist_listhead, sctp_stream_queue_pending);
+
+
+/* Round-robin schedulers */
+struct ss_rr {
+ /* next link in wheel */
+ TAILQ_ENTRY(sctp_stream_out) next_spoke;
+};
+
+/* Priority scheduler */
+struct ss_prio {
+ /* next link in wheel */
+ TAILQ_ENTRY(sctp_stream_out) next_spoke;
+ /* priority id */
+ uint16_t priority;
+};
+
+/* Fair Bandwidth scheduler */
+struct ss_fb {
+ /* next link in wheel */
+ TAILQ_ENTRY(sctp_stream_out) next_spoke;
+	/* remaining bytes of the first queued message; -1 when unset */
+ int32_t rounds;
+};
+
+/*
+ * This struct holds all data necessary for
+ * different stream schedulers.
+ */
+struct scheduling_data {
+ struct sctp_stream_out *locked_on_sending;
+ /* circular looking for output selection */
+ struct sctp_stream_out *last_out_stream;
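+	/* rr, prio and fb link the streams into "wheel"; fcfs instead
+	 * queues the individual pending messages on "list".
+	 */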
+ union {
+ struct sctpwheel_listhead wheel;
+ struct sctplist_listhead list;
+ } out;
+};
+
+/*
+ * This union holds all parameters per stream
+ * necessary for different stream schedulers.
+ */
+union scheduling_parameters {
+ struct ss_rr rr;
+ struct ss_prio prio;
+ struct ss_fb fb;
+};
+
+/* States for outgoing streams */
+#define SCTP_STREAM_CLOSED 0x00
+#define SCTP_STREAM_OPENING 0x01
+#define SCTP_STREAM_OPEN 0x02
+#define SCTP_STREAM_RESET_PENDING 0x03
+#define SCTP_STREAM_RESET_IN_FLIGHT 0x04
+
+#define SCTP_MAX_STREAMS_AT_ONCE_RESET 200
+
+/* This struct is used to track the traffic on outbound streams */
+struct sctp_stream_out {
+ struct sctp_streamhead outqueue;
+ union scheduling_parameters ss_params;
+ uint32_t chunks_on_queues; /* send queue and sent queue */
+#if defined(SCTP_DETAILED_STR_STATS)
+ uint32_t abandoned_unsent[SCTP_PR_SCTP_MAX + 1];
+ uint32_t abandoned_sent[SCTP_PR_SCTP_MAX + 1];
+#else
+ /* Only the aggregation */
+ uint32_t abandoned_unsent[1];
+ uint32_t abandoned_sent[1];
+#endif
+ /* For associations using DATA chunks, the lower 16-bit of
+ * next_mid_ordered are used as the next SSN.
+ */
+ uint32_t next_mid_ordered;
+ uint32_t next_mid_unordered;
+ uint16_t sid;
+ uint8_t last_msg_incomplete;
+ uint8_t state;
+};
+
+/* used to keep track of the addresses yet to try to add/delete */
+TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
+struct sctp_asconf_addr {
+ TAILQ_ENTRY(sctp_asconf_addr) next;
+ struct sctp_asconf_addr_param ap;
+ struct sctp_ifa *ifa; /* save the ifa for add/del ip */
+ uint8_t sent; /* has this been sent yet? */
+ uint8_t special_del; /* not to be used in lookup */
+};
+
+struct sctp_scoping {
+ uint8_t ipv4_addr_legal;
+ uint8_t ipv6_addr_legal;
+#if defined(__Userspace__)
+ uint8_t conn_addr_legal;
+#endif
+ uint8_t loopback_scope;
+ uint8_t ipv4_local_scope;
+ uint8_t local_scope;
+ uint8_t site_scope;
+};
+
+#define SCTP_TSN_LOG_SIZE 40
+
+struct sctp_tsn_log {
+ void *stcb;
+ uint32_t tsn;
+ uint32_t seq;
+ uint16_t strm;
+ uint16_t sz;
+ uint16_t flgs;
+ uint16_t in_pos;
+ uint16_t in_out;
+ uint16_t resv;
+};
+
+#define SCTP_FS_SPEC_LOG_SIZE 200
+struct sctp_fs_spec_log {
+ uint32_t sent;
+ uint32_t total_flight;
+ uint32_t tsn;
+ uint16_t book;
+ uint8_t incr;
+ uint8_t decr;
+};
+
+/* This struct is here to cut out the compatibility
+ * pad that bulks up both the inp and stcb. The non
+ * pad portion MUST stay in complete sync with
+ * sctp_sndrcvinfo... i.e. if sinfo_xxxx is added
+ * this must be done here too.
+ */
+struct sctp_nonpad_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint16_t sinfo_keynumber;
+ uint16_t sinfo_keynumber_valid;
+};
+
+/*
+ * JRS - Structure to hold function pointers to the functions responsible
+ * for congestion control.
+ */
+
+struct sctp_cc_functions {
+ void (*sctp_set_initial_cc_param)(struct sctp_tcb *stcb, struct sctp_nets *net);
+ void (*sctp_cwnd_update_after_sack)(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved ,int reneged_all, int will_exit);
+ void (*sctp_cwnd_update_exit_pf)(struct sctp_tcb *stcb, struct sctp_nets *net);
+ void (*sctp_cwnd_update_after_fr)(struct sctp_tcb *stcb,
+ struct sctp_association *asoc);
+ void (*sctp_cwnd_update_after_timeout)(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+ void (*sctp_cwnd_update_after_ecn_echo)(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int in_window, int num_pkt_lost);
+ void (*sctp_cwnd_update_after_packet_dropped)(struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+ uint32_t *bottle_bw, uint32_t *on_queue);
+ void (*sctp_cwnd_update_after_output)(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int burst_limit);
+ void (*sctp_cwnd_update_packet_transmitted)(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+ void (*sctp_cwnd_update_tsn_acknowledged)(struct sctp_nets *net,
+ struct sctp_tmit_chunk *);
+ void (*sctp_cwnd_new_transmission_begins)(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+ void (*sctp_cwnd_prepare_net_for_sack)(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+ int (*sctp_cwnd_socket_option)(struct sctp_tcb *stcb, int set, struct sctp_cc_option *);
+ void (*sctp_rtt_calculated)(struct sctp_tcb *, struct sctp_nets *, struct timeval *);
+};
+
+/*
+ * RS - Structure to hold function pointers to the functions responsible
+ * for stream scheduling.
+ */
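+/*
+ * The holds_lock argument tells a scheduler whether the caller already
+ * owns the TCB send lock; when it is 0, the scheduler takes and releases
+ * the lock itself.
+ */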
+struct sctp_ss_functions {
+ void (*sctp_ss_init)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int holds_lock);
+ void (*sctp_ss_clear)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ int clear_values, int holds_lock);
+ void (*sctp_ss_init_stream)(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq);
+ void (*sctp_ss_add_to_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
+ int (*sctp_ss_is_empty)(struct sctp_tcb *stcb, struct sctp_association *asoc);
+ void (*sctp_ss_remove_from_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
+ struct sctp_stream_out* (*sctp_ss_select_stream)(struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_association *asoc);
+ void (*sctp_ss_scheduled)(struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct sctp_association *asoc, struct sctp_stream_out *strq, int moved_how_much);
+ void (*sctp_ss_packet_done)(struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct sctp_association *asoc);
+ int (*sctp_ss_get_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, uint16_t *value);
+ int (*sctp_ss_set_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_stream_out *strq, uint16_t value);
+ int (*sctp_ss_is_user_msgs_incomplete)(struct sctp_tcb *stcb, struct sctp_association *asoc);
+};
+
+/* used to save ASCONF chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
+struct sctp_asconf {
+ TAILQ_ENTRY(sctp_asconf) next;
+ uint32_t serial_number;
+ uint16_t snd_count;
+ struct mbuf *data;
+ uint16_t len;
+};
+
+/* used to save ASCONF-ACK chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
+struct sctp_asconf_ack {
+ TAILQ_ENTRY(sctp_asconf_ack) next;
+ uint32_t serial_number;
+ struct sctp_nets *last_sent_to;
+ struct mbuf *data;
+ uint16_t len;
+};
+
+/*
+ * Here we have information about each individual association that we track.
+ * In production we would probably be more dynamic, but for ease of
+ * implementation we will have a fixed array that we hunt through in a
+ * linear fashion.
+ */
+struct sctp_association {
+ /* association state */
+ int state;
+
+ /* queue of pending addrs to add/delete */
+ struct sctp_asconf_addrhead asconf_queue;
+
+ struct timeval time_entered; /* time we entered state */
+ struct timeval time_last_rcvd;
+ struct timeval time_last_sent;
+ struct timeval time_last_sat_advance;
+ struct sctp_nonpad_sndrcvinfo def_send;
+
+ /* timers and such */
+ struct sctp_timer dack_timer; /* Delayed ack timer */
+ struct sctp_timer asconf_timer; /* asconf */
+ struct sctp_timer strreset_timer; /* stream reset */
+ struct sctp_timer shut_guard_timer; /* shutdown guard */
+ struct sctp_timer autoclose_timer; /* automatic close timer */
+ struct sctp_timer delete_prim_timer; /* deleting primary dst */
+
+ /* list of restricted local addresses */
+ struct sctpladdr sctp_restricted_addrs;
+
+ /* last local address pending deletion (waiting for an address add) */
+ struct sctp_ifa *asconf_addr_del_pending;
+ /* Deleted primary destination (used to stop timer) */
+ struct sctp_nets *deleted_primary;
+
+ struct sctpnetlisthead nets; /* remote address list */
+
+ /* Free chunk list */
+ struct sctpchunk_listhead free_chunks;
+
+ /* Control chunk queue */
+ struct sctpchunk_listhead control_send_queue;
+
+ /* ASCONF chunk queue */
+ struct sctpchunk_listhead asconf_send_queue;
+
+ /*
+ * Once a TSN hits the wire it is moved to the sent_queue. We
+ * maintain two counts here (don't know if any but retran_cnt is
+ * needed). The idea is that the sent_queue_retran_cnt reflects how
+	 * many chunks have been marked for retransmission by either T3-rxt
+ * or FR.
+ */
+ struct sctpchunk_listhead sent_queue;
+ struct sctpchunk_listhead send_queue;
+
+ /* Scheduling queues */
+ struct scheduling_data ss_data;
+
+ /* If an iterator is looking at me, this is it */
+ struct sctp_iterator *stcb_starting_point_for_iterator;
+
+ /* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
+ struct sctp_asconf_ackhead asconf_ack_sent;
+
+ /*
+ * pointer to last stream reset queued to control queue by us with
+ * requests.
+ */
+ struct sctp_tmit_chunk *str_reset;
+ /*
+ * if Source Address Selection happening, this will rotate through
+ * the link list.
+ */
+ struct sctp_laddr *last_used_address;
+
+ /* stream arrays */
+ struct sctp_stream_in *strmin;
+ struct sctp_stream_out *strmout;
+ uint8_t *mapping_array;
+ /* primary destination to use */
+ struct sctp_nets *primary_destination;
+ struct sctp_nets *alternate; /* If primary is down or PF */
+ /* For CMT */
+ struct sctp_nets *last_net_cmt_send_started;
+ /* last place I got a data chunk from */
+ struct sctp_nets *last_data_chunk_from;
+ /* last place I got a control from */
+ struct sctp_nets *last_control_chunk_from;
+
+
+ /*
+ * wait to the point the cum-ack passes req->send_reset_at_tsn for
+ * any req on the list.
+ */
+ struct sctp_resethead resetHead;
+
+ /* queue of chunks waiting to be sent into the local stack */
+ struct sctp_readhead pending_reply_queue;
+
+ /* JRS - the congestion control functions are in this struct */
+ struct sctp_cc_functions cc_functions;
+ /* JRS - value to store the currently loaded congestion control module */
+ uint32_t congestion_control_module;
+ /* RS - the stream scheduling functions are in this struct */
+ struct sctp_ss_functions ss_functions;
+ /* RS - value to store the currently loaded stream scheduling module */
+ uint32_t stream_scheduling_module;
+
+ uint32_t vrf_id;
+ uint32_t cookie_preserve_req;
+ /* ASCONF next seq I am sending out, inits at init-tsn */
+ uint32_t asconf_seq_out;
+ uint32_t asconf_seq_out_acked;
+ /* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
+ uint32_t asconf_seq_in;
+
+ /* next seq I am sending in str reset messages */
+ uint32_t str_reset_seq_out;
+ /* next seq I am expecting in str reset messages */
+ uint32_t str_reset_seq_in;
+
+ /* various verification tag information */
+	uint32_t my_vtag;	/* The tag to be used. If the assoc is re-initiated
+				 * by the remote end, and I have unlocked, this
+				 * will be regenerated to a new random value. */
+ uint32_t peer_vtag; /* The peers last tag */
+
+ uint32_t my_vtag_nonce;
+ uint32_t peer_vtag_nonce;
+
+ uint32_t assoc_id;
+
+ /* This is the SCTP fragmentation threshold */
+ uint32_t smallest_mtu;
+
+ /*
+ * Special hook for Fast retransmit, allows us to track the highest
+ * TSN that is NEW in this SACK if gap ack blocks are present.
+ */
+ uint32_t this_sack_highest_gap;
+
+ /*
+ * The highest consecutive TSN that has been acked by peer on my
+ * sends
+ */
+ uint32_t last_acked_seq;
+
+ /* The next TSN that I will use in sending. */
+ uint32_t sending_seq;
+
+ /* Original seq number I used ??questionable to keep?? */
+ uint32_t init_seq_number;
+
+
+ /* The Advanced Peer Ack Point, as required by the PR-SCTP */
+ /* (A1 in Section 4.2) */
+ uint32_t advanced_peer_ack_point;
+
+ /*
+	 * The highest consecutive TSN at the bottom of the mapping array
+ * (for his sends).
+ */
+ uint32_t cumulative_tsn;
+ /*
+ * Used to track the mapping array and its offset bits. This MAY be
+	 * lower than cumulative_tsn.
+ */
+ uint32_t mapping_array_base_tsn;
+ /*
+ * used to track highest TSN we have received and is listed in the
+ * mapping array.
+ */
+ uint32_t highest_tsn_inside_map;
+
+ /* EY - new NR variables used for nr_sack based on mapping_array*/
+ uint8_t *nr_mapping_array;
+ uint32_t highest_tsn_inside_nr_map;
+
+ uint32_t fast_recovery_tsn;
+ uint32_t sat_t3_recovery_tsn;
+ uint32_t tsn_last_delivered;
+ /*
+ * For the pd-api we should re-write this a bit more efficient. We
+ * could have multiple sctp_queued_to_read's that we are building at
+ * once. Now we only do this when we get ready to deliver to the
+ * socket buffer. Note that we depend on the fact that the struct is
+ * "stuck" on the read queue until we finish all the pd-api.
+ */
+ struct sctp_queued_to_read *control_pdapi;
+
+ uint32_t tsn_of_pdapi_last_delivered;
+ uint32_t pdapi_ppid;
+ uint32_t context;
+ uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
+ uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
+ uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ /*
+ * special log - This adds considerable size
+ * to the asoc, but provides a log that you
+ * can use to detect problems via kgdb.
+ */
+ struct sctp_tsn_log in_tsnlog[SCTP_TSN_LOG_SIZE];
+ struct sctp_tsn_log out_tsnlog[SCTP_TSN_LOG_SIZE];
+ uint32_t cumack_log[SCTP_TSN_LOG_SIZE];
+ uint32_t cumack_logsnt[SCTP_TSN_LOG_SIZE];
+ uint16_t tsn_in_at;
+ uint16_t tsn_out_at;
+ uint16_t tsn_in_wrapped;
+ uint16_t tsn_out_wrapped;
+ uint16_t cumack_log_at;
+ uint16_t cumack_log_atsnt;
+#endif /* SCTP_ASOCLOG_OF_TSNS */
+#ifdef SCTP_FS_SPEC_LOG
+ struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE];
+ uint16_t fs_index;
+#endif
+
+ /*
+ * window state information and smallest MTU that I use to bound
+ * segmentation
+ */
+ uint32_t peers_rwnd;
+ uint32_t my_rwnd;
+ uint32_t my_last_reported_rwnd;
+ uint32_t sctp_frag_point;
+
+ uint32_t total_output_queue_size;
+
+ uint32_t sb_cc; /* shadow of sb_cc */
+ uint32_t sb_send_resv; /* amount reserved on a send */
+ uint32_t my_rwnd_control_len; /* shadow of sb_mbcnt used for rwnd control */
+#ifdef INET6
+ uint32_t default_flowlabel;
+#endif
+ uint32_t pr_sctp_cnt;
+ int ctrl_queue_cnt; /* could be removed REM - NO IT CAN'T!! RRS */
+ /*
+ * All outbound datagrams queue into this list from the individual
+ * stream queue. Here they get assigned a TSN and then await
+ * sending. The stream seq comes when it is first put in the
+ * individual str queue
+ */
+ unsigned int stream_queue_cnt;
+ unsigned int send_queue_cnt;
+ unsigned int sent_queue_cnt;
+ unsigned int sent_queue_cnt_removeable;
+ /*
+ * Number on sent queue that are marked for retran until this value
+ * is 0 we only send one packet of retran'ed data.
+ */
+ unsigned int sent_queue_retran_cnt;
+
+ unsigned int size_on_reasm_queue;
+ unsigned int cnt_on_reasm_queue;
+ unsigned int fwd_tsn_cnt;
+ /* amount of data (bytes) currently in flight (on all destinations) */
+ unsigned int total_flight;
+ /* Total book size in flight */
+ unsigned int total_flight_count; /* count of chunks used with
+ * book total */
+	/* count of destination nets and list of destination nets */
+ unsigned int numnets;
+
+ /* Total error count on this association */
+ unsigned int overall_error_count;
+
+ unsigned int cnt_msg_on_sb;
+
+ /* All stream count of chunks for delivery */
+ unsigned int size_on_all_streams;
+ unsigned int cnt_on_all_streams;
+
+ /* Heart Beat delay in ms */
+ uint32_t heart_beat_delay;
+
+ /* autoclose */
+ uint32_t sctp_autoclose_ticks;
+
+ /* how many preopen streams we have */
+ unsigned int pre_open_streams;
+
+ /* How many streams I support coming into me */
+ unsigned int max_inbound_streams;
+
+ /* the cookie life I award for any cookie, in seconds */
+ uint32_t cookie_life;
+ /* time to delay acks for */
+ unsigned int delayed_ack;
+ unsigned int old_delayed_ack;
+ unsigned int sack_freq;
+ unsigned int data_pkts_seen;
+
+ unsigned int numduptsns;
+ int dup_tsns[SCTP_MAX_DUP_TSNS];
+ uint32_t initial_init_rto_max; /* initial RTO for INIT's */
+ uint32_t initial_rto; /* initial send RTO */
+ uint32_t minrto; /* per assoc RTO-MIN */
+ uint32_t maxrto; /* per assoc RTO-MAX */
+
+ /* authentication fields */
+ sctp_auth_chklist_t *local_auth_chunks;
+ sctp_auth_chklist_t *peer_auth_chunks;
+ sctp_hmaclist_t *local_hmacs; /* local HMACs supported */
+ sctp_hmaclist_t *peer_hmacs; /* peer HMACs supported */
+ struct sctp_keyhead shared_keys; /* assoc's shared keys */
+ sctp_authinfo_t authinfo; /* randoms, cached keys */
+ /*
+	 * refcnt to block freeing when a sender or receiver is off copying
+ * user data in.
+ */
+ uint32_t refcnt;
+ uint32_t chunks_on_out_queue; /* total chunks floating around,
+ * locked by send socket buffer */
+ uint32_t peers_adaptation;
+ uint32_t default_mtu;
+ uint16_t peer_hmac_id; /* peer HMAC id to send */
+
+ /*
+ * Being that we have no bag to collect stale cookies, and that we
+ * really would not want to anyway.. we will count them in this
+ * counter. We of course feed them to the pigeons right away (I have
+ * always thought of pigeons as flying rats).
+ */
+ uint16_t stale_cookie_count;
+
+ /*
+	 * For the partial delivery API: if it is up (invoked), this is the
+	 * stream and sequence number I last delivered.
+ */
+ uint16_t str_of_pdapi;
+ uint16_t ssn_of_pdapi;
+
+ /* counts of actual built streams. Allocation may be more however */
+ /* could re-arrange to optimize space here. */
+ uint16_t streamincnt;
+ uint16_t streamoutcnt;
+ uint16_t strm_realoutsize;
+ uint16_t strm_pending_add_size;
+ /* my maximum number of retrans of INIT and SEND */
+	/* copied from SCTP but should be individually settable */
+ uint16_t max_init_times;
+ uint16_t max_send_times;
+
+ uint16_t def_net_failure;
+
+ uint16_t def_net_pf_threshold;
+
+ /*
+	 * lock flag: 0 is ok to send, 1+ (doubles as a retran count) is
+ * awaiting ACK
+ */
+ uint16_t mapping_array_size;
+
+ uint16_t last_strm_seq_delivered;
+ uint16_t last_strm_no_delivered;
+
+ uint16_t last_revoke_count;
+ int16_t num_send_timers_up;
+
+ uint16_t stream_locked_on;
+ uint16_t ecn_echo_cnt_onq;
+
+ uint16_t free_chunk_cnt;
+ uint8_t stream_locked;
+ uint8_t authenticated; /* packet authenticated ok */
+ /*
+	 * This flag indicates that a SACK needs to be sent.
+	 * Initially this is 1 to send the first SACK immediately.
+ */
+ uint8_t send_sack;
+
+ /* max burst of new packets into the network */
+ uint32_t max_burst;
+ /* max burst of fast retransmit packets */
+ uint32_t fr_max_burst;
+
+ uint8_t sat_network; /* RTT is in range of sat net or greater */
+ uint8_t sat_network_lockout; /* lockout code */
+ uint8_t burst_limit_applied; /* Burst limit in effect at last send? */
+ /* flag goes on when we are doing a partial delivery api */
+ uint8_t hb_random_values[4];
+ uint8_t fragmented_delivery_inprogress;
+ uint8_t fragment_flags;
+ uint8_t last_flags_delivered;
+ uint8_t hb_ect_randombit;
+ uint8_t hb_random_idx;
+ uint8_t default_dscp;
+ uint8_t asconf_del_pending; /* asconf delete last addr pending */
+ uint8_t trigger_reset;
+ /*
+ * This value, plus all other ack'd but above cum-ack is added
+ * together to cross check against the bit that we have yet to
+ * define (probably in the SACK). When the cum-ack is updated, this
+ * sum is updated as well.
+ */
+
+ /* Flags whether an extension is supported or not */
+ uint8_t ecn_supported;
+ uint8_t prsctp_supported;
+ uint8_t auth_supported;
+ uint8_t asconf_supported;
+ uint8_t reconfig_supported;
+ uint8_t nrsack_supported;
+ uint8_t pktdrop_supported;
+ uint8_t idata_supported;
+
+ /* Did the peer make the stream config (add out) request */
+ uint8_t peer_req_out;
+
+ uint8_t local_strreset_support;
+ uint8_t peer_supports_nat;
+
+ struct sctp_scoping scope;
+ /* flags to handle send alternate net tracking */
+ uint8_t used_alt_asconfack;
+ uint8_t fast_retran_loss_recovery;
+ uint8_t sat_t3_loss_recovery;
+ uint8_t dropped_special_cnt;
+ uint8_t seen_a_sack_this_pkt;
+ uint8_t stream_reset_outstanding;
+ uint8_t stream_reset_out_is_outstanding;
+ uint8_t delayed_connection;
+ uint8_t ifp_had_enobuf;
+ uint8_t saw_sack_with_frags;
+ uint8_t saw_sack_with_nr_frags;
+ uint8_t in_asocid_hash;
+ uint8_t assoc_up_sent;
+ uint8_t adaptation_needed;
+ uint8_t adaptation_sent;
+ /* CMT variables */
+ uint8_t cmt_dac_pkts_rcvd;
+ uint8_t sctp_cmt_on_off;
+ uint8_t iam_blocking;
+ uint8_t cookie_how[8];
+ /* JRS 5/21/07 - CMT PF variable */
+ uint8_t sctp_cmt_pf;
+ uint8_t use_precise_time;
+ uint64_t sctp_features;
+ uint32_t max_cwnd;
+ uint16_t port; /* remote UDP encapsulation port */
+ /*
+ * The mapping array is used to track out of order sequences above
+	 * last_acked_seq. 0 indicates packet missing, 1 indicates packet
+	 * rec'd. We slide it up every time we raise last_acked_seq and zero
+	 * trailing locations out.  If I get a TSN above the array
+ * mappingArraySz, I discard the datagram and let retransmit happen.
+ */
+ uint32_t marked_retrans;
+ uint32_t timoinit;
+ uint32_t timodata;
+ uint32_t timosack;
+ uint32_t timoshutdown;
+ uint32_t timoheartbeat;
+ uint32_t timocookie;
+ uint32_t timoshutdownack;
+ struct timeval start_time;
+ struct timeval discontinuity_time;
+ uint64_t abandoned_unsent[SCTP_PR_SCTP_MAX + 1];
+ uint64_t abandoned_sent[SCTP_PR_SCTP_MAX + 1];
+};
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.c
new file mode 100644
index 000000000..91068118e
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.c
@@ -0,0 +1,1625 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.c 361934 2020-06-08 20:23:20Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+#include <netinet/sctp_bsd_addr.h>
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+FEATURE(sctp, "Stream Control Transmission Protocol");
+#endif
+
+/*
+ * sysctl tunable variables
+ */
+
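+/*
+ * Set every tunable to its compile-time SCTPCTL_*_DEFAULT value. On
+ * userspace builds the hash table and chunk scale sizes are only
+ * defaulted when the application has not already set them (non-zero).
+ */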
+void
+sctp_init_sysctls()
+{
+ SCTP_BASE_SYSCTL(sctp_sendspace) = SCTPCTL_MAXDGRAM_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_recvspace) = SCTPCTL_RECVSPACE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_auto_asconf) = SCTPCTL_AUTOASCONF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_multiple_asconfs) = SCTPCTL_MULTIPLEASCONFS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_ecn_enable) = SCTPCTL_ECN_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_pr_enable) = SCTPCTL_PR_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_auth_enable) = SCTPCTL_AUTH_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_asconf_enable) = SCTPCTL_ASCONF_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_reconfig_enable) = SCTPCTL_RECONFIG_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_nrsack_enable) = SCTPCTL_NRSACK_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_pktdrop_enable) = SCTPCTL_PKTDROP_ENABLE_DEFAULT;
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) = SCTPCTL_LOOPBACK_NOCSUM_DEFAULT;
+#endif
+ SCTP_BASE_SYSCTL(sctp_peer_chunk_oh) = SCTPCTL_PEER_CHKOH_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_max_burst_default) = SCTPCTL_MAXBURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_fr_max_burst_default) = SCTPCTL_FRMAXBURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = SCTPCTL_MAXCHUNKS_DEFAULT;
+#if defined(__Userspace__)
+ if (SCTP_BASE_SYSCTL(sctp_hashtblsize) == 0) {
+ SCTP_BASE_SYSCTL(sctp_hashtblsize) = SCTPCTL_TCBHASHSIZE_DEFAULT;
+ }
+#else
+ SCTP_BASE_SYSCTL(sctp_hashtblsize) = SCTPCTL_TCBHASHSIZE_DEFAULT;
+#endif
+#if defined(__Userspace__)
+ if (SCTP_BASE_SYSCTL(sctp_pcbtblsize) == 0) {
+ SCTP_BASE_SYSCTL(sctp_pcbtblsize) = SCTPCTL_PCBHASHSIZE_DEFAULT;
+ }
+#else
+ SCTP_BASE_SYSCTL(sctp_pcbtblsize) = SCTPCTL_PCBHASHSIZE_DEFAULT;
+#endif
+ SCTP_BASE_SYSCTL(sctp_min_split_point) = SCTPCTL_MIN_SPLIT_POINT_DEFAULT;
+#if defined(__Userspace__)
+ if (SCTP_BASE_SYSCTL(sctp_chunkscale) == 0) {
+ SCTP_BASE_SYSCTL(sctp_chunkscale) = SCTPCTL_CHUNKSCALE_DEFAULT;
+ }
+#else
+ SCTP_BASE_SYSCTL(sctp_chunkscale) = SCTPCTL_CHUNKSCALE_DEFAULT;
+#endif
+ SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default) = SCTPCTL_DELAYED_SACK_TIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_sack_freq_default) = SCTPCTL_SACK_FREQ_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_system_free_resc_limit) = SCTPCTL_SYS_RESOURCE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit) = SCTPCTL_ASOC_RESOURCE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default) = SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default) = SCTPCTL_PMTU_RAISE_TIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default) = SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_secret_lifetime_default) = SCTPCTL_SECRET_LIFETIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rto_max_default) = SCTPCTL_RTO_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rto_min_default) = SCTPCTL_RTO_MIN_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rto_initial_default) = SCTPCTL_RTO_INITIAL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_init_rto_max_default) = SCTPCTL_INIT_RTO_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default) = SCTPCTL_VALID_COOKIE_LIFE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_init_rtx_max_default) = SCTPCTL_INIT_RTX_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default) = SCTPCTL_ASSOC_RTX_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_path_rtx_max_default) = SCTPCTL_PATH_RTX_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_path_pf_threshold) = SCTPCTL_PATH_PF_THRESHOLD_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_add_more_threshold) = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default) = SCTPCTL_INCOMING_STREAMS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default) = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_cmt_on_off) = SCTPCTL_CMT_ON_OFF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac) = SCTPCTL_CMT_USE_DAC_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) = SCTPCTL_CWND_MAXBURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_nat_friendly) = SCTPCTL_NAT_FRIENDLY_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_L2_abc_variable) = SCTPCTL_ABC_L_VAR_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) = SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_do_drain) = SCTPCTL_DO_SCTP_DRAIN_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_hb_maxburst) = SCTPCTL_HB_MAX_BURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit) = SCTPCTL_ABORT_AT_LIMIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_min_residual) = SCTPCTL_MIN_RESIDUAL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_max_retran_chunk) = SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_logging_level) = SCTPCTL_LOGGING_LEVEL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_default_cc_module) = SCTPCTL_DEFAULT_CC_MODULE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_default_ss_module) = SCTPCTL_DEFAULT_SS_MODULE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_default_frag_interleave) = SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_mobility_base) = SCTPCTL_MOBILITY_BASE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) = SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_vtag_time_wait) = SCTPCTL_TIME_WAIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_buffer_splitting) = SCTPCTL_BUFFER_SPLITTING_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_initial_cwnd) = SCTPCTL_INITIAL_CWND_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rttvar_bw) = SCTPCTL_RTTVAR_BW_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rttvar_rtt) = SCTPCTL_RTTVAR_RTT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rttvar_eqret) = SCTPCTL_RTTVAR_EQRET_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_steady_step) = SCTPCTL_RTTVAR_STEADYS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_use_dccc_ecn) = SCTPCTL_RTTVAR_DCCCECN_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_blackhole) = SCTPCTL_BLACKHOLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_sendall_limit) = SCTPCTL_SENDALL_LIMIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_diag_info_code) = SCTPCTL_DIAG_INFO_CODE_DEFAULT;
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(_WIN32) && !defined(__Userspace__)
+ /* On Windows, the resource for global variables is limited. */
+ MALLOC(SCTP_BASE_SYSCTL(sctp_log), struct sctp_log *, sizeof(struct sctp_log), M_SYSCTL, M_ZERO);
+#else
+ memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+#endif
+#endif
+ SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) = SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly) = SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT;
+#if defined(SCTP_DEBUG)
+ SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) = SCTPCTL_IGNORE_VMWARE_INTERFACES_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_main_timer) = SCTPCTL_MAIN_TIMER_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_addr_watchdog_limit) = SCTPCTL_ADDR_WATCHDOG_LIMIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_vtag_watchdog_limit) = SCTPCTL_VTAG_WATCHDOG_LIMIT_DEFAULT;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_BASE_SYSCTL(sctp_output_unlocked) = SCTPCTL_OUTPUT_UNLOCKED_DEFAULT;
+#endif
+}
+
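+/*
+ * On Windows the trace log is heap-allocated in sctp_init_sysctls (global
+ * variable space is limited there), so it must be freed again on shutdown.
+ */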
+#if defined(_WIN32) && !defined(__Userspace__)
+void
+sctp_finish_sysctls()
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ if (SCTP_BASE_SYSCTL(sctp_log) != NULL) {
+ FREE(SCTP_BASE_SYSCTL(sctp_log), M_SYSCTL);
+ SCTP_BASE_SYSCTL(sctp_log) = NULL;
+ }
+#endif
+}
+#endif
+
+#if !defined(__Userspace__)
+/* It returns an upper limit. No filtering is done here */
+static unsigned int
+sctp_sysctl_number_of_addresses(struct sctp_inpcb *inp)
+{
+ unsigned int cnt;
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ struct sctp_laddr *laddr;
+
+ cnt = 0;
+	/* neither Mac OS X nor FreeBSD support multiple routing functions */
+ if ((vrf = sctp_find_vrf(inp->def_vrf_id)) == NULL) {
+ return (0);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#endif
+#ifdef INET6
+ case AF_INET6:
+#endif
+ cnt++;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ } else {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#endif
+#ifdef INET6
+ case AF_INET6:
+#endif
+ cnt++;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return (cnt);
+}
+
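+/*
+ * Copy the endpoint's (or association's) bound addresses out to userland.
+ * Note the locking pattern: the INP and INP-INFO read locks are dropped
+ * around each SYSCTL_OUT() call (which may sleep) and re-acquired
+ * afterwards. A zeroed entry with last = 1 terminates the address list.
+ */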
+static int
+sctp_sysctl_copy_out_local_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sysctl_req *req)
+{
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ int loopback_scope, ipv4_local_scope, local_scope, site_scope;
+ int ipv4_addr_legal, ipv6_addr_legal;
+#if defined(__Userspace__)
+ int conn_addr_legal;
+#endif
+ struct sctp_vrf *vrf;
+ struct xsctp_laddr xladdr;
+ struct sctp_laddr *laddr;
+ int error;
+
+ /* Turn on all the appropriate scope */
+ if (stcb) {
+ /* use association specific values */
+ loopback_scope = stcb->asoc.scope.loopback_scope;
+ ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+ local_scope = stcb->asoc.scope.local_scope;
+ site_scope = stcb->asoc.scope.site_scope;
+ ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+ ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#if defined(__Userspace__)
+ conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+ } else {
+ /* Use generic values for endpoints. */
+ loopback_scope = 1;
+ ipv4_local_scope = 1;
+ local_scope = 1;
+ site_scope = 1;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ ipv4_addr_legal = 0;
+ } else {
+ ipv4_addr_legal = 1;
+ }
+#if defined(__Userspace__)
+ conn_addr_legal = 0;
+#endif
+ } else {
+ ipv6_addr_legal = 0;
+#if defined(__Userspace__)
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+ conn_addr_legal = 1;
+ ipv4_addr_legal = 0;
+ } else {
+ conn_addr_legal = 0;
+ ipv4_addr_legal = 1;
+ }
+#else
+ ipv4_addr_legal = 1;
+#endif
+ }
+ }
+
+	/* neither Mac OS X nor FreeBSD support multiple routing functions */
+ if ((vrf = sctp_find_vrf(inp->def_vrf_id)) == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (-1);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifn))
+ /* Skip loopback if loopback_scope not set */
+ continue;
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (stcb) {
+ /*
+ * ignore if blacklisted at
+ * association level
+ */
+ if (sctp_is_addr_restricted(stcb, sctp_ifa))
+ continue;
+ }
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = &sctp_ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0)
+ continue;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if ((ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)))
+ continue;
+ } else {
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = &sctp_ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ continue;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+ }
+ if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)))
+ continue;
+ } else {
+ continue;
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (!conn_addr_legal) {
+ continue;
+ }
+ break;
+#endif
+ default:
+ continue;
+ }
+ memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+ memcpy((void *)&xladdr.address, (const void *)&sctp_ifa->address, sizeof(union sctp_sockstore));
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+ if (error) {
+ return (error);
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ }
+ }
+ } else {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ /* ignore if blacklisted at association level */
+ if (stcb && sctp_is_addr_restricted(stcb, laddr->ifa))
+ continue;
+ memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+ memcpy((void *)&xladdr.address, (const void *)&laddr->ifa->address, sizeof(union sctp_sockstore));
+ xladdr.start_time.tv_sec = (uint32_t)laddr->start_time.tv_sec;
+ xladdr.start_time.tv_usec = (uint32_t)laddr->start_time.tv_usec;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+ if (error) {
+ return (error);
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ }
+ }
+ memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+ xladdr.last = 1;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+
+ if (error) {
+ return (error);
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ return (0);
+ }
+}
+
+/*
+ * sysctl functions
+ */
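+/*
+ * The assoclist handler uses the usual two-pass sysctl protocol: when
+ * req->oldptr is NULL only a (slightly padded) size estimate is returned;
+ * otherwise the endpoint, address and association records are streamed
+ * out, each list terminated by an entry with last = 1.
+ */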
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_assoclist SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ unsigned int number_of_endpoints;
+ unsigned int number_of_local_addresses;
+ unsigned int number_of_associations;
+ unsigned int number_of_remote_addresses;
+ unsigned int n;
+ int error;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct xsctp_inpcb xinpcb;
+ struct xsctp_tcb xstcb;
+ struct xsctp_raddr xraddr;
+ struct socket *so;
+
+ number_of_endpoints = 0;
+ number_of_local_addresses = 0;
+ number_of_associations = 0;
+ number_of_remote_addresses = 0;
+
+ SCTP_INP_INFO_RLOCK();
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (req->oldptr == USER_ADDR_NULL) {
+#else
+ if (req->oldptr == NULL) {
+#endif
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ SCTP_INP_RLOCK(inp);
+ number_of_endpoints++;
+ number_of_local_addresses += sctp_sysctl_number_of_addresses(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ number_of_associations++;
+ number_of_local_addresses += sctp_sysctl_number_of_addresses(inp);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ number_of_remote_addresses++;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ n = (number_of_endpoints + 1) * sizeof(struct xsctp_inpcb) +
+ (number_of_local_addresses + number_of_endpoints + number_of_associations) * sizeof(struct xsctp_laddr) +
+ (number_of_associations + number_of_endpoints) * sizeof(struct xsctp_tcb) +
+ (number_of_remote_addresses + number_of_associations) * sizeof(struct xsctp_raddr);
+
+ /* request some more memory than needed */
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ req->oldidx = (n + n / 8);
+#else
+ req->dataidx = (n + n / 8);
+#endif
+ return (0);
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (req->newptr != USER_ADDR_NULL) {
+#else
+ if (req->newptr != NULL) {
+#endif
+ SCTP_INP_INFO_RUNLOCK();
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_SYSCTL, EPERM);
+ return (EPERM);
+ }
+ memset(&xinpcb, 0, sizeof(xinpcb));
+ memset(&xstcb, 0, sizeof(xstcb));
+ memset(&xraddr, 0, sizeof(xraddr));
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			/* if it's all gone it is being freed - skip it */
+ goto skip;
+ }
+ xinpcb.last = 0;
+ xinpcb.local_port = ntohs(inp->sctp_lport);
+ xinpcb.flags = inp->sctp_flags;
+ xinpcb.features = inp->sctp_features;
+ xinpcb.total_sends = inp->total_sends;
+ xinpcb.total_recvs = inp->total_recvs;
+ xinpcb.total_nospaces = inp->total_nospaces;
+ xinpcb.fragmentation_point = inp->sctp_frag_point;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ xinpcb.socket = (uintptr_t)inp->sctp_socket;
+#else
+ xinpcb.socket = inp->sctp_socket;
+#endif
+ so = inp->sctp_socket;
+ if ((so == NULL) ||
+ (!SCTP_IS_LISTENING(inp)) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ xinpcb.qlen = 0;
+ xinpcb.maxqlen = 0;
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ xinpcb.qlen = so->sol_qlen;
+ xinpcb.qlen_old = so->sol_qlen > USHRT_MAX ?
+ USHRT_MAX : (uint16_t) so->sol_qlen;
+ xinpcb.maxqlen = so->sol_qlimit;
+ xinpcb.maxqlen_old = so->sol_qlimit > USHRT_MAX ?
+ USHRT_MAX : (uint16_t) so->sol_qlimit;
+#else
+ xinpcb.qlen = so->so_qlen;
+ xinpcb.maxqlen = so->so_qlimit;
+#endif
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ return (error);
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ error = sctp_sysctl_copy_out_local_addresses(inp, NULL, req);
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ return (error);
+ }
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ xstcb.last = 0;
+ xstcb.local_port = ntohs(inp->sctp_lport);
+ xstcb.remote_port = ntohs(stcb->rport);
+ if (stcb->asoc.primary_destination != NULL)
+ xstcb.primary_addr = stcb->asoc.primary_destination->ro._l_addr;
+ xstcb.heartbeat_interval = stcb->asoc.heart_beat_delay;
+ xstcb.state = (uint32_t)sctp_map_assoc_state(stcb->asoc.state);
+ xstcb.assoc_id = sctp_get_associd(stcb);
+ xstcb.peers_rwnd = stcb->asoc.peers_rwnd;
+ xstcb.in_streams = stcb->asoc.streamincnt;
+ xstcb.out_streams = stcb->asoc.streamoutcnt;
+ xstcb.max_nr_retrans = stcb->asoc.overall_error_count;
+ xstcb.primary_process = 0; /* not really supported yet */
+ xstcb.T1_expireries = stcb->asoc.timoinit + stcb->asoc.timocookie;
+ xstcb.T2_expireries = stcb->asoc.timoshutdown + stcb->asoc.timoshutdownack;
+ xstcb.retransmitted_tsns = stcb->asoc.marked_retrans;
+ xstcb.start_time.tv_sec = (uint32_t)stcb->asoc.start_time.tv_sec;
+ xstcb.start_time.tv_usec = (uint32_t)stcb->asoc.start_time.tv_usec;
+ xstcb.discontinuity_time.tv_sec = (uint32_t)stcb->asoc.discontinuity_time.tv_sec;
+ xstcb.discontinuity_time.tv_usec = (uint32_t)stcb->asoc.discontinuity_time.tv_usec;
+ xstcb.total_sends = stcb->total_sends;
+ xstcb.total_recvs = stcb->total_recvs;
+ xstcb.local_tag = stcb->asoc.my_vtag;
+ xstcb.remote_tag = stcb->asoc.peer_vtag;
+ xstcb.initial_tsn = stcb->asoc.init_seq_number;
+ xstcb.highest_tsn = stcb->asoc.sending_seq - 1;
+ xstcb.cumulative_tsn = stcb->asoc.last_acked_seq;
+ xstcb.cumulative_tsn_ack = stcb->asoc.cumulative_tsn;
+ xstcb.mtu = stcb->asoc.smallest_mtu;
+ xstcb.refcnt = stcb->asoc.refcnt;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (error);
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ error = sctp_sysctl_copy_out_local_addresses(inp, stcb, req);
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (error);
+ }
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ xraddr.last = 0;
+ xraddr.address = net->ro._l_addr;
+ xraddr.active = ((net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE);
+ xraddr.confirmed = ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0);
+ xraddr.heartbeat_enabled = ((net->dest_state & SCTP_ADDR_NOHB) == 0);
+ xraddr.potentially_failed = ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF);
+ xraddr.rto = net->RTO;
+ xraddr.max_path_rtx = net->failure_threshold;
+ xraddr.rtx = net->marked_retrans;
+ xraddr.error_counter = net->error_count;
+ xraddr.cwnd = net->cwnd;
+ xraddr.flight_size = net->flight_size;
+ xraddr.mtu = net->mtu;
+ xraddr.rtt = net->rtt / 1000;
+ xraddr.heartbeat_interval = net->heart_beat_delay;
+ xraddr.ssthresh = net->ssthresh;
+ xraddr.encaps_port = net->port;
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ xraddr.state = SCTP_UNCONFIRMED;
+ } else if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ xraddr.state = SCTP_ACTIVE;
+ } else {
+ xraddr.state = SCTP_INACTIVE;
+ }
+ xraddr.start_time.tv_sec = (uint32_t)net->start_time.tv_sec;
+ xraddr.start_time.tv_usec = (uint32_t)net->start_time.tv_usec;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (error);
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ memset((void *)&xraddr, 0, sizeof(struct xsctp_raddr));
+ xraddr.last = 1;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ return (error);
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ memset((void *)&xstcb, 0, sizeof(struct xsctp_tcb));
+ xstcb.last = 1;
+ error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb));
+ if (error) {
+ return (error);
+ }
+skip:
+ SCTP_INP_INFO_RLOCK();
+ }
+ SCTP_INP_INFO_RUNLOCK();
+
+ memset((void *)&xinpcb, 0, sizeof(struct xsctp_inpcb));
+ xinpcb.last = 1;
+ error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb));
+ return (error);
+}
+
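+/*
+ * Changing the UDP tunneling port stops encapsulation on the old port and,
+ * for a non-zero new port, restarts it on the new one; writing 0 leaves
+ * UDP encapsulation disabled.
+ */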
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_udp_tunneling SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_udp_tunneling(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ int error;
+ uint32_t old, new;
+
+ SCTP_INP_INFO_RLOCK();
+ old = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
+ SCTP_INP_INFO_RUNLOCK();
+ new = old;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if ((error == 0) &&
+#if defined(__APPLE__) && !defined(__Userspace__)
+ (req->newptr != USER_ADDR_NULL)) {
+#else
+ (req->newptr != NULL)) {
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+ SCTP_INP_INFO_WLOCK();
+ sctp_over_udp_restart();
+ SCTP_INP_INFO_WUNLOCK();
+#else
+#if (SCTPCTL_UDP_TUNNELING_PORT_MIN == 0)
+ if (new > SCTPCTL_UDP_TUNNELING_PORT_MAX) {
+#else
+ if ((new < SCTPCTL_UDP_TUNNELING_PORT_MIN) ||
+ (new > SCTPCTL_UDP_TUNNELING_PORT_MAX)) {
+#endif
+ error = EINVAL;
+ } else {
+ SCTP_INP_INFO_WLOCK();
+ SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = new;
+ if (old != 0) {
+ sctp_over_udp_stop();
+ }
+ if (new != 0) {
+ error = sctp_over_udp_start();
+ }
+ SCTP_INP_INFO_WUNLOCK();
+ }
+#endif
+ }
+ return (error);
+}
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+int sctp_is_vmware_interface(struct ifnet *);
+
+static int
+sctp_sysctl_handle_vmware_interfaces SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error;
+ uint32_t old, new;
+
+ old = SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces);
+ new = old;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if ((error == 0) && (req->newptr != USER_ADDR_NULL)) {
+ if ((new < SCTPCTL_IGNORE_VMWARE_INTERFACES_MIN) ||
+ (new > SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX)) {
+ error = EINVAL;
+ } else {
+ if ((old == 1) && (new == 0)) {
+ sctp_add_or_del_interfaces(sctp_is_vmware_interface, 1);
+ }
+ if ((old == 0) && (new == 1)) {
+ sctp_add_or_del_interfaces(sctp_is_vmware_interface, 0);
+ }
+ if (old != new) {
+ SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) = new;
+ }
+ }
+ }
+ return (error);
+}
+#endif
+
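+/*
+ * The AUTH and ASCONF handlers below enforce their interdependency:
+ * AUTH cannot be disabled while ASCONF is enabled, and ASCONF cannot be
+ * enabled while AUTH is disabled, since ASCONF chunks must be
+ * authenticated.
+ */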
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_auth SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_auth(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ int error;
+ uint32_t new;
+
+ new = SCTP_BASE_SYSCTL(sctp_auth_enable);
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if ((error == 0) &&
+#if defined(__APPLE__) && !defined(__Userspace__)
+ (req->newptr != USER_ADDR_NULL)) {
+#else
+ (req->newptr != NULL)) {
+#endif
+#if (SCTPCTL_AUTH_ENABLE_MIN == 0)
+ if ((new > SCTPCTL_AUTH_ENABLE_MAX) ||
+ ((new == 0) && (SCTP_BASE_SYSCTL(sctp_asconf_enable) == 1))) {
+#else
+ if ((new < SCTPCTL_AUTH_ENABLE_MIN) ||
+ (new > SCTPCTL_AUTH_ENABLE_MAX) ||
+ ((new == 0) && (SCTP_BASE_SYSCTL(sctp_asconf_enable) == 1))) {
+#endif
+ error = EINVAL;
+ } else {
+ SCTP_BASE_SYSCTL(sctp_auth_enable) = new;
+ }
+ }
+ return (error);
+}
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_asconf SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_asconf(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ int error;
+ uint32_t new;
+
+ new = SCTP_BASE_SYSCTL(sctp_asconf_enable);
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if ((error == 0) &&
+#if defined(__APPLE__) && !defined(__Userspace__)
+ (req->newptr != USER_ADDR_NULL)) {
+#else
+ (req->newptr != NULL)) {
+#endif
+#if (SCTPCTL_ASCONF_ENABLE_MIN == 0)
+ if ((new > SCTPCTL_ASCONF_ENABLE_MAX) ||
+ ((new == 1) && (SCTP_BASE_SYSCTL(sctp_auth_enable) == 0))) {
+#else
+ if ((new < SCTPCTL_ASCONF_ENABLE_MIN) ||
+ (new > SCTPCTL_ASCONF_ENABLE_MAX) ||
+ ((new == 1) && (SCTP_BASE_SYSCTL(sctp_auth_enable) == 0))) {
+#endif
+ error = EINVAL;
+ } else {
+ SCTP_BASE_SYSCTL(sctp_asconf_enable) = new;
+ }
+ }
+ return (error);
+}
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ int error;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ struct sctpstat *sarry;
+ struct sctpstat sb;
+ int cpu;
+#endif
+ struct sctpstat sb_temp;
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if ((req->newptr != USER_ADDR_NULL) &&
+#else
+ if ((req->newptr != NULL) &&
+#endif
+ (req->newlen != sizeof(struct sctpstat))) {
+ return (EINVAL);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ memset(&sb_temp, 0, sizeof(struct sctpstat));
+
+ if (req->newptr != NULL) {
+ error = SYSCTL_IN(req, &sb_temp, sizeof(struct sctpstat));
+ if (error != 0) {
+ return (error);
+ }
+ }
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ memset(&sb, 0, sizeof(sb));
+ for (cpu = 0; cpu < mp_maxid; cpu++) {
+ sarry = &SCTP_BASE_STATS[cpu];
+ if (sarry->sctps_discontinuitytime.tv_sec > sb.sctps_discontinuitytime.tv_sec) {
+ sb.sctps_discontinuitytime.tv_sec = sarry->sctps_discontinuitytime.tv_sec;
+ sb.sctps_discontinuitytime.tv_usec = sarry->sctps_discontinuitytime.tv_usec;
+ }
+ sb.sctps_currestab += sarry->sctps_currestab;
+ sb.sctps_activeestab += sarry->sctps_activeestab;
+ sb.sctps_restartestab += sarry->sctps_restartestab;
+ sb.sctps_collisionestab += sarry->sctps_collisionestab;
+ sb.sctps_passiveestab += sarry->sctps_passiveestab;
+ sb.sctps_aborted += sarry->sctps_aborted;
+ sb.sctps_shutdown += sarry->sctps_shutdown;
+ sb.sctps_outoftheblue += sarry->sctps_outoftheblue;
+ sb.sctps_checksumerrors += sarry->sctps_checksumerrors;
+ sb.sctps_outcontrolchunks += sarry->sctps_outcontrolchunks;
+ sb.sctps_outorderchunks += sarry->sctps_outorderchunks;
+ sb.sctps_outunorderchunks += sarry->sctps_outunorderchunks;
+ sb.sctps_incontrolchunks += sarry->sctps_incontrolchunks;
+ sb.sctps_inorderchunks += sarry->sctps_inorderchunks;
+ sb.sctps_inunorderchunks += sarry->sctps_inunorderchunks;
+ sb.sctps_fragusrmsgs += sarry->sctps_fragusrmsgs;
+ sb.sctps_reasmusrmsgs += sarry->sctps_reasmusrmsgs;
+ sb.sctps_outpackets += sarry->sctps_outpackets;
+ sb.sctps_inpackets += sarry->sctps_inpackets;
+ sb.sctps_recvpackets += sarry->sctps_recvpackets;
+ sb.sctps_recvdatagrams += sarry->sctps_recvdatagrams;
+ sb.sctps_recvpktwithdata += sarry->sctps_recvpktwithdata;
+ sb.sctps_recvsacks += sarry->sctps_recvsacks;
+ sb.sctps_recvdata += sarry->sctps_recvdata;
+ sb.sctps_recvdupdata += sarry->sctps_recvdupdata;
+ sb.sctps_recvheartbeat += sarry->sctps_recvheartbeat;
+ sb.sctps_recvheartbeatack += sarry->sctps_recvheartbeatack;
+ sb.sctps_recvecne += sarry->sctps_recvecne;
+ sb.sctps_recvauth += sarry->sctps_recvauth;
+ sb.sctps_recvauthmissing += sarry->sctps_recvauthmissing;
+ sb.sctps_recvivalhmacid += sarry->sctps_recvivalhmacid;
+ sb.sctps_recvivalkeyid += sarry->sctps_recvivalkeyid;
+ sb.sctps_recvauthfailed += sarry->sctps_recvauthfailed;
+ sb.sctps_recvexpress += sarry->sctps_recvexpress;
+ sb.sctps_recvexpressm += sarry->sctps_recvexpressm;
+ sb.sctps_recvswcrc += sarry->sctps_recvswcrc;
+ sb.sctps_recvhwcrc += sarry->sctps_recvhwcrc;
+ sb.sctps_sendpackets += sarry->sctps_sendpackets;
+ sb.sctps_sendsacks += sarry->sctps_sendsacks;
+ sb.sctps_senddata += sarry->sctps_senddata;
+ sb.sctps_sendretransdata += sarry->sctps_sendretransdata;
+ sb.sctps_sendfastretrans += sarry->sctps_sendfastretrans;
+ sb.sctps_sendmultfastretrans += sarry->sctps_sendmultfastretrans;
+ sb.sctps_sendheartbeat += sarry->sctps_sendheartbeat;
+ sb.sctps_sendecne += sarry->sctps_sendecne;
+ sb.sctps_sendauth += sarry->sctps_sendauth;
+ sb.sctps_senderrors += sarry->sctps_senderrors;
+ sb.sctps_sendswcrc += sarry->sctps_sendswcrc;
+ sb.sctps_sendhwcrc += sarry->sctps_sendhwcrc;
+ sb.sctps_pdrpfmbox += sarry->sctps_pdrpfmbox;
+ sb.sctps_pdrpfehos += sarry->sctps_pdrpfehos;
+ sb.sctps_pdrpmbda += sarry->sctps_pdrpmbda;
+ sb.sctps_pdrpmbct += sarry->sctps_pdrpmbct;
+ sb.sctps_pdrpbwrpt += sarry->sctps_pdrpbwrpt;
+ sb.sctps_pdrpcrupt += sarry->sctps_pdrpcrupt;
+ sb.sctps_pdrpnedat += sarry->sctps_pdrpnedat;
+ sb.sctps_pdrppdbrk += sarry->sctps_pdrppdbrk;
+ sb.sctps_pdrptsnnf += sarry->sctps_pdrptsnnf;
+ sb.sctps_pdrpdnfnd += sarry->sctps_pdrpdnfnd;
+ sb.sctps_pdrpdiwnp += sarry->sctps_pdrpdiwnp;
+ sb.sctps_pdrpdizrw += sarry->sctps_pdrpdizrw;
+ sb.sctps_pdrpbadd += sarry->sctps_pdrpbadd;
+ sb.sctps_pdrpmark += sarry->sctps_pdrpmark;
+ sb.sctps_timoiterator += sarry->sctps_timoiterator;
+ sb.sctps_timodata += sarry->sctps_timodata;
+ sb.sctps_timowindowprobe += sarry->sctps_timowindowprobe;
+ sb.sctps_timoinit += sarry->sctps_timoinit;
+ sb.sctps_timosack += sarry->sctps_timosack;
+ sb.sctps_timoshutdown += sarry->sctps_timoshutdown;
+ sb.sctps_timoheartbeat += sarry->sctps_timoheartbeat;
+ sb.sctps_timocookie += sarry->sctps_timocookie;
+ sb.sctps_timosecret += sarry->sctps_timosecret;
+ sb.sctps_timopathmtu += sarry->sctps_timopathmtu;
+ sb.sctps_timoshutdownack += sarry->sctps_timoshutdownack;
+ sb.sctps_timoshutdownguard += sarry->sctps_timoshutdownguard;
+ sb.sctps_timostrmrst += sarry->sctps_timostrmrst;
+ sb.sctps_timoearlyfr += sarry->sctps_timoearlyfr;
+ sb.sctps_timoasconf += sarry->sctps_timoasconf;
+ sb.sctps_timodelprim += sarry->sctps_timodelprim;
+ sb.sctps_timoautoclose += sarry->sctps_timoautoclose;
+ sb.sctps_timoassockill += sarry->sctps_timoassockill;
+ sb.sctps_timoinpkill += sarry->sctps_timoinpkill;
+ sb.sctps_hdrops += sarry->sctps_hdrops;
+ sb.sctps_badsum += sarry->sctps_badsum;
+ sb.sctps_noport += sarry->sctps_noport;
+ sb.sctps_badvtag += sarry->sctps_badvtag;
+ sb.sctps_badsid += sarry->sctps_badsid;
+ sb.sctps_nomem += sarry->sctps_nomem;
+ sb.sctps_fastretransinrtt += sarry->sctps_fastretransinrtt;
+ sb.sctps_markedretrans += sarry->sctps_markedretrans;
+ sb.sctps_naglesent += sarry->sctps_naglesent;
+ sb.sctps_naglequeued += sarry->sctps_naglequeued;
+ sb.sctps_maxburstqueued += sarry->sctps_maxburstqueued;
+ sb.sctps_ifnomemqueued += sarry->sctps_ifnomemqueued;
+ sb.sctps_windowprobed += sarry->sctps_windowprobed;
+ sb.sctps_lowlevelerr += sarry->sctps_lowlevelerr;
+ sb.sctps_lowlevelerrusr += sarry->sctps_lowlevelerrusr;
+ sb.sctps_datadropchklmt += sarry->sctps_datadropchklmt;
+ sb.sctps_datadroprwnd += sarry->sctps_datadroprwnd;
+ sb.sctps_ecnereducedcwnd += sarry->sctps_ecnereducedcwnd;
+ sb.sctps_vtagexpress += sarry->sctps_vtagexpress;
+ sb.sctps_vtagbogus += sarry->sctps_vtagbogus;
+ sb.sctps_primary_randry += sarry->sctps_primary_randry;
+ sb.sctps_cmt_randry += sarry->sctps_cmt_randry;
+ sb.sctps_slowpath_sack += sarry->sctps_slowpath_sack;
+ sb.sctps_wu_sacks_sent += sarry->sctps_wu_sacks_sent;
+ sb.sctps_sends_with_flags += sarry->sctps_sends_with_flags;
+ sb.sctps_sends_with_unord += sarry->sctps_sends_with_unord;
+ sb.sctps_sends_with_eof += sarry->sctps_sends_with_eof;
+ sb.sctps_sends_with_abort += sarry->sctps_sends_with_abort;
+ sb.sctps_protocol_drain_calls += sarry->sctps_protocol_drain_calls;
+ sb.sctps_protocol_drains_done += sarry->sctps_protocol_drains_done;
+ sb.sctps_read_peeks += sarry->sctps_read_peeks;
+ sb.sctps_cached_chk += sarry->sctps_cached_chk;
+ sb.sctps_cached_strmoq += sarry->sctps_cached_strmoq;
+ sb.sctps_left_abandon += sarry->sctps_left_abandon;
+ sb.sctps_send_burst_avoid += sarry->sctps_send_burst_avoid;
+ sb.sctps_send_cwnd_avoid += sarry->sctps_send_cwnd_avoid;
+ sb.sctps_fwdtsn_map_over += sarry->sctps_fwdtsn_map_over;
+ if (req->newptr != NULL) {
+ memcpy(sarry, &sb_temp, sizeof(struct sctpstat));
+ }
+ }
+ error = SYSCTL_OUT(req, &sb, sizeof(struct sctpstat));
+#else
+ error = SYSCTL_OUT(req, &SCTP_BASE_STATS, sizeof(struct sctpstat));
+ if (error != 0) {
+ return (error);
+ }
+ if (req->newptr != NULL) {
+ memcpy(&SCTP_BASE_STATS, &sb_temp, sizeof(struct sctpstat));
+ }
+#endif
+#else
+ error = SYSCTL_OUT(req, &SCTP_BASE_STATS, sizeof(struct sctpstat));
+#endif
+ return (error);
+}
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_trace_log SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+#else
+static int
+sctp_sysctl_handle_trace_log(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ int error;
+
+#if defined(_WIN32) && !defined(__Userspace__)
+ error = SYSCTL_OUT(req, SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+#else
+ error = SYSCTL_OUT(req, &SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+#endif
+ return (error);
+}
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+static int
+sctp_sysctl_handle_trace_log_clear SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, req, oidp)
+#else
+static int
+sctp_sysctl_handle_trace_log_clear(SYSCTL_HANDLER_ARGS)
+{
+#endif
+ int error = 0;
+#if defined(_WIN32) && !defined(__Userspace__)
+ int value = 0;
+
+ if (req->new_data == NULL) {
+ return (error);
+ }
+ error = SYSCTL_IN(req, &value, sizeof(int));
+ if (error == 0 && value != 0 && SCTP_BASE_SYSCTL(sctp_log) != NULL) {
+ memset(SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+ }
+#else
+
+ memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+#endif
+ return (error);
+}
+#endif
+
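+/*
+ * SCTP_UINT_SYSCTL(mib_name, var_name, prefix) expands to a handler that
+ * checks writes against [prefix##_MIN, prefix##_MAX] (rejecting
+ * out-of-range values with EINVAL) plus the matching SYSCTL_PROC
+ * registration under net.inet.sctp.
+ */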
+#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(__Userspace__)
+#if defined(__FreeBSD__)
+#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix) \
+ static int \
+ sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS) \
+ { \
+ int error; \
+ uint32_t new; \
+ \
+ new = SCTP_BASE_SYSCTL(var_name); \
+ error = sysctl_handle_int(oidp, &new, 0, req); \
+ if ((error == 0) && (req->newptr != NULL)) { \
+ if ((new < prefix##_MIN) || \
+ (new > prefix##_MAX)) { \
+ error = EINVAL; \
+ } else { \
+ SCTP_BASE_SYSCTL(var_name) = new; \
+ } \
+ } \
+ return (error); \
+ } \
+ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name, \
+ CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW, NULL, 0, \
+ sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC);
+#else
+#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix) \
+ static int \
+ sctp_sysctl_handle_##mib_name(struct sysctl_oid *oidp, \
+ void *arg1 __attribute__((unused)), \
+ int arg2 __attribute__((unused)), \
+ struct sysctl_req *req) \
+ { \
+ int error; \
+ uint32_t new; \
+ \
+ new = SCTP_BASE_SYSCTL(var_name); \
+ error = sysctl_handle_int(oidp, &new, 0, req); \
+ if ((error == 0) && (req->newptr != USER_ADDR_NULL)) { \
+ if ((new < prefix##_MIN) || \
+ (new > prefix##_MAX)) { \
+ error = EINVAL; \
+ } else { \
+ SCTP_BASE_SYSCTL(var_name) = new; \
+ } \
+ } \
+ return (error); \
+ } \
+ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name, \
+ CTLTYPE_INT | CTLFLAG_RW, NULL, 0, \
+ sctp_sysctl_handle_##mib_name, "I", prefix##_DESC);
+#define CTLTYPE_UINT CTLTYPE_INT
+#define CTLFLAG_VNET 0
+#endif
+
+/*
+ * sysctl definitions
+ */
+
+SCTP_UINT_SYSCTL(sendspace, sctp_sendspace, SCTPCTL_MAXDGRAM)
+SCTP_UINT_SYSCTL(recvspace, sctp_recvspace, SCTPCTL_RECVSPACE)
+SCTP_UINT_SYSCTL(auto_asconf, sctp_auto_asconf, SCTPCTL_AUTOASCONF)
+SCTP_UINT_SYSCTL(ecn_enable, sctp_ecn_enable, SCTPCTL_ECN_ENABLE)
+SCTP_UINT_SYSCTL(pr_enable, sctp_pr_enable, SCTPCTL_PR_ENABLE)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, auth_enable, CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW,
+ NULL, 0, sctp_sysctl_handle_auth, "IU", SCTPCTL_AUTH_ENABLE_DESC);
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asconf_enable, CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW,
+ NULL, 0, sctp_sysctl_handle_asconf, "IU", SCTPCTL_ASCONF_ENABLE_DESC);
+SCTP_UINT_SYSCTL(reconfig_enable, sctp_reconfig_enable, SCTPCTL_RECONFIG_ENABLE)
+SCTP_UINT_SYSCTL(nrsack_enable, sctp_nrsack_enable, SCTPCTL_NRSACK_ENABLE)
+SCTP_UINT_SYSCTL(pktdrop_enable, sctp_pktdrop_enable, SCTPCTL_PKTDROP_ENABLE)
+#if defined(__APPLE__) && !defined(__Userspace__)
+SCTP_UINT_SYSCTL(loopback_nocsum, sctp_no_csum_on_loopback, SCTPCTL_LOOPBACK_NOCSUM)
+#endif
+SCTP_UINT_SYSCTL(peer_chkoh, sctp_peer_chunk_oh, SCTPCTL_PEER_CHKOH)
+SCTP_UINT_SYSCTL(maxburst, sctp_max_burst_default, SCTPCTL_MAXBURST)
+SCTP_UINT_SYSCTL(fr_maxburst, sctp_fr_max_burst_default, SCTPCTL_FRMAXBURST)
+SCTP_UINT_SYSCTL(maxchunks, sctp_max_chunks_on_queue, SCTPCTL_MAXCHUNKS)
+SCTP_UINT_SYSCTL(tcbhashsize, sctp_hashtblsize, SCTPCTL_TCBHASHSIZE)
+SCTP_UINT_SYSCTL(pcbhashsize, sctp_pcbtblsize, SCTPCTL_PCBHASHSIZE)
+SCTP_UINT_SYSCTL(min_split_point, sctp_min_split_point, SCTPCTL_MIN_SPLIT_POINT)
+SCTP_UINT_SYSCTL(chunkscale, sctp_chunkscale, SCTPCTL_CHUNKSCALE)
+SCTP_UINT_SYSCTL(delayed_sack_time, sctp_delayed_sack_time_default, SCTPCTL_DELAYED_SACK_TIME)
+SCTP_UINT_SYSCTL(sack_freq, sctp_sack_freq_default, SCTPCTL_SACK_FREQ)
+SCTP_UINT_SYSCTL(sys_resource, sctp_system_free_resc_limit, SCTPCTL_SYS_RESOURCE)
+SCTP_UINT_SYSCTL(asoc_resource, sctp_asoc_free_resc_limit, SCTPCTL_ASOC_RESOURCE)
+SCTP_UINT_SYSCTL(heartbeat_interval, sctp_heartbeat_interval_default, SCTPCTL_HEARTBEAT_INTERVAL)
+SCTP_UINT_SYSCTL(pmtu_raise_time, sctp_pmtu_raise_time_default, SCTPCTL_PMTU_RAISE_TIME)
+SCTP_UINT_SYSCTL(shutdown_guard_time, sctp_shutdown_guard_time_default, SCTPCTL_SHUTDOWN_GUARD_TIME)
+SCTP_UINT_SYSCTL(secret_lifetime, sctp_secret_lifetime_default, SCTPCTL_SECRET_LIFETIME)
+SCTP_UINT_SYSCTL(rto_max, sctp_rto_max_default, SCTPCTL_RTO_MAX)
+SCTP_UINT_SYSCTL(rto_min, sctp_rto_min_default, SCTPCTL_RTO_MIN)
+SCTP_UINT_SYSCTL(rto_initial, sctp_rto_initial_default, SCTPCTL_RTO_INITIAL)
+SCTP_UINT_SYSCTL(init_rto_max, sctp_init_rto_max_default, SCTPCTL_INIT_RTO_MAX)
+SCTP_UINT_SYSCTL(valid_cookie_life, sctp_valid_cookie_life_default, SCTPCTL_VALID_COOKIE_LIFE)
+SCTP_UINT_SYSCTL(init_rtx_max, sctp_init_rtx_max_default, SCTPCTL_INIT_RTX_MAX)
+SCTP_UINT_SYSCTL(assoc_rtx_max, sctp_assoc_rtx_max_default, SCTPCTL_ASSOC_RTX_MAX)
+SCTP_UINT_SYSCTL(path_rtx_max, sctp_path_rtx_max_default, SCTPCTL_PATH_RTX_MAX)
+SCTP_UINT_SYSCTL(path_pf_threshold, sctp_path_pf_threshold, SCTPCTL_PATH_PF_THRESHOLD)
+SCTP_UINT_SYSCTL(add_more_on_output, sctp_add_more_threshold, SCTPCTL_ADD_MORE_ON_OUTPUT)
+SCTP_UINT_SYSCTL(incoming_streams, sctp_nr_incoming_streams_default, SCTPCTL_INCOMING_STREAMS)
+SCTP_UINT_SYSCTL(outgoing_streams, sctp_nr_outgoing_streams_default, SCTPCTL_OUTGOING_STREAMS)
+SCTP_UINT_SYSCTL(cmt_on_off, sctp_cmt_on_off, SCTPCTL_CMT_ON_OFF)
+SCTP_UINT_SYSCTL(cmt_use_dac, sctp_cmt_use_dac, SCTPCTL_CMT_USE_DAC)
+SCTP_UINT_SYSCTL(cwnd_maxburst, sctp_use_cwnd_based_maxburst, SCTPCTL_CWND_MAXBURST)
+SCTP_UINT_SYSCTL(nat_friendly, sctp_nat_friendly, SCTPCTL_NAT_FRIENDLY)
+SCTP_UINT_SYSCTL(abc_l_var, sctp_L2_abc_variable, SCTPCTL_ABC_L_VAR)
+SCTP_UINT_SYSCTL(max_chained_mbufs, sctp_mbuf_threshold_count, SCTPCTL_MAX_CHAINED_MBUFS)
+SCTP_UINT_SYSCTL(do_sctp_drain, sctp_do_drain, SCTPCTL_DO_SCTP_DRAIN)
+SCTP_UINT_SYSCTL(hb_max_burst, sctp_hb_maxburst, SCTPCTL_HB_MAX_BURST)
+SCTP_UINT_SYSCTL(abort_at_limit, sctp_abort_if_one_2_one_hits_limit, SCTPCTL_ABORT_AT_LIMIT)
+SCTP_UINT_SYSCTL(min_residual, sctp_min_residual, SCTPCTL_MIN_RESIDUAL)
+SCTP_UINT_SYSCTL(max_retran_chunk, sctp_max_retran_chunk, SCTPCTL_MAX_RETRAN_CHUNK)
+SCTP_UINT_SYSCTL(log_level, sctp_logging_level, SCTPCTL_LOGGING_LEVEL)
+SCTP_UINT_SYSCTL(default_cc_module, sctp_default_cc_module, SCTPCTL_DEFAULT_CC_MODULE)
+SCTP_UINT_SYSCTL(default_ss_module, sctp_default_ss_module, SCTPCTL_DEFAULT_SS_MODULE)
+SCTP_UINT_SYSCTL(default_frag_interleave, sctp_default_frag_interleave, SCTPCTL_DEFAULT_FRAG_INTERLEAVE)
+SCTP_UINT_SYSCTL(mobility_base, sctp_mobility_base, SCTPCTL_MOBILITY_BASE)
+SCTP_UINT_SYSCTL(mobility_fasthandoff, sctp_mobility_fasthandoff, SCTPCTL_MOBILITY_FASTHANDOFF)
+#if defined(SCTP_LOCAL_TRACE_BUF)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, log, CTLFLAG_VNET|CTLTYPE_STRUCT|CTLFLAG_RD,
+ NULL, 0, sctp_sysctl_handle_trace_log, "S,sctplog", "SCTP logging (struct sctp_log)");
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, clear_trace, CTLFLAG_VNET|CTLTYPE_UINT | CTLFLAG_RW,
+ NULL, 0, sctp_sysctl_handle_trace_log_clear, "IU", "Clear SCTP Logging buffer");
+#endif
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_port, CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW,
+ NULL, 0, sctp_sysctl_handle_udp_tunneling, "IU", SCTPCTL_UDP_TUNNELING_PORT_DESC);
+SCTP_UINT_SYSCTL(enable_sack_immediately, sctp_enable_sack_immediately, SCTPCTL_SACK_IMMEDIATELY_ENABLE)
+SCTP_UINT_SYSCTL(nat_friendly_init, sctp_inits_include_nat_friendly, SCTPCTL_NAT_FRIENDLY_INITS)
+SCTP_UINT_SYSCTL(vtag_time_wait, sctp_vtag_time_wait, SCTPCTL_TIME_WAIT)
+SCTP_UINT_SYSCTL(buffer_splitting, sctp_buffer_splitting, SCTPCTL_BUFFER_SPLITTING)
+SCTP_UINT_SYSCTL(initial_cwnd, sctp_initial_cwnd, SCTPCTL_INITIAL_CWND)
+SCTP_UINT_SYSCTL(rttvar_bw, sctp_rttvar_bw, SCTPCTL_RTTVAR_BW)
+SCTP_UINT_SYSCTL(rttvar_rtt, sctp_rttvar_rtt, SCTPCTL_RTTVAR_RTT)
+SCTP_UINT_SYSCTL(rttvar_eqret, sctp_rttvar_eqret, SCTPCTL_RTTVAR_EQRET)
+SCTP_UINT_SYSCTL(rttvar_steady_step, sctp_steady_step, SCTPCTL_RTTVAR_STEADYS)
+SCTP_UINT_SYSCTL(use_dcccecn, sctp_use_dccc_ecn, SCTPCTL_RTTVAR_DCCCECN)
+SCTP_UINT_SYSCTL(blackhole, sctp_blackhole, SCTPCTL_BLACKHOLE)
+SCTP_UINT_SYSCTL(sendall_limit, sctp_sendall_limit, SCTPCTL_SENDALL_LIMIT)
+SCTP_UINT_SYSCTL(diag_info_code, sctp_diag_info_code, SCTPCTL_DIAG_INFO_CODE)
+#ifdef SCTP_DEBUG
+SCTP_UINT_SYSCTL(debug, sctp_debug_on, SCTPCTL_DEBUG)
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+SCTP_UINT_SYSCTL(main_timer, sctp_main_timer, SCTPCTL_MAIN_TIMER)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ignore_vmware_interfaces, CTLTYPE_UINT|CTLFLAG_RW,
+ NULL, 0, sctp_sysctl_handle_vmware_interfaces, "IU", SCTPCTL_IGNORE_VMWARE_INTERFACES_DESC);
+SCTP_UINT_SYSCTL(addr_watchdog_limit, sctp_addr_watchdog_limit, SCTPCTL_ADDR_WATCHDOG_LIMIT)
+SCTP_UINT_SYSCTL(vtag_watchdog_limit, sctp_vtag_watchdog_limit, SCTPCTL_VTAG_WATCHDOG_LIMIT)
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+SCTP_UINT_SYSCTL(output_unlocked, sctp_output_unlocked, SCTPCTL_OUTPUT_UNLOCKED)
+#endif
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_VNET|CTLTYPE_STRUCT|CTLFLAG_RW,
+ NULL, 0, sctp_sysctl_handle_stats, "S,sctpstat", "SCTP statistics (struct sctp_stat)");
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoclist, CTLFLAG_VNET|CTLTYPE_OPAQUE|CTLFLAG_RD,
+ NULL, 0, sctp_sysctl_handle_assoclist, "S,xassoc", "List of active SCTP associations");
+
+#elif defined(_WIN32) && !defined(__Userspace__)
+
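+/*
+ * Unlike the FreeBSD/macOS handlers above, which reject out-of-range
+ * writes with EINVAL, the Windows path silently clamps each variable
+ * into its valid range after the write.
+ */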
+#define RANGECHK(var, min, max) \
+ if ((var) < (min)) { (var) = (min); } \
+ else if ((var) > (max)) { (var) = (max); }
+
+static int
+sctp_sysctl_handle_int(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+
+ error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+ if (error == 0) {
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_sendspace), SCTPCTL_MAXDGRAM_MIN, SCTPCTL_MAXDGRAM_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_recvspace), SCTPCTL_RECVSPACE_MIN, SCTPCTL_RECVSPACE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_auto_asconf), SCTPCTL_AUTOASCONF_MIN, SCTPCTL_AUTOASCONF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_ecn_enable), SCTPCTL_ECN_ENABLE_MIN, SCTPCTL_ECN_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_pr_enable), SCTPCTL_PR_ENABLE_MIN, SCTPCTL_PR_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_reconfig_enable), SCTPCTL_RECONFIG_ENABLE_MIN, SCTPCTL_RECONFIG_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nrsack_enable), SCTPCTL_NRSACK_ENABLE_MIN, SCTPCTL_NRSACK_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_pktdrop_enable), SCTPCTL_PKTDROP_ENABLE_MIN, SCTPCTL_PKTDROP_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), SCTPCTL_LOOPBACK_NOCSUM_MIN, SCTPCTL_LOOPBACK_NOCSUM_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), SCTPCTL_PEER_CHKOH_MIN, SCTPCTL_PEER_CHKOH_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_max_burst_default), SCTPCTL_MAXBURST_MIN, SCTPCTL_MAXBURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_fr_max_burst_default), SCTPCTL_FRMAXBURST_MIN, SCTPCTL_FRMAXBURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), SCTPCTL_MAXCHUNKS_MIN, SCTPCTL_MAXCHUNKS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_hashtblsize), SCTPCTL_TCBHASHSIZE_MIN, SCTPCTL_TCBHASHSIZE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_pcbtblsize), SCTPCTL_PCBHASHSIZE_MIN, SCTPCTL_PCBHASHSIZE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_min_split_point), SCTPCTL_MIN_SPLIT_POINT_MIN, SCTPCTL_MIN_SPLIT_POINT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_chunkscale), SCTPCTL_CHUNKSCALE_MIN, SCTPCTL_CHUNKSCALE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), SCTPCTL_DELAYED_SACK_TIME_MIN, SCTPCTL_DELAYED_SACK_TIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_sack_freq_default), SCTPCTL_SACK_FREQ_MIN, SCTPCTL_SACK_FREQ_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), SCTPCTL_SYS_RESOURCE_MIN, SCTPCTL_SYS_RESOURCE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), SCTPCTL_ASOC_RESOURCE_MIN, SCTPCTL_ASOC_RESOURCE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), SCTPCTL_HEARTBEAT_INTERVAL_MIN, SCTPCTL_HEARTBEAT_INTERVAL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), SCTPCTL_PMTU_RAISE_TIME_MIN, SCTPCTL_PMTU_RAISE_TIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), SCTPCTL_SHUTDOWN_GUARD_TIME_MIN, SCTPCTL_SHUTDOWN_GUARD_TIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), SCTPCTL_SECRET_LIFETIME_MIN, SCTPCTL_SECRET_LIFETIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_max_default), SCTPCTL_RTO_MAX_MIN, SCTPCTL_RTO_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_min_default), SCTPCTL_RTO_MIN_MIN, SCTPCTL_RTO_MIN_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_initial_default), SCTPCTL_RTO_INITIAL_MIN, SCTPCTL_RTO_INITIAL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rto_max_default), SCTPCTL_INIT_RTO_MAX_MIN, SCTPCTL_INIT_RTO_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), SCTPCTL_VALID_COOKIE_LIFE_MIN, SCTPCTL_VALID_COOKIE_LIFE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), SCTPCTL_INIT_RTX_MAX_MIN, SCTPCTL_INIT_RTX_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), SCTPCTL_ASSOC_RTX_MAX_MIN, SCTPCTL_ASSOC_RTX_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), SCTPCTL_PATH_RTX_MAX_MIN, SCTPCTL_PATH_RTX_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_path_pf_threshold), SCTPCTL_PATH_PF_THRESHOLD_MIN, SCTPCTL_PATH_PF_THRESHOLD_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default), SCTPCTL_INCOMING_STREAMS_MIN, SCTPCTL_INCOMING_STREAMS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_on_off), SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_use_dac), SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nat_friendly), SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_L2_abc_variable), SCTPCTL_ABC_L_VAR_MIN, SCTPCTL_ABC_L_VAR_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), SCTPCTL_MAX_CHAINED_MBUFS_MIN, SCTPCTL_MAX_CHAINED_MBUFS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_do_drain), SCTPCTL_DO_SCTP_DRAIN_MIN, SCTPCTL_DO_SCTP_DRAIN_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_hb_maxburst), SCTPCTL_HB_MAX_BURST_MIN, SCTPCTL_HB_MAX_BURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), SCTPCTL_ABORT_AT_LIMIT_MIN, SCTPCTL_ABORT_AT_LIMIT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_min_residual), SCTPCTL_MIN_RESIDUAL_MIN, SCTPCTL_MIN_RESIDUAL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_max_retran_chunk), SCTPCTL_MAX_RETRAN_CHUNK_MIN, SCTPCTL_MAX_RETRAN_CHUNK_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_logging_level), SCTPCTL_LOGGING_LEVEL_MIN, SCTPCTL_LOGGING_LEVEL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_default_cc_module), SCTPCTL_DEFAULT_CC_MODULE_MIN, SCTPCTL_DEFAULT_CC_MODULE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_default_ss_module), SCTPCTL_DEFAULT_SS_MODULE_MIN, SCTPCTL_DEFAULT_SS_MODULE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_default_frag_interleave), SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN, SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_vtag_time_wait), SCTPCTL_TIME_WAIT_MIN, SCTPCTL_TIME_WAIT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_buffer_splitting), SCTPCTL_BUFFER_SPLITTING_MIN, SCTPCTL_BUFFER_SPLITTING_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_initial_cwnd), SCTPCTL_INITIAL_CWND_MIN, SCTPCTL_INITIAL_CWND_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_bw), SCTPCTL_RTTVAR_BW_MIN, SCTPCTL_RTTVAR_BW_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_rtt), SCTPCTL_RTTVAR_RTT_MIN, SCTPCTL_RTTVAR_RTT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_eqret), SCTPCTL_RTTVAR_EQRET_MIN, SCTPCTL_RTTVAR_EQRET_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_steady_step), SCTPCTL_RTTVAR_STEADYS_MIN, SCTPCTL_RTTVAR_STEADYS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_use_dccc_ecn), SCTPCTL_RTTVAR_DCCCECN_MIN, SCTPCTL_RTTVAR_DCCCECN_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_base), SCTPCTL_MOBILITY_BASE_MIN, SCTPCTL_MOBILITY_BASE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN, SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), SCTPCTL_NAT_FRIENDLY_INITS_MIN, SCTPCTL_NAT_FRIENDLY_INITS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_blackhole), SCTPCTL_BLACKHOLE_MIN, SCTPCTL_BLACKHOLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_sendall_limit), SCTPCTL_SENDALL_LIMIT_MIN, SCTPCTL_SENDALL_LIMIT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_diag_info_code), SCTPCTL_DIAG_INFO_CODE_MIN, SCTPCTL_DIAG_INFO_CODE_MAX);
+#ifdef SCTP_DEBUG
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_debug_on), SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
+#endif
+ }
+ return (error);
+}
+
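
The RANGECHK() calls above clamp every tunable into its documented [MIN, MAX] window before the OIDs are registered. As a minimal sketch of the saturating behaviour this implies (the macro itself is defined earlier in this file; the exact expansion below is an assumption, not a quote):

    /* Hedged sketch: saturate an out-of-range tunable to its nearest
     * bound instead of rejecting it; assumed shape of RANGECHK(). */
    #define RANGECHK(var, min, max)                 \
            do {                                    \
                    if ((var) < (min)) {            \
                            (var) = (min);          \
                    } else if ((var) > (max)) {     \
                            (var) = (max);          \
                    }                               \
            } while (0)
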
+void
+sysctl_setup_sctp(void)
+{
+ sysctl_add_oid(&sysctl_oid_top, "sendspace", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_sendspace), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MAXDGRAM_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "recvspace", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_recvspace), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RECVSPACE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "auto_asconf", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_auto_asconf), 0, sctp_sysctl_handle_int,
+ SCTPCTL_AUTOASCONF_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "ecn_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_ecn_enable), 0, sctp_sysctl_handle_int,
+ SCTPCTL_ECN_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "pr_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_pr_enable), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PR_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "auth_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_auth_enable), 0, sctp_sysctl_handle_auth,
+ SCTPCTL_AUTH_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "asconf_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_asconf_enable), 0, sctp_sysctl_handle_asconf,
+ SCTPCTL_ASCONF_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "reconfig_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_reconfig_enable), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RECONFIG_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "nrsack_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nrsack_enable), 0, sctp_sysctl_handle_int,
+ SCTPCTL_NRSACK_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "pktdrop_enable", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_pktdrop_enable), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PKTDROP_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "loopback_nocsum", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), 0, sctp_sysctl_handle_int,
+ SCTPCTL_LOOPBACK_NOCSUM_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "peer_chkoh", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PEER_CHKOH_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "maxburst", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_max_burst_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MAXBURST_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "fr_maxburst", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_fr_max_burst_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_FRMAXBURST_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "maxchunks", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MAXCHUNKS_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "tcbhashsize", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_hashtblsize), 0, sctp_sysctl_handle_int,
+ SCTPCTL_TCBHASHSIZE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "pcbhashsize", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_pcbtblsize), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PCBHASHSIZE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "min_split_point", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_min_split_point), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MIN_SPLIT_POINT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "chunkscale", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_chunkscale), 0, sctp_sysctl_handle_int,
+ SCTPCTL_CHUNKSCALE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "delayed_sack_time", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DELAYED_SACK_TIME_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "sack_freq", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_sack_freq_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_SACK_FREQ_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "sys_resource", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), 0, sctp_sysctl_handle_int,
+ SCTPCTL_SYS_RESOURCE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "asoc_resource", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), 0, sctp_sysctl_handle_int,
+ SCTPCTL_ASOC_RESOURCE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "heartbeat_interval", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_HEARTBEAT_INTERVAL_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "pmtu_raise_time", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PMTU_RAISE_TIME_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "shutdown_guard_time", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_SHUTDOWN_GUARD_TIME_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "secret_lifetime", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_SECRET_LIFETIME_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rto_max", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rto_max_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTO_MAX_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rto_min", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rto_min_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTO_MIN_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rto_initial", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rto_initial_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTO_INITIAL_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "init_rto_max", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_init_rto_max_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_INIT_RTO_MAX_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "valid_cookie_life", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_VALID_COOKIE_LIFE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "init_rtx_max", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_INIT_RTX_MAX_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "assoc_rtx_max", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_ASSOC_RTX_MAX_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "path_rtx_max", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PATH_RTX_MAX_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "path_pf_threshold", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_path_pf_threshold), 0, sctp_sysctl_handle_int,
+ SCTPCTL_PATH_PF_THRESHOLD_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "add_more_on_output", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_add_more_threshold), 0, sctp_sysctl_handle_int,
+ SCTPCTL_ADD_MORE_ON_OUTPUT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "incoming_streams", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_INCOMING_STREAMS_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "outgoing_streams", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), 0, sctp_sysctl_handle_int,
+ SCTPCTL_OUTGOING_STREAMS_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "cmt_on_off", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_cmt_on_off), 0, sctp_sysctl_handle_int,
+ SCTPCTL_CMT_ON_OFF_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "cmt_use_dac", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_cmt_use_dac), 0, sctp_sysctl_handle_int,
+ SCTPCTL_CMT_USE_DAC_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "cwnd_maxburst", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), 0, sctp_sysctl_handle_int,
+ SCTPCTL_CWND_MAXBURST_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "nat_friendly", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nat_friendly), 0, sctp_sysctl_handle_int,
+ SCTPCTL_NAT_FRIENDLY_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "abc_l_var", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_L2_abc_variable), 0, sctp_sysctl_handle_int,
+ SCTPCTL_ABC_L_VAR_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "max_chained_mbufs", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MAX_CHAINED_MBUFS_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "do_sctp_drain", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_do_drain), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DO_SCTP_DRAIN_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "hb_max_burst", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_hb_maxburst), 0, sctp_sysctl_handle_int,
+ SCTPCTL_HB_MAX_BURST_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "abort_at_limit", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), 0, sctp_sysctl_handle_int,
+ SCTPCTL_ABORT_AT_LIMIT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "min_residual", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_min_residual), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MIN_RESIDUAL_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "max_retran_chunk", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_max_retran_chunk), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MAX_RETRAN_CHUNK_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "log_level", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_logging_level), 0, sctp_sysctl_handle_int,
+ SCTPCTL_LOGGING_LEVEL_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "default_cc_module", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_default_cc_module), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DEFAULT_CC_MODULE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "default_ss_module", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_default_ss_module), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DEFAULT_SS_MODULE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "default_frag_interleave", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_default_frag_interleave), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "mobility_base", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_mobility_base), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MOBILITY_BASE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "mobility_fasthandoff", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), 0, sctp_sysctl_handle_int,
+ SCTPCTL_MOBILITY_FASTHANDOFF_DESC);
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ sysctl_add_oid(&sysctl_oid_top, "sctp_log", CTLTYPE_STRUCT|CTLFLAG_RD,
+ SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log), NULL,
+ "SCTP logging (struct sctp_log)");
+
+ sysctl_add_oid(&sysctl_oid_top, "clear_trace", CTLTYPE_INT|CTLFLAG_WR,
+ NULL, 0, sctp_sysctl_handle_trace_log_clear,
+ "Clear SCTP Logging buffer");
+#endif
+
+ sysctl_add_oid(&sysctl_oid_top, "udp_tunneling_port", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_udp_tunneling_port), 0, sctp_sysctl_handle_udp_tunneling,
+ SCTPCTL_UDP_TUNNELING_PORT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "enable_sack_immediately", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), 0, sctp_sysctl_handle_int,
+ SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "nat_friendly_init", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), 0, sctp_sysctl_handle_int,
+ SCTPCTL_NAT_FRIENDLY_INITS_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "vtag_time_wait", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_vtag_time_wait), 0, sctp_sysctl_handle_int,
+ SCTPCTL_TIME_WAIT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "buffer_splitting", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_buffer_splitting), 0, sctp_sysctl_handle_int,
+ SCTPCTL_BUFFER_SPLITTING_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "initial_cwnd", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_initial_cwnd), 0, sctp_sysctl_handle_int,
+ SCTPCTL_INITIAL_CWND_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rttvar_bw", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rttvar_bw), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTTVAR_BW_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rttvar_rtt", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rttvar_rtt), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTTVAR_RTT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rttvar_eqret", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rttvar_eqret), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTTVAR_EQRET_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "rttvar_steady_step", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_steady_step), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTTVAR_STEADYS_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "use_dcccecn", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_use_dccc_ecn), 0, sctp_sysctl_handle_int,
+ SCTPCTL_RTTVAR_DCCCECN_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "blackhole", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_blackhole), 0, sctp_sysctl_handle_int,
+ SCTPCTL_BLACKHOLE_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "sendall_limit", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_sendall_limit), 0, sctp_sysctl_handle_int,
+ SCTPCTL_SENDALL_LIMIT_DESC);
+
+ sysctl_add_oid(&sysctl_oid_top, "diag_info_code", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_diag_info_code), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DIAG_INFO_CODE_DESC);
+
+#ifdef SCTP_DEBUG
+ sysctl_add_oid(&sysctl_oid_top, "debug", CTLTYPE_INT|CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_debug_on), 0, sctp_sysctl_handle_int,
+ SCTPCTL_DEBUG_DESC);
+#endif
+
+ sysctl_add_oid(&sysctl_oid_top, "stats", CTLTYPE_STRUCT|CTLFLAG_RW,
+ &SCTP_BASE_STATS, sizeof(SCTP_BASE_STATS), NULL,
+ "SCTP statistics (struct sctp_stat)");
+
+ sysctl_add_oid(&sysctl_oid_top, "assoclist", CTLTYPE_STRUCT|CTLFLAG_RD,
+ NULL, 0, sctp_assoclist,
+ "List of active SCTP associations");
+}
+#endif
+#endif
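
Applications embedding this library normally reach the tunables registered above through usrsctp's public accessor layer rather than sysctl_setup_sctp() itself. A hedged usage sketch follows; the usrsctp_sysctl_set_/get_ names assume the per-variable naming convention usrsctp's public header is believed to generate, and are assumptions here:

    #include <stdio.h>
    #include <usrsctp.h>

    static void
    tune_sctp_stack(void)
    {
            /* Accessors assumed per the usrsctp naming convention;
             * call only after usrsctp_init() has set the defaults. */
            usrsctp_sysctl_set_sctp_ecn_enable(1);
            usrsctp_sysctl_set_sctp_rto_max_default(10000); /* ms */
            printf("heartbeat interval: %u ms\n",
                   usrsctp_sysctl_get_sctp_heartbeat_interval_default());
    }
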
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.h
new file mode 100644
index 000000000..eb888338c
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_sysctl.h
@@ -0,0 +1,632 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.h 361895 2020-06-07 14:39:20Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_SYSCTL_H_
+#define _NETINET_SCTP_SYSCTL_H_
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_constants.h>
+
+struct sctp_sysctl {
+ uint32_t sctp_sendspace;
+ uint32_t sctp_recvspace;
+ uint32_t sctp_auto_asconf;
+ uint32_t sctp_multiple_asconfs;
+ uint32_t sctp_ecn_enable;
+ uint32_t sctp_pr_enable;
+ uint32_t sctp_auth_enable;
+ uint32_t sctp_asconf_enable;
+ uint32_t sctp_reconfig_enable;
+ uint32_t sctp_nrsack_enable;
+ uint32_t sctp_pktdrop_enable;
+ uint32_t sctp_fr_max_burst_default;
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ uint32_t sctp_no_csum_on_loopback;
+#endif
+ uint32_t sctp_peer_chunk_oh;
+ uint32_t sctp_max_burst_default;
+ uint32_t sctp_max_chunks_on_queue;
+ uint32_t sctp_hashtblsize;
+ uint32_t sctp_pcbtblsize;
+ uint32_t sctp_min_split_point;
+ uint32_t sctp_chunkscale;
+ uint32_t sctp_delayed_sack_time_default;
+ uint32_t sctp_sack_freq_default;
+ uint32_t sctp_system_free_resc_limit;
+ uint32_t sctp_asoc_free_resc_limit;
+ uint32_t sctp_heartbeat_interval_default;
+ uint32_t sctp_pmtu_raise_time_default;
+ uint32_t sctp_shutdown_guard_time_default;
+ uint32_t sctp_secret_lifetime_default;
+ uint32_t sctp_rto_max_default;
+ uint32_t sctp_rto_min_default;
+ uint32_t sctp_rto_initial_default;
+ uint32_t sctp_init_rto_max_default;
+ uint32_t sctp_valid_cookie_life_default;
+ uint32_t sctp_init_rtx_max_default;
+ uint32_t sctp_assoc_rtx_max_default;
+ uint32_t sctp_path_rtx_max_default;
+ uint32_t sctp_path_pf_threshold;
+ uint32_t sctp_add_more_threshold;
+ uint32_t sctp_nr_incoming_streams_default;
+ uint32_t sctp_nr_outgoing_streams_default;
+ uint32_t sctp_cmt_on_off;
+ uint32_t sctp_cmt_use_dac;
+ uint32_t sctp_use_cwnd_based_maxburst;
+ uint32_t sctp_nat_friendly;
+ uint32_t sctp_L2_abc_variable;
+ uint32_t sctp_mbuf_threshold_count;
+ uint32_t sctp_do_drain;
+ uint32_t sctp_hb_maxburst;
+ uint32_t sctp_abort_if_one_2_one_hits_limit;
+ uint32_t sctp_min_residual;
+ uint32_t sctp_max_retran_chunk;
+ uint32_t sctp_logging_level;
+ /* JRS - Variable for default congestion control module */
+ uint32_t sctp_default_cc_module;
+ /* RS - Variable for default stream scheduling module */
+ uint32_t sctp_default_ss_module;
+ uint32_t sctp_default_frag_interleave;
+ uint32_t sctp_mobility_base;
+ uint32_t sctp_mobility_fasthandoff;
+ uint32_t sctp_inits_include_nat_friendly;
+ uint32_t sctp_rttvar_bw;
+ uint32_t sctp_rttvar_rtt;
+ uint32_t sctp_rttvar_eqret;
+ uint32_t sctp_steady_step;
+ uint32_t sctp_use_dccc_ecn;
+ uint32_t sctp_diag_info_code;
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(_WIN32) && !defined(__Userspace__)
+ struct sctp_log *sctp_log;
+#else
+ struct sctp_log sctp_log;
+#endif
+#endif
+ uint32_t sctp_udp_tunneling_port;
+ uint32_t sctp_enable_sack_immediately;
+ uint32_t sctp_vtag_time_wait;
+ uint32_t sctp_buffer_splitting;
+ uint32_t sctp_initial_cwnd;
+ uint32_t sctp_blackhole;
+ uint32_t sctp_sendall_limit;
+#if defined(SCTP_DEBUG)
+ uint32_t sctp_debug_on;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ uint32_t sctp_ignore_vmware_interfaces;
+ uint32_t sctp_main_timer;
+ uint32_t sctp_addr_watchdog_limit;
+ uint32_t sctp_vtag_watchdog_limit;
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ uint32_t sctp_output_unlocked;
+#endif
+};
+
+/*
+ * limits for the sysctl variables
+ */
+/* maxdgram: Maximum outgoing SCTP buffer size */
+#define SCTPCTL_MAXDGRAM_DESC "Maximum outgoing SCTP buffer size"
+#define SCTPCTL_MAXDGRAM_MIN 0
+#define SCTPCTL_MAXDGRAM_MAX 0xFFFFFFFF
+#if defined(__Userspace__)
+#define SCTPCTL_MAXDGRAM_DEFAULT SB_MAX
+#else
+#define SCTPCTL_MAXDGRAM_DEFAULT 262144 /* 256k */
+#endif
+
+/* recvspace: Maximum incoming SCTP buffer size */
+#define SCTPCTL_RECVSPACE_DESC "Maximum incoming SCTP buffer size"
+#define SCTPCTL_RECVSPACE_MIN 0
+#define SCTPCTL_RECVSPACE_MAX 0xFFFFFFFF
+#if defined(__Userspace__)
+#define SCTPCTL_RECVSPACE_DEFAULT SB_RAW
+#else
+#define SCTPCTL_RECVSPACE_DEFAULT 262144 /* 256k */
+#endif
+
+/* autoasconf: Enable SCTP Auto-ASCONF */
+#define SCTPCTL_AUTOASCONF_DESC "Enable SCTP Auto-ASCONF"
+#define SCTPCTL_AUTOASCONF_MIN 0
+#define SCTPCTL_AUTOASCONF_MAX 1
+#define SCTPCTL_AUTOASCONF_DEFAULT 1
+
+/* multiple_asconfs: Enable SCTP Multiple-ASCONFs */
+#define SCTPCTL_MULTIPLEASCONFS_DESC "Enable SCTP Multiple-ASCONFs"
+#define SCTPCTL_MULTIPLEASCONFS_MIN 0
+#define SCTPCTL_MULTIPLEASCONFS_MAX 1
+#define SCTPCTL_MULTIPLEASCONFS_DEFAULT SCTP_DEFAULT_MULTIPLE_ASCONFS
+
+/* ecn_enable: Enable SCTP ECN */
+#define SCTPCTL_ECN_ENABLE_DESC "Enable SCTP ECN"
+#define SCTPCTL_ECN_ENABLE_MIN 0
+#define SCTPCTL_ECN_ENABLE_MAX 1
+#define SCTPCTL_ECN_ENABLE_DEFAULT 1
+
+/* pr_enable: Enable PR-SCTP */
+#define SCTPCTL_PR_ENABLE_DESC "Enable PR-SCTP"
+#define SCTPCTL_PR_ENABLE_MIN 0
+#define SCTPCTL_PR_ENABLE_MAX 1
+#define SCTPCTL_PR_ENABLE_DEFAULT 1
+
+/* auth_enable: Enable SCTP AUTH function */
+#define SCTPCTL_AUTH_ENABLE_DESC "Enable SCTP AUTH function"
+#define SCTPCTL_AUTH_ENABLE_MIN 0
+#define SCTPCTL_AUTH_ENABLE_MAX 1
+#define SCTPCTL_AUTH_ENABLE_DEFAULT 1
+
+/* asconf_enable: Enable SCTP ASCONF */
+#define SCTPCTL_ASCONF_ENABLE_DESC "Enable SCTP ASCONF"
+#define SCTPCTL_ASCONF_ENABLE_MIN 0
+#define SCTPCTL_ASCONF_ENABLE_MAX 1
+#define SCTPCTL_ASCONF_ENABLE_DEFAULT 1
+
+/* reconfig_enable: Enable SCTP RE-CONFIG */
+#define SCTPCTL_RECONFIG_ENABLE_DESC "Enable SCTP RE-CONFIG"
+#define SCTPCTL_RECONFIG_ENABLE_MIN 0
+#define SCTPCTL_RECONFIG_ENABLE_MAX 1
+#define SCTPCTL_RECONFIG_ENABLE_DEFAULT 1
+
+/* nrsack_enable: Enable NR_SACK */
+#define SCTPCTL_NRSACK_ENABLE_DESC "Enable SCTP NR-SACK"
+#define SCTPCTL_NRSACK_ENABLE_MIN 0
+#define SCTPCTL_NRSACK_ENABLE_MAX 1
+#define SCTPCTL_NRSACK_ENABLE_DEFAULT 0
+
+/* pktdrop_enable: Enable SCTP Packet Drop Reports */
+#define SCTPCTL_PKTDROP_ENABLE_DESC "Enable SCTP PKTDROP"
+#define SCTPCTL_PKTDROP_ENABLE_MIN 0
+#define SCTPCTL_PKTDROP_ENABLE_MAX 1
+#define SCTPCTL_PKTDROP_ENABLE_DEFAULT 0
+
+/* loopback_nocsum: Enable NO Csum on packets sent on loopback */
+#define SCTPCTL_LOOPBACK_NOCSUM_DESC "Enable NO Csum on packets sent on loopback"
+#define SCTPCTL_LOOPBACK_NOCSUM_MIN 0
+#define SCTPCTL_LOOPBACK_NOCSUM_MAX 1
+#define SCTPCTL_LOOPBACK_NOCSUM_DEFAULT 1
+
+/* peer_chkoh: Amount to debit peers rwnd per chunk sent */
+#define SCTPCTL_PEER_CHKOH_DESC "Amount to debit peers rwnd per chunk sent"
+#define SCTPCTL_PEER_CHKOH_MIN 0
+#define SCTPCTL_PEER_CHKOH_MAX 0xFFFFFFFF
+#define SCTPCTL_PEER_CHKOH_DEFAULT 256
+
+/* maxburst: Default max burst for sctp endpoints */
+#define SCTPCTL_MAXBURST_DESC "Default max burst for sctp endpoints"
+#define SCTPCTL_MAXBURST_MIN 0
+#define SCTPCTL_MAXBURST_MAX 0xFFFFFFFF
+#define SCTPCTL_MAXBURST_DEFAULT SCTP_DEF_MAX_BURST
+
+/* fr_maxburst: Default max burst for sctp endpoints when fast retransmitting */
+#define SCTPCTL_FRMAXBURST_DESC "Default max burst for SCTP endpoints when fast retransmitting"
+#define SCTPCTL_FRMAXBURST_MIN 0
+#define SCTPCTL_FRMAXBURST_MAX 0xFFFFFFFF
+#define SCTPCTL_FRMAXBURST_DEFAULT SCTP_DEF_FRMAX_BURST
+
+/* maxchunks: Default max chunks on queue per asoc */
+#define SCTPCTL_MAXCHUNKS_DESC "Default max chunks on queue per asoc"
+#define SCTPCTL_MAXCHUNKS_MIN 0
+#define SCTPCTL_MAXCHUNKS_MAX 0xFFFFFFFF
+#define SCTPCTL_MAXCHUNKS_DEFAULT SCTP_ASOC_MAX_CHUNKS_ON_QUEUE
+
+/* tcbhashsize: Tunable for Hash table sizes */
+#define SCTPCTL_TCBHASHSIZE_DESC "Tunable for TCB hash table sizes"
+#define SCTPCTL_TCBHASHSIZE_MIN 1
+#define SCTPCTL_TCBHASHSIZE_MAX 0xFFFFFFFF
+#define SCTPCTL_TCBHASHSIZE_DEFAULT SCTP_TCBHASHSIZE
+
+/* pcbhashsize: Tunable for PCB Hash table sizes */
+#define SCTPCTL_PCBHASHSIZE_DESC "Tunable for PCB hash table sizes"
+#define SCTPCTL_PCBHASHSIZE_MIN 1
+#define SCTPCTL_PCBHASHSIZE_MAX 0xFFFFFFFF
+#define SCTPCTL_PCBHASHSIZE_DEFAULT SCTP_PCBHASHSIZE
+
+/* min_split_point: Minimum size when splitting a chunk */
+#define SCTPCTL_MIN_SPLIT_POINT_DESC "Minimum size when splitting a chunk"
+#define SCTPCTL_MIN_SPLIT_POINT_MIN 0
+#define SCTPCTL_MIN_SPLIT_POINT_MAX 0xFFFFFFFF
+#define SCTPCTL_MIN_SPLIT_POINT_DEFAULT SCTP_DEFAULT_SPLIT_POINT_MIN
+
+/* chunkscale: Tunable for Scaling of number of chunks and messages */
+#define SCTPCTL_CHUNKSCALE_DESC "Tunable for scaling of number of chunks and messages"
+#define SCTPCTL_CHUNKSCALE_MIN 1
+#define SCTPCTL_CHUNKSCALE_MAX 0xFFFFFFFF
+#define SCTPCTL_CHUNKSCALE_DEFAULT SCTP_CHUNKQUEUE_SCALE
+
+/* delayed_sack_time: Default delayed SACK timer in ms */
+#define SCTPCTL_DELAYED_SACK_TIME_DESC "Default delayed SACK timer in ms"
+#define SCTPCTL_DELAYED_SACK_TIME_MIN 0
+#define SCTPCTL_DELAYED_SACK_TIME_MAX 0xFFFFFFFF
+#define SCTPCTL_DELAYED_SACK_TIME_DEFAULT SCTP_RECV_MSEC
+
+/* sack_freq: Default SACK frequency */
+#define SCTPCTL_SACK_FREQ_DESC "Default SACK frequency"
+#define SCTPCTL_SACK_FREQ_MIN 0
+#define SCTPCTL_SACK_FREQ_MAX 0xFFFFFFFF
+#define SCTPCTL_SACK_FREQ_DEFAULT SCTP_DEFAULT_SACK_FREQ
+
+/* sys_resource: Max number of cached resources in the system */
+#define SCTPCTL_SYS_RESOURCE_DESC "Max number of cached resources in the system"
+#define SCTPCTL_SYS_RESOURCE_MIN 0
+#define SCTPCTL_SYS_RESOURCE_MAX 0xFFFFFFFF
+#define SCTPCTL_SYS_RESOURCE_DEFAULT SCTP_DEF_SYSTEM_RESC_LIMIT
+
+/* asoc_resource: Max number of cached resources in an asoc */
+#define SCTPCTL_ASOC_RESOURCE_DESC "Max number of cached resources in an asoc"
+#define SCTPCTL_ASOC_RESOURCE_MIN 0
+#define SCTPCTL_ASOC_RESOURCE_MAX 0xFFFFFFFF
+#define SCTPCTL_ASOC_RESOURCE_DEFAULT SCTP_DEF_ASOC_RESC_LIMIT
+
+/* heartbeat_interval: Default heartbeat interval in ms */
+#define SCTPCTL_HEARTBEAT_INTERVAL_DESC "Default heartbeat interval in ms"
+#define SCTPCTL_HEARTBEAT_INTERVAL_MIN 0
+#define SCTPCTL_HEARTBEAT_INTERVAL_MAX 0xFFFFFFFF
+#define SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT SCTP_HB_DEFAULT_MSEC
+
+/* pmtu_raise_time: Default PMTU raise timer in seconds */
+#define SCTPCTL_PMTU_RAISE_TIME_DESC "Default PMTU raise timer in seconds"
+#define SCTPCTL_PMTU_RAISE_TIME_MIN 0
+#define SCTPCTL_PMTU_RAISE_TIME_MAX 0xFFFFFFFF
+#define SCTPCTL_PMTU_RAISE_TIME_DEFAULT SCTP_DEF_PMTU_RAISE_SEC
+
+/* shutdown_guard_time: Default shutdown guard timer in seconds */
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_DESC "Shutdown guard timer in seconds (0 means 5 times RTO.Max)"
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_MIN 0
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_MAX 0xFFFFFFFF
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT 0
+
+/* secret_lifetime: Default secret lifetime in seconds */
+#define SCTPCTL_SECRET_LIFETIME_DESC "Default secret lifetime in seconds"
+#define SCTPCTL_SECRET_LIFETIME_MIN 0
+#define SCTPCTL_SECRET_LIFETIME_MAX 0xFFFFFFFF
+#define SCTPCTL_SECRET_LIFETIME_DEFAULT SCTP_DEFAULT_SECRET_LIFE_SEC
+
+/* rto_max: Default maximum retransmission timeout in ms */
+#define SCTPCTL_RTO_MAX_DESC "Default maximum retransmission timeout in ms"
+#define SCTPCTL_RTO_MAX_MIN 0
+#define SCTPCTL_RTO_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_RTO_MAX_DEFAULT SCTP_RTO_UPPER_BOUND
+
+/* rto_min: Default minimum retransmission timeout in ms */
+#define SCTPCTL_RTO_MIN_DESC "Default minimum retransmission timeout in ms"
+#define SCTPCTL_RTO_MIN_MIN 0
+#define SCTPCTL_RTO_MIN_MAX 0xFFFFFFFF
+#define SCTPCTL_RTO_MIN_DEFAULT SCTP_RTO_LOWER_BOUND
+
+/* rto_initial: Default initial retransmission timeout in ms */
+#define SCTPCTL_RTO_INITIAL_DESC "Default initial retransmission timeout in ms"
+#define SCTPCTL_RTO_INITIAL_MIN 0
+#define SCTPCTL_RTO_INITIAL_MAX 0xFFFFFFFF
+#define SCTPCTL_RTO_INITIAL_DEFAULT SCTP_RTO_INITIAL
+
+/* init_rto_max: Default maximum retransmission timeout during association setup in ms */
+#define SCTPCTL_INIT_RTO_MAX_DESC "Default maximum retransmission timeout during association setup in ms"
+#define SCTPCTL_INIT_RTO_MAX_MIN 0
+#define SCTPCTL_INIT_RTO_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_INIT_RTO_MAX_DEFAULT SCTP_RTO_UPPER_BOUND
+
+/* valid_cookie_life: Default cookie lifetime in sec */
+#define SCTPCTL_VALID_COOKIE_LIFE_DESC "Default cookie lifetime in seconds"
+#define SCTPCTL_VALID_COOKIE_LIFE_MIN 0
+#define SCTPCTL_VALID_COOKIE_LIFE_MAX 0xFFFFFFFF
+#define SCTPCTL_VALID_COOKIE_LIFE_DEFAULT SCTP_DEFAULT_COOKIE_LIFE
+
+/* init_rtx_max: Default maximum number of retransmissions for INIT chunks */
+#define SCTPCTL_INIT_RTX_MAX_DESC "Default maximum number of retransmissions for INIT chunks"
+#define SCTPCTL_INIT_RTX_MAX_MIN 0
+#define SCTPCTL_INIT_RTX_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_INIT_RTX_MAX_DEFAULT SCTP_DEF_MAX_INIT
+
+/* assoc_rtx_max: Default maximum number of retransmissions per association */
+#define SCTPCTL_ASSOC_RTX_MAX_DESC "Default maximum number of retransmissions per association"
+#define SCTPCTL_ASSOC_RTX_MAX_MIN 0
+#define SCTPCTL_ASSOC_RTX_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_ASSOC_RTX_MAX_DEFAULT SCTP_DEF_MAX_SEND
+
+/* path_rtx_max: Default maximum number of retransmissions per path */
+#define SCTPCTL_PATH_RTX_MAX_DESC "Default maximum number of retransmissions per path"
+#define SCTPCTL_PATH_RTX_MAX_MIN 0
+#define SCTPCTL_PATH_RTX_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_PATH_RTX_MAX_DEFAULT SCTP_DEF_MAX_PATH_RTX
+
+/* path_pf_threshold: threshold for considering the path potentially failed */
+#define SCTPCTL_PATH_PF_THRESHOLD_DESC "Default potentially failed threshold"
+#define SCTPCTL_PATH_PF_THRESHOLD_MIN 0
+#define SCTPCTL_PATH_PF_THRESHOLD_MAX 0xFFFF
+#define SCTPCTL_PATH_PF_THRESHOLD_DEFAULT SCTPCTL_PATH_PF_THRESHOLD_MAX
+
+/* add_more_on_output: When space-wise is it worthwhile to try to add more to a socket send buffer */
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_DESC "When space-wise is it worthwhile to try to add more to a socket send buffer"
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_MIN 0
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_MAX 0xFFFFFFFF
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT SCTP_DEFAULT_ADD_MORE
+
+/* incoming_streams: Default number of incoming streams */
+#define SCTPCTL_INCOMING_STREAMS_DESC "Default number of incoming streams"
+#define SCTPCTL_INCOMING_STREAMS_MIN 1
+#define SCTPCTL_INCOMING_STREAMS_MAX 65535
+#define SCTPCTL_INCOMING_STREAMS_DEFAULT SCTP_ISTREAM_INITIAL
+
+/* outgoing_streams: Default number of outgoing streams */
+#define SCTPCTL_OUTGOING_STREAMS_DESC "Default number of outgoing streams"
+#define SCTPCTL_OUTGOING_STREAMS_MIN 1
+#define SCTPCTL_OUTGOING_STREAMS_MAX 65535
+#define SCTPCTL_OUTGOING_STREAMS_DEFAULT SCTP_OSTREAM_INITIAL
+
+/* cmt_on_off: CMT on/off flag */
+#define SCTPCTL_CMT_ON_OFF_DESC "CMT settings"
+#define SCTPCTL_CMT_ON_OFF_MIN SCTP_CMT_OFF
+#define SCTPCTL_CMT_ON_OFF_MAX SCTP_CMT_MAX
+#define SCTPCTL_CMT_ON_OFF_DEFAULT SCTP_CMT_OFF
+
+/* cmt_use_dac: CMT DAC on/off flag */
+#define SCTPCTL_CMT_USE_DAC_DESC "CMT DAC on/off flag"
+#define SCTPCTL_CMT_USE_DAC_MIN 0
+#define SCTPCTL_CMT_USE_DAC_MAX 1
+#define SCTPCTL_CMT_USE_DAC_DEFAULT 0
+
+/* cwnd_maxburst: Use a CWND adjusting to implement maxburst */
+#define SCTPCTL_CWND_MAXBURST_DESC "Adjust congestion control window to limit maximum burst when sending"
+#define SCTPCTL_CWND_MAXBURST_MIN 0
+#define SCTPCTL_CWND_MAXBURST_MAX 1
+#define SCTPCTL_CWND_MAXBURST_DEFAULT 1
+
+/* nat_friendly: SCTP NAT friendly operation */
+#define SCTPCTL_NAT_FRIENDLY_DESC "SCTP NAT friendly operation"
+#define SCTPCTL_NAT_FRIENDLY_MIN 0
+#define SCTPCTL_NAT_FRIENDLY_MAX 1
+#define SCTPCTL_NAT_FRIENDLY_DEFAULT 1
+
+/* abc_l_var: SCTP ABC max increase per SACK (L) */
+#define SCTPCTL_ABC_L_VAR_DESC "SCTP ABC max increase per SACK (L)"
+#define SCTPCTL_ABC_L_VAR_MIN 0
+#define SCTPCTL_ABC_L_VAR_MAX 0xFFFFFFFF
+#define SCTPCTL_ABC_L_VAR_DEFAULT 2
+
+/* max_chained_mbufs: Default max number of small mbufs on a chain */
+#define SCTPCTL_MAX_CHAINED_MBUFS_DESC "Default max number of small mbufs on a chain"
+#define SCTPCTL_MAX_CHAINED_MBUFS_MIN 0
+#define SCTPCTL_MAX_CHAINED_MBUFS_MAX 0xFFFFFFFF
+#define SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT SCTP_DEFAULT_MBUFS_IN_CHAIN
+
+/* do_sctp_drain: Should SCTP respond to the drain calls */
+#define SCTPCTL_DO_SCTP_DRAIN_DESC "Should SCTP respond to the drain calls"
+#define SCTPCTL_DO_SCTP_DRAIN_MIN 0
+#define SCTPCTL_DO_SCTP_DRAIN_MAX 1
+#define SCTPCTL_DO_SCTP_DRAIN_DEFAULT 1
+
+/* hb_max_burst: Confirmation Heartbeat max burst */
+#define SCTPCTL_HB_MAX_BURST_DESC "Confirmation Heartbeat max burst"
+#define SCTPCTL_HB_MAX_BURST_MIN 1
+#define SCTPCTL_HB_MAX_BURST_MAX 0xFFFFFFFF
+#define SCTPCTL_HB_MAX_BURST_DEFAULT SCTP_DEF_HBMAX_BURST
+
+/* abort_at_limit: When one-2-one hits qlimit abort */
+#define SCTPCTL_ABORT_AT_LIMIT_DESC "Abort when one-to-one hits qlimit"
+#define SCTPCTL_ABORT_AT_LIMIT_MIN 0
+#define SCTPCTL_ABORT_AT_LIMIT_MAX 1
+#define SCTPCTL_ABORT_AT_LIMIT_DEFAULT 0
+
+/* min_residual: min residual in a data fragment leftover */
+#define SCTPCTL_MIN_RESIDUAL_DESC "Minimum residual data chunk in second part of split"
+#define SCTPCTL_MIN_RESIDUAL_MIN 20
+#define SCTPCTL_MIN_RESIDUAL_MAX 65535
+#define SCTPCTL_MIN_RESIDUAL_DEFAULT 1452
+
+/* max_retran_chunk: max chunk retransmissions */
+#define SCTPCTL_MAX_RETRAN_CHUNK_DESC "Maximum times an unlucky chunk can be retransmitted before assoc abort"
+#define SCTPCTL_MAX_RETRAN_CHUNK_MIN 0
+#define SCTPCTL_MAX_RETRAN_CHUNK_MAX 65535
+#define SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT 30
+
+/* sctp_logging: This gives us logging when the options are enabled */
+#define SCTPCTL_LOGGING_LEVEL_DESC "Ltrace/KTR trace logging level"
+#define SCTPCTL_LOGGING_LEVEL_MIN 0
+#define SCTPCTL_LOGGING_LEVEL_MAX 0xffffffff
+#define SCTPCTL_LOGGING_LEVEL_DEFAULT 0
+
+/* JRS - default congestion control module sysctl */
+#define SCTPCTL_DEFAULT_CC_MODULE_DESC "Default congestion control module"
+#define SCTPCTL_DEFAULT_CC_MODULE_MIN 0
+#define SCTPCTL_DEFAULT_CC_MODULE_MAX 2
+#define SCTPCTL_DEFAULT_CC_MODULE_DEFAULT 0
+
+/* RS - default stream scheduling module sysctl */
+#define SCTPCTL_DEFAULT_SS_MODULE_DESC "Default stream scheduling module"
+#define SCTPCTL_DEFAULT_SS_MODULE_MIN 0
+#define SCTPCTL_DEFAULT_SS_MODULE_MAX 5
+#define SCTPCTL_DEFAULT_SS_MODULE_DEFAULT 0
+
+/* RRS - default fragment interleave */
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC "Default fragment interleave level"
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN 0
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX 2
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT 1
+
+/* mobility_base: Enable SCTP mobility support */
+#define SCTPCTL_MOBILITY_BASE_DESC "Enable SCTP base mobility"
+#define SCTPCTL_MOBILITY_BASE_MIN 0
+#define SCTPCTL_MOBILITY_BASE_MAX 1
+#define SCTPCTL_MOBILITY_BASE_DEFAULT 0
+
+/* mobility_fasthandoff: Enable SCTP fast handoff support */
+#define SCTPCTL_MOBILITY_FASTHANDOFF_DESC "Enable SCTP fast handoff"
+#define SCTPCTL_MOBILITY_FASTHANDOFF_MIN 0
+#define SCTPCTL_MOBILITY_FASTHANDOFF_MAX 1
+#define SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT 0
+
+/* Enable SCTP/UDP tunneling port */
+#define SCTPCTL_UDP_TUNNELING_PORT_DESC "Set the SCTP/UDP tunneling port"
+#define SCTPCTL_UDP_TUNNELING_PORT_MIN 0
+#define SCTPCTL_UDP_TUNNELING_PORT_MAX 65535
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT 0
+#else
+#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT SCTP_OVER_UDP_TUNNELING_PORT
+#endif
+
+/* Enable sending of the SACK-IMMEDIATELY bit */
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC "Enable sending of the SACK-IMMEDIATELY-bit"
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN 0
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX 1
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX
+
+/* Enable sending of the NAT-FRIENDLY message */
+#define SCTPCTL_NAT_FRIENDLY_INITS_DESC "Enable sending of the nat-friendly SCTP option on INITs"
+#define SCTPCTL_NAT_FRIENDLY_INITS_MIN 0
+#define SCTPCTL_NAT_FRIENDLY_INITS_MAX 1
+#define SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT SCTPCTL_NAT_FRIENDLY_INITS_MIN
+
+/* Vtag time wait in seconds */
+#define SCTPCTL_TIME_WAIT_DESC "Vtag time wait time in seconds, 0 disables it"
+#define SCTPCTL_TIME_WAIT_MIN 0
+#define SCTPCTL_TIME_WAIT_MAX 0xffffffff
+#define SCTPCTL_TIME_WAIT_DEFAULT SCTP_TIME_WAIT
+
+/* Enable Send/Receive buffer splitting */
+#define SCTPCTL_BUFFER_SPLITTING_DESC "Enable send/receive buffer splitting"
+#define SCTPCTL_BUFFER_SPLITTING_MIN 0
+#define SCTPCTL_BUFFER_SPLITTING_MAX 0x3
+#define SCTPCTL_BUFFER_SPLITTING_DEFAULT SCTPCTL_BUFFER_SPLITTING_MIN
+
+/* Initial congestion window in MTUs */
+#define SCTPCTL_INITIAL_CWND_DESC "Defines the initial congestion window size in MTUs"
+#define SCTPCTL_INITIAL_CWND_MIN 0
+#define SCTPCTL_INITIAL_CWND_MAX 0xffffffff
+#define SCTPCTL_INITIAL_CWND_DEFAULT 3
+
+/* rttvar smooth avg for bw calc */
+#define SCTPCTL_RTTVAR_BW_DESC "Shift amount DCCC uses for bw smoothing on rtt calc"
+#define SCTPCTL_RTTVAR_BW_MIN 0
+#define SCTPCTL_RTTVAR_BW_MAX 32
+#define SCTPCTL_RTTVAR_BW_DEFAULT 4
+
+/* rttvar smooth avg for bw calc */
+#define SCTPCTL_RTTVAR_RTT_DESC "Shift amount DCCC uses for rtt smoothing on rtt calc"
+#define SCTPCTL_RTTVAR_RTT_MIN 0
+#define SCTPCTL_RTTVAR_RTT_MAX 32
+#define SCTPCTL_RTTVAR_RTT_DEFAULT 5
+
+#define SCTPCTL_RTTVAR_EQRET_DESC "Whether DCCC increases cwnd when the rtt and bw are unchanged"
+#define SCTPCTL_RTTVAR_EQRET_MIN 0
+#define SCTPCTL_RTTVAR_EQRET_MAX 1
+#define SCTPCTL_RTTVAR_EQRET_DEFAULT 0
+
+#define SCTPCTL_RTTVAR_STEADYS_DESC "Number of identical bw measurements DCCC takes to try step down of cwnd"
+#define SCTPCTL_RTTVAR_STEADYS_MIN 0
+#define SCTPCTL_RTTVAR_STEADYS_MAX 0xFFFF
+#define SCTPCTL_RTTVAR_STEADYS_DEFAULT 20 /* 0 means disable feature */
+
+#define SCTPCTL_RTTVAR_DCCCECN_DESC "Enable ECN for DCCC."
+#define SCTPCTL_RTTVAR_DCCCECN_MIN 0
+#define SCTPCTL_RTTVAR_DCCCECN_MAX 1
+#define SCTPCTL_RTTVAR_DCCCECN_DEFAULT 1 /* 0 means disable feature */
+
+#define SCTPCTL_BLACKHOLE_DESC "Enable SCTP blackholing, see blackhole(4) for more details"
+#define SCTPCTL_BLACKHOLE_MIN 0
+#define SCTPCTL_BLACKHOLE_MAX 2
+#define SCTPCTL_BLACKHOLE_DEFAULT SCTPCTL_BLACKHOLE_MIN
+
+/* sendall_limit: Maximum size of a message sent with SCTP_SENDALL */
+#define SCTPCTL_SENDALL_LIMIT_DESC "Maximum size of a message sent with SCTP_SENDALL"
+#define SCTPCTL_SENDALL_LIMIT_MIN 0
+#define SCTPCTL_SENDALL_LIMIT_MAX 0xFFFFFFFF
+#define SCTPCTL_SENDALL_LIMIT_DEFAULT 1432
+
+#define SCTPCTL_DIAG_INFO_CODE_DESC "Diagnostic information error cause code"
+#define SCTPCTL_DIAG_INFO_CODE_MIN 0
+#define SCTPCTL_DIAG_INFO_CODE_MAX 65535
+#define SCTPCTL_DIAG_INFO_CODE_DEFAULT 0
+
+#if defined(SCTP_DEBUG)
+/* debug: Configure debug output */
+#define SCTPCTL_DEBUG_DESC "Configure debug output"
+#define SCTPCTL_DEBUG_MIN 0
+#define SCTPCTL_DEBUG_MAX 0xFFFFFFFF
+#define SCTPCTL_DEBUG_DEFAULT 0
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#define SCTPCTL_MAIN_TIMER_DESC "Main timer interval in ms"
+#define SCTPCTL_MAIN_TIMER_MIN 1
+#define SCTPCTL_MAIN_TIMER_MAX 0xFFFFFFFF
+#define SCTPCTL_MAIN_TIMER_DEFAULT 10
+
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_DESC "Ignore VMware Interfaces"
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_MIN 0
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX 1
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_DEFAULT SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#define SCTPCTL_OUTPUT_UNLOCKED_DESC "Unlock socket when sending packets down to IP"
+#define SCTPCTL_OUTPUT_UNLOCKED_MIN 0
+#define SCTPCTL_OUTPUT_UNLOCKED_MAX 1
+#define SCTPCTL_OUTPUT_UNLOCKED_DEFAULT SCTPCTL_OUTPUT_UNLOCKED_MIN
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#define SCTPCTL_ADDR_WATCHDOG_LIMIT_DESC "Address watchdog limit"
+#define SCTPCTL_ADDR_WATCHDOG_LIMIT_MIN 0
+#define SCTPCTL_ADDR_WATCHDOG_LIMIT_MAX 0xFFFFFFFF
+#define SCTPCTL_ADDR_WATCHDOG_LIMIT_DEFAULT SCTPCTL_ADDR_WATCHDOG_LIMIT_MIN
+
+#define SCTPCTL_VTAG_WATCHDOG_LIMIT_DESC "VTag watchdog limit"
+#define SCTPCTL_VTAG_WATCHDOG_LIMIT_MIN 0
+#define SCTPCTL_VTAG_WATCHDOG_LIMIT_MAX 0xFFFFFFFF
+#define SCTPCTL_VTAG_WATCHDOG_LIMIT_DEFAULT SCTPCTL_VTAG_WATCHDOG_LIMIT_MIN
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+#if defined(SYSCTL_DECL)
+SYSCTL_DECL(_net_inet_sctp);
+#endif
+#endif
+
+void sctp_init_sysctls(void);
+#if defined(_WIN32) && !defined(__Userspace__)
+void sctp_finish_sysctls(void);
+#endif
+
+#endif /* _KERNEL || __Userspace__ */
+#endif /* _NETINET_SCTP_SYSCTL_H_ */
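
Each tunable in this header is described by the same four-macro set (DESC, MIN, MAX, DEFAULT), which is exactly what the RANGECHK pass and the OID registration in sctp_sysctl.c consume. For illustration only, a hypothetical new entry following that convention (every name below is invented, not part of the import):

    /* example_knob: hypothetical tunable, shown only to illustrate the pattern */
    #define SCTPCTL_EXAMPLE_KNOB_DESC    "Hypothetical example tunable"
    #define SCTPCTL_EXAMPLE_KNOB_MIN     0
    #define SCTPCTL_EXAMPLE_KNOB_MAX     0xFFFFFFFF
    #define SCTPCTL_EXAMPLE_KNOB_DEFAULT 1
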
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.c
new file mode 100644
index 000000000..ad062b35e
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.c
@@ -0,0 +1,1633 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 362054 2020-06-11 13:34:09Z tuexen $");
+#endif
+
+#define _IP_VHL
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#ifdef INET6
+#if defined(__FreeBSD__) && defined(__Userspace__)
+#include <netinet6/sctp6_var.h>
+#endif
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_uio.h>
+#if defined(INET) || defined(INET6)
+#if !(defined(_WIN32) && defined(__Userspace__))
+#include <netinet/udp.h>
+#endif
+#endif
+
+void
+sctp_audit_retranmission_queue(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
+ asoc->sent_queue_retran_cnt,
+ asoc->sent_queue_cnt);
+ asoc->sent_queue_retran_cnt = 0;
+ asoc->sent_queue_cnt = 0;
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ asoc->sent_queue_cnt++;
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
+ asoc->sent_queue_retran_cnt,
+ asoc->sent_queue_cnt);
+}
+
+static int
+sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint16_t threshold)
+{
+ if (net) {
+ net->error_count++;
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
+ (void *)net, net->error_count,
+ net->failure_threshold);
+ if (net->error_count > net->failure_threshold) {
+ /* We had a threshold failure */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
+ net->dest_state &= ~SCTP_ADDR_PF;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, 0,
+ (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ } else if ((net->pf_threshold < net->failure_threshold) &&
+ (net->error_count > net->pf_threshold)) {
+ if (!(net->dest_state & SCTP_ADDR_PF)) {
+ net->dest_state |= SCTP_ADDR_PF;
+ net->last_active = sctp_get_tick_count();
+ sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ inp, stcb, net,
+ SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ }
+ }
+ if (stcb == NULL)
+ return (0);
+
+ if (net) {
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_INCR,
+ stcb->asoc.overall_error_count,
+ (stcb->asoc.overall_error_count+1),
+ SCTP_FROM_SCTP_TIMER,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count++;
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_INCR,
+ stcb->asoc.overall_error_count,
+ (stcb->asoc.overall_error_count+1),
+ SCTP_FROM_SCTP_TIMER,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count++;
+ }
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
+ (void *)&stcb->asoc, stcb->asoc.overall_error_count,
+ (uint32_t)threshold,
+ ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
+ /*
+ * We specifically do not do >= to give the assoc one more chance
+ * before we fail it.
+ */
+ if (stcb->asoc.overall_error_count > threshold) {
+ /* Abort notification sends a ULP notify */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Association error counter exceeded");
+ inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2;
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ return (0);
+}
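
sctp_threshold_management() above sorts each destination into one of three zones: error counts at or below pf_threshold leave it active, counts above pf_threshold but not past failure_threshold move it to the potentially-failed (PF) state, and counts beyond failure_threshold mark it unreachable. A standalone restatement of just that classification, with the notification and heartbeat side effects omitted (a sketch, not the import's code):

    #include <stdint.h>

    enum path_zone { PATH_ACTIVE, PATH_PF, PATH_UNREACHABLE };

    /* Mirrors the zone tests above; strictly greater-than
     * comparisons, matching the function's code. */
    static enum path_zone
    classify_path(uint16_t error_count, uint16_t pf_threshold,
                  uint16_t failure_threshold)
    {
            if (error_count > failure_threshold)
                    return (PATH_UNREACHABLE);
            if ((pf_threshold < failure_threshold) &&
                (error_count > pf_threshold))
                    return (PATH_PF);
            return (PATH_ACTIVE);
    }
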
+
+/*
+ * sctp_find_alternate_net() returns a non-NULL pointer as long as
+ * the argument net is non-NULL.
+ */
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int mode)
+{
+ /* Find and return an alternate network if possible */
+ struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
+ int once;
+ /* JRS 5/14/07 - Initialize min_errors to an impossible value. */
+ int min_errors = -1;
+ uint32_t max_cwnd = 0;
+
+ if (stcb->asoc.numnets == 1) {
+ /* No others but net */
+ return (TAILQ_FIRST(&stcb->asoc.nets));
+ }
+ /*
+ * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm.
+ * This algorithm chooses the active destination (not in PF state) with the largest
+ * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose
+ * the destination that is in PF state with the lowest error count. In case of a tie,
+ * choose the destination that was most recently active.
+ */
+ if (mode == 2) {
+ TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+ /* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */
+ if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+ (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ continue;
+ }
+ /*
+ * JRS 5/14/07 - If the destination is reachable but in PF state, compare
+ * the error count of the destination to the minimum error count seen thus far.
+ * Store the destination with the lower error count. If the error counts are
+ * equal, store the destination that was most recently active.
+ */
+ if (mnet->dest_state & SCTP_ADDR_PF) {
+ /*
+ * JRS 5/14/07 - If the destination under consideration is the current
+ * destination, work as if the error count is one higher. The
+ * actual error count will not be incremented until later in the
+ * t3 handler.
+ */
+ if (mnet == net) {
+ if (min_errors == -1) {
+ min_errors = mnet->error_count + 1;
+ min_errors_net = mnet;
+ } else if (mnet->error_count + 1 < min_errors) {
+ min_errors = mnet->error_count + 1;
+ min_errors_net = mnet;
+ } else if (mnet->error_count + 1 == min_errors
+ && mnet->last_active > min_errors_net->last_active) {
+ min_errors_net = mnet;
+ min_errors = mnet->error_count + 1;
+ }
+ continue;
+ } else {
+ if (min_errors == -1) {
+ min_errors = mnet->error_count;
+ min_errors_net = mnet;
+ } else if (mnet->error_count < min_errors) {
+ min_errors = mnet->error_count;
+ min_errors_net = mnet;
+ } else if (mnet->error_count == min_errors
+ && mnet->last_active > min_errors_net->last_active) {
+ min_errors_net = mnet;
+ min_errors = mnet->error_count;
+ }
+ continue;
+ }
+ }
+ /*
+ * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the
+ * cwnd of the destination to the highest cwnd seen thus far. Store the
+ * destination with the higher cwnd value. If the cwnd values are equal,
+ * randomly choose one of the two destinations.
+ */
+ if (max_cwnd < mnet->cwnd) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd;
+ } else if (max_cwnd == mnet->cwnd) {
+ uint32_t rndval;
+ uint8_t this_random;
+
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ }
+ if (this_random % 2 == 1) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd; /* Useless? */
+ }
+ }
+ }
+ if (max_cwnd_net == NULL) {
+ if (min_errors_net == NULL) {
+ return (net);
+ }
+ return (min_errors_net);
+ } else {
+ return (max_cwnd_net);
+ }
+ } /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. */
+ else if (mode == 1) {
+ TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+ if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+ (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ /*
+ * will skip ones that are not-reachable or
+ * unconfirmed
+ */
+ continue;
+ }
+ if (max_cwnd < mnet->cwnd) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd;
+ } else if (max_cwnd == mnet->cwnd) {
+ uint32_t rndval;
+ uint8_t this_random;
+
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx = 0;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ }
+ if (this_random % 2) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd;
+ }
+ }
+ }
+ if (max_cwnd_net) {
+ return (max_cwnd_net);
+ }
+ }
+ mnet = net;
+ once = 0;
+
+ if (mnet == NULL) {
+ mnet = TAILQ_FIRST(&stcb->asoc.nets);
+ if (mnet == NULL) {
+ return (NULL);
+ }
+ }
+ for (;;) {
+ alt = TAILQ_NEXT(mnet, sctp_next);
+ if (alt == NULL) {
+ once++;
+ if (once > 1) {
+ break;
+ }
+ alt = TAILQ_FIRST(&stcb->asoc.nets);
+ if (alt == NULL) {
+ return (NULL);
+ }
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (alt->ro.ro_nh == NULL) {
+#else
+ if (alt->ro.ro_rt == NULL) {
+#endif
+ if (alt->ro._s_addr) {
+ sctp_free_ifa(alt->ro._s_addr);
+ alt->ro._s_addr = NULL;
+ }
+ alt->src_addr_selected = 0;
+ }
+ if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ (alt->ro.ro_nh != NULL) &&
+#else
+ (alt->ro.ro_rt != NULL) &&
+#endif
+ (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+ /* Found a reachable address */
+ break;
+ }
+ mnet = alt;
+ }
+
+ if (alt == NULL) {
+ /* Case where NO in-service network exists (dormant state) */
+ /* we rotate destinations */
+ once = 0;
+ mnet = net;
+ for (;;) {
+ if (mnet == NULL) {
+ return (TAILQ_FIRST(&stcb->asoc.nets));
+ }
+ alt = TAILQ_NEXT(mnet, sctp_next);
+ if (alt == NULL) {
+ once++;
+ if (once > 1) {
+ break;
+ }
+ alt = TAILQ_FIRST(&stcb->asoc.nets);
+ if (alt == NULL) {
+ break;
+ }
+ }
+ if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+ (alt != net)) {
+ /* Found an alternate address */
+ break;
+ }
+ mnet = alt;
+ }
+ }
+ if (alt == NULL) {
+ return (net);
+ }
+ return (alt);
+}
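
The mode == 2 branch above reduces to a two-level preference: among reachable, confirmed destinations, any non-PF path with the largest cwnd wins; only when every candidate is in PF state does the lowest error count decide, with ties going to the most recently active path. A compressed sketch of that PF tie-break (field names as in struct sctp_nets; a simplification, not the import's code):

    /* Returns nonzero when 'a' should be preferred over 'b' among
     * PF-state candidates, per the mode == 2 comparison above. */
    static int
    better_pf_candidate(const struct sctp_nets *a, const struct sctp_nets *b)
    {
            if (a->error_count != b->error_count)
                    return (a->error_count < b->error_count);
            /* Equal error counts: prefer the more recently active path. */
            return (a->last_active > b->last_active);
    }
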
+
+static void
+sctp_backoff_on_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int win_probe,
+ int num_marked, int num_abandoned)
+{
+ if (net->RTO == 0) {
+ if (net->RTO_measured) {
+ net->RTO = stcb->asoc.minrto;
+ } else {
+ net->RTO = stcb->asoc.initial_rto;
+ }
+ }
+ net->RTO <<= 1;
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ if ((win_probe == 0) && (num_marked || num_abandoned)) {
+ /* We don't apply penalty to window probe scenarios */
+ /* JRS - Use the congestion control given in the CC module */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
+ }
+}
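
sctp_backoff_on_timeout() doubles the RTO on each expiry and saturates at the association's maxrto, so assuming the conventional RFC 4960 bounds (3 s initial RTO, 60 s maximum) a fresh path retransmits after 3, 6, 12, 24, 48 and then 60 second intervals thereafter. A standalone restatement of the arithmetic (a sketch; the real function also invokes the CC module's timeout handler):

    #include <stdint.h>

    /* Exponential backoff with saturation, as in sctp_backoff_on_timeout(). */
    static uint32_t
    backoff_rto(uint32_t rto, uint32_t initial_rto, uint32_t minrto,
                uint32_t maxrto, int rto_measured)
    {
            if (rto == 0)
                    rto = rto_measured ? minrto : initial_rto;
            rto <<= 1;
            return ((rto > maxrto) ? maxrto : rto);
    }
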
+
+#ifndef INVARIANTS
+static void
+sctp_recover_sent_list(struct sctp_tcb *stcb)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_association *asoc;
+
+ asoc = &stcb->asoc;
+ TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.tsn)) {
+ SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
+ (void *)chk, chk->rec.data.tsn, asoc->last_acked_seq);
+ if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+ }
+ }
+ if ((asoc->strmout[chk->rec.data.sid].chunks_on_queues == 0) &&
+ (asoc->strmout[chk->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&asoc->strmout[chk->rec.data.sid].outqueue)) {
+ asoc->trigger_reset = 1;
+ }
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ if (PR_SCTP_ENABLED(chk->flags)) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ if (chk->data) {
+ /*sa_ignore NO_NULL_CHK*/
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(chk->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ SCTP_PRINTF("after recover order is as follows\n");
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.tsn);
+ }
+}
+#endif
+
+static int
+sctp_mark_all_for_resend(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct sctp_nets *alt,
+ int window_probe,
+ int *num_marked,
+ int *num_abandoned)
+{
+
+ /*
+ * Mark all chunks (well not all) that were sent to *net for
+ * retransmission. Move them to alt for their destination as well...
+ * We only mark chunks that have been outstanding long enough to
+ * have received feedback.
+ */
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_nets *lnets;
+ struct timeval now, min_wait, tv;
+ int cur_rto;
+ int cnt_abandoned;
+ int audit_tf, num_mk, fir;
+ unsigned int cnt_mk;
+ uint32_t orig_flight, orig_tf;
+ uint32_t tsnlast, tsnfirst;
+ int recovery_cnt = 0;
+
+
+ /* none in flight now */
+ audit_tf = 0;
+ fir = 0;
+ /*
+ * figure out how long a data chunk must be pending before we can
+ * mark it ..
+ */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ /* get cur rto in micro-seconds */
+ cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+ cur_rto *= 1000;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(cur_rto,
+ stcb->asoc.peers_rwnd,
+ window_probe,
+ SCTP_FR_T3_MARK_TIME);
+ sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT);
+ sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
+ }
+ tv.tv_sec = cur_rto / 1000000;
+ tv.tv_usec = cur_rto % 1000000;
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ timersub(&now, &tv, &min_wait);
+#else
+ min_wait = now;
+ timevalsub(&min_wait, &tv);
+#endif
+ if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
+ /*
+ * if we hit here, we don't have enough seconds on the clock
+ * to account for the RTO. We just let the lower seconds be
+ * the bounds and don't worry about it. This may mean we
+ * will mark a lot more than we should.
+ */
+ min_wait.tv_sec = min_wait.tv_usec = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
+ sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
+ }
+ /*
+ * Our rwnd will be incorrect here since we are not adding back the
+ * cnt * mbuf but we will fix that down below.
+ */
+ orig_flight = net->flight_size;
+ orig_tf = stcb->asoc.total_flight;
+
+ net->fast_retran_ip = 0;
+ /* Now on to each chunk */
+ cnt_abandoned = 0;
+ num_mk = cnt_mk = 0;
+ tsnfirst = tsnlast = 0;
+#ifndef INVARIANTS
+ start_again:
+#endif
+ TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
+ if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.tsn)) {
+ /* Strange case: our list got out of order? */
+ SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
+ (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.tsn);
+ recovery_cnt++;
+#ifdef INVARIANTS
+ panic("last acked >= chk on sent-Q");
+#else
+ SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
+ sctp_recover_sent_list(stcb);
+ if (recovery_cnt < 10) {
+ goto start_again;
+ } else {
+ SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
+ }
+#endif
+ }
+ if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
+ /*
+ * found one to mark: If it is less than
+ * DATAGRAM_ACKED it MUST not be a skipped or marked
+ * TSN but instead one that is either already set
+ * for retransmission OR one that needs
+ * retransmission.
+ */
+
+ /* validate it's been outstanding long enough */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(chk->rec.data.tsn,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_MARK_TIME);
+ }
+ if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
+ /*
+ * we have reached a chunk that was sent
+ * some seconds past our min... forget it; we
+ * will find no more to send.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(0,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_STOPPED);
+ }
+ continue;
+ } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
+ (window_probe == 0)) {
+ /*
+ * we must look at the microseconds to
+ * know.
+ */
+ if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
+ /*
+ * ok it was sent after our boundary
+ * time.
+ */
+ continue;
+ }
+ }
+ if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) {
+ /* Is it expired? */
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (timercmp(&now, &chk->rec.data.timetodrop, >)) {
+#else
+ if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
+#endif
+ /* Yes so drop it */
+ if (chk->data) {
+ (void)sctp_release_pr_sctp_chunk(stcb,
+ chk,
+ 1,
+ SCTP_SO_NOT_LOCKED);
+ cnt_abandoned++;
+ }
+ continue;
+ }
+ }
+ if (stcb->asoc.prsctp_supported && PR_SCTP_RTX_ENABLED(chk->flags)) {
+ /* Has it been retransmitted tv_sec times? */
+ if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
+ if (chk->data) {
+ (void)sctp_release_pr_sctp_chunk(stcb,
+ chk,
+ 1,
+ SCTP_SO_NOT_LOCKED);
+ cnt_abandoned++;
+ }
+ continue;
+ }
+ }
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ num_mk++;
+ if (fir == 0) {
+ fir = 1;
+ tsnfirst = chk->rec.data.tsn;
+ }
+ tsnlast = chk->rec.data.tsn;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(chk->rec.data.tsn, chk->snd_count,
+ 0, SCTP_FR_T3_MARKED);
+ }
+
+ if (chk->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ chk->whoTo->cwnd -= chk->book_size;
+ chk->rec.data.chunk_was_revoked = 0;
+ }
+ net->marked_retrans++;
+ stcb->asoc.marked_retrans++;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
+ chk->whoTo->flight_size,
+ chk->book_size,
+ (uint32_t)(uintptr_t)chk->whoTo,
+ chk->rec.data.tsn);
+ }
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ stcb->asoc.peers_rwnd += chk->send_size;
+ stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
+ }
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ SCTP_STAT_INCR(sctps_markedretrans);
+
+ /* reset the TSN for striking and other FR stuff */
+ chk->rec.data.doing_fast_retransmit = 0;
+ /* Clear any time so NO RTT is being done */
+
+ if (chk->do_rtt) {
+ if (chk->whoTo->rto_needed == 0) {
+ chk->whoTo->rto_needed = 1;
+ }
+ }
+ chk->do_rtt = 0;
+ if (alt != net) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->no_fr_allowed = 1;
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ } else {
+ chk->no_fr_allowed = 0;
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+ chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+ } else {
+ chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
+ }
+ }
+ /* CMT: Do not allow FRs on retransmitted TSNs. */
+ if (stcb->asoc.sctp_cmt_on_off > 0) {
+ chk->no_fr_allowed = 1;
+ }
+#ifdef THIS_SHOULD_NOT_BE_DONE
+ } else if (chk->sent == SCTP_DATAGRAM_ACKED) {
+ /* remember highest acked one */
+ could_be_sent = chk;
+#endif
+ }
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ cnt_mk++;
+ }
+ }
+ if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
+ /* we did not subtract the same amounts? */
+ audit_tf = 1;
+ }
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
+ }
+#ifdef SCTP_DEBUG
+ if (num_mk) {
+ SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
+ tsnlast);
+ SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%u\n",
+ num_mk,
+ stcb->asoc.peers_rwnd);
+ }
+#endif
+ *num_marked = num_mk;
+ *num_abandoned = cnt_abandoned;
+ /* Now check for an ECN Echo that may be stranded, and
+ * include the cnt_mk'd to have all resends in the
+ * control queue.
+ */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ cnt_mk++;
+ }
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cnt_mk++;
+ }
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+#ifdef THIS_SHOULD_NOT_BE_DONE
+ if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
+ /* fix it so we retransmit the highest acked anyway */
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cnt_mk++;
+ could_be_sent->sent = SCTP_DATAGRAM_RESEND;
+ }
+#endif
+ if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
+#ifdef INVARIANTS
+ SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
+ cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
+#endif
+#ifndef SCTP_AUDITING_ENABLED
+ stcb->asoc.sent_queue_retran_cnt = cnt_mk;
+#endif
+ }
+ if (audit_tf) {
+ SCTPDBG(SCTP_DEBUG_TIMER4,
+ "Audit total flight due to negative value net:%p\n",
+ (void *)net);
+ stcb->asoc.total_flight = 0;
+ stcb->asoc.total_flight_count = 0;
+ /* Clear all networks flight size */
+ TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
+ lnets->flight_size = 0;
+ SCTPDBG(SCTP_DEBUG_TIMER4,
+ "Net:%p c-f cwnd:%d ssthresh:%d\n",
+ (void *)lnets, lnets->cwnd, lnets->ssthresh);
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
+ chk->whoTo->flight_size,
+ chk->book_size,
+ (uint32_t)(uintptr_t)chk->whoTo,
+ chk->rec.data.tsn);
+ }
+
+ sctp_flight_size_increase(chk);
+ sctp_total_flight_increase(stcb, chk);
+ }
+ }
+ }
+ /* Note: the return value is unused; the lone caller ignores it */
+ return (0);
+}
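
The marking cutoff in sctp_mark_all_for_resend() is "now minus the current
RTO": a chunk is only eligible for T3 marking if it was sent before min_wait,
i.e. it has been outstanding long enough that feedback could have arrived.
A small sketch of that timeval arithmetic using the BSD timersub() macro
(the 250 ms RTO is an assumed demo value):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval now, rto_tv, min_wait;
	long cur_rto_us = 250000;	/* pretend smoothed RTO: 250 ms */

	gettimeofday(&now, NULL);
	rto_tv.tv_sec = cur_rto_us / 1000000;
	rto_tv.tv_usec = cur_rto_us % 1000000;
	/* min_wait = now - RTO; anything sent before this is old enough */
	timersub(&now, &rto_tv, &min_wait);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/* clock too close to the epoch: clamp, as the code above does */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	printf("now=%ld.%06ld cutoff=%ld.%06ld\n",
	       (long)now.tv_sec, (long)now.tv_usec,
	       (long)min_wait.tv_sec, (long)min_wait.tv_usec);
	return 0;
}
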
+
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ int win_probe, num_mk, num_abandoned;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ struct sctp_nets *lnet;
+
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if (net == lnet) {
+ sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
+ } else {
+ sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
+ }
+ }
+ }
+ /* Find an alternate and mark those for retransmission */
+ if ((stcb->asoc.peers_rwnd == 0) &&
+ (stcb->asoc.total_flight < net->mtu)) {
+ SCTP_STAT_INCR(sctps_timowindowprobe);
+ win_probe = 1;
+ } else {
+ win_probe = 0;
+ }
+
+ if (win_probe == 0) {
+ /* We don't do normal threshold management on window probes */
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_send_times)) {
+ /* Association was destroyed */
+ return (1);
+ } else {
+ if (net != stcb->asoc.primary_destination) {
+ /* send an immediate HB if our RTO is stale */
+ struct timeval now;
+ unsigned int ms_goneby;
+
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ if (net->last_sent_time.tv_sec) {
+ ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
+ } else {
+ ms_goneby = 0;
+ }
+ if ((net->dest_state & SCTP_ADDR_PF) == 0) {
+ if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
+ /*
+ * no recent feedback in an RTO or
+ * more, request an RTT update
+ */
+ sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ }
+ }
+ } else {
+ /*
+ * For a window probe we don't penalize the nets but only
+ * the association. This may fail the association if SACKs
+ * are not coming back. If SACKs are coming with rwnd locked
+ * at 0, we will continue to hold things waiting for rwnd to rise
+ */
+ if (sctp_threshold_management(inp, stcb, NULL,
+ stcb->asoc.max_send_times)) {
+ /* Association was destroyed */
+ return (1);
+ }
+ }
+ if (stcb->asoc.sctp_cmt_on_off > 0) {
+ if (net->pf_threshold < net->failure_threshold) {
+ alt = sctp_find_alternate_net(stcb, net, 2);
+ } else {
+ /*
+ * CMT: Using RTX_SSTHRESH policy for CMT.
+ * If CMT is being used, then pick dest with
+ * largest ssthresh for any retransmission.
+ */
+ alt = sctp_find_alternate_net(stcb, net, 1);
+ /*
+ * CUCv2: If a different dest is picked for
+ * the retransmission, then new
+ * (rtx-)pseudo_cumack needs to be tracked
+ * for orig dest. Let CUCv2 track new (rtx-)
+ * pseudo-cumack always.
+ */
+ net->find_pseudo_cumack = 1;
+ net->find_rtx_pseudo_cumack = 1;
+ }
+ } else {
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ }
+
+ num_mk = 0;
+ num_abandoned = 0;
+ (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
+ &num_mk, &num_abandoned);
+ /* FR Loss recovery just ended with the T3. */
+ stcb->asoc.fast_retran_loss_recovery = 0;
+
+ /* CMT FR loss recovery ended with the T3 */
+ net->fast_retran_loss_recovery = 0;
+ if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
+ (net->flight_size == 0)) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
+ }
+
+ /*
+ * setup the sat loss recovery that prevents satellite cwnd advance.
+ */
+ stcb->asoc.sat_t3_loss_recovery = 1;
+ stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
+
+ /* Backoff the timer and cwnd */
+ sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
+ if ((!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
+ (net->dest_state & SCTP_ADDR_PF)) {
+ /* Move all pending over too */
+ sctp_move_chunks_from_net(stcb, net);
+
+ /* Get the address that failed, to
+ * force a new src address selection and
+ * a route allocation.
+ */
+ if (net->ro._s_addr) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+
+ /* Force a route allocation too */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ RO_NHFREE(&net->ro);
+#else
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+#endif
+
+ /* Was it our primary? */
+ if ((stcb->asoc.primary_destination == net) && (alt != net)) {
+ /*
+ * Yes, note it as such and find an alternate. Note: this
+ * means the HB code must use this to re-send to the
+ * primary if it goes active AND if someone does a
+ * change-primary then this flag must be cleared
+ * from any net structures.
+ */
+ if (stcb->asoc.alternate) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ }
+ stcb->asoc.alternate = alt;
+ atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
+ }
+ }
+ /*
+ * Special case for the cookie-echoed case: we don't do output but
+ * must await the COOKIE-ACK before retransmission
+ */
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
+ /*
+ * Here we just reset the timer and start again since we
+ * have not established the asoc
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ return (0);
+ }
+ if (stcb->asoc.prsctp_supported) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
+ send_forward_tsn(stcb, &stcb->asoc);
+ for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
+ if (lchk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (lchk != NULL) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
+ }
+ return (0);
+}
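
The window-probe classification at the top of sctp_t3rxt_timer() is worth
spelling out: a T3 expiry only counts as a window probe when the peer
advertises a zero receive window and less than one MTU is in flight, and in
that case the error threshold is charged to the association rather than the
net. A one-predicate sketch (field names abbreviated, values illustrative):

#include <stdint.h>
#include <stdio.h>

static int is_window_probe(uint32_t peers_rwnd, uint32_t total_flight,
                           uint32_t mtu)
{
	return (peers_rwnd == 0) && (total_flight < mtu);
}

int main(void)
{
	printf("%d\n", is_window_probe(0, 512, 1500));  /* 1: window probe */
	printf("%d\n", is_window_probe(0, 2048, 1500)); /* 0: genuine loss */
	return 0;
}
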
+
+int
+sctp_t1init_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /* bump the thresholds */
+ if (stcb->asoc.delayed_connection) {
+ /*
+ * special hook for delayed connection. The library did NOT
+ * complete the rest of its sends.
+ */
+ stcb->asoc.delayed_connection = 0;
+ sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
+ return (0);
+ }
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) {
+ return (0);
+ }
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_init_times)) {
+ /* Association was destroyed */
+ return (1);
+ }
+ stcb->asoc.dropped_special_cnt = 0;
+ sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
+ if (stcb->asoc.initial_init_rto_max < net->RTO) {
+ net->RTO = stcb->asoc.initial_init_rto_max;
+ }
+ if (stcb->asoc.numnets > 1) {
+ /* If we have more than one addr use it */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
+ if (alt != stcb->asoc.primary_destination) {
+ sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
+ stcb->asoc.primary_destination = alt;
+ }
+ }
+ /* Send out a new init */
+ sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
+ return (0);
+}
+
+/*
+ * For cookie and asconf we actually need to find and mark for resend, then
+ * increment the resend counter (after all the threshold management stuff of
+ * course).
+ */
+int
+sctp_cookie_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net SCTP_UNUSED)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *cookie;
+
+ /* first before all else we must find the cookie */
+ TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
+ if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ break;
+ }
+ }
+ if (cookie == NULL) {
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
+ /* FOOBAR! */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Cookie timer expired, but no cookie");
+ inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ } else {
+#ifdef INVARIANTS
+ panic("Cookie timer expires in wrong state?");
+#else
+ SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(stcb));
+ return (0);
+#endif
+ }
+ return (0);
+ }
+ /* Ok we found the cookie, threshold management next */
+ if (sctp_threshold_management(inp, stcb, cookie->whoTo,
+ stcb->asoc.max_init_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * Cleared threshold management; now let's back off the address
+ * and select an alternate
+ */
+ stcb->asoc.dropped_special_cnt = 0;
+ sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
+ alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
+ if (alt != cookie->whoTo) {
+ sctp_free_remote_addr(cookie->whoTo);
+ cookie->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ /* Now mark the retran info */
+ if (cookie->sent != SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ cookie->sent = SCTP_DATAGRAM_RESEND;
+ cookie->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ /*
+ * Now call the output routine to kick out the cookie again. Note we
+ * don't mark any chunks for retransmission, so FR will need to kick
+ * in to move these (or a send timer).
+ */
+ return (0);
+}
+
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ struct sctp_nets *alt, *net;
+ struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
+
+ if (stcb->asoc.stream_reset_outstanding == 0) {
+ return (0);
+ }
+ /* find the existing STRRESET, we use the seq number we sent out on */
+ (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
+ if (strrst == NULL) {
+ return (0);
+ }
+ net = strrst->whoTo;
+ /* do threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * Cleared threshold management; now let's back off the address
+ * and select an alternate
+ */
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ strrst->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+
+ /* See if an ECN Echo is also stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+ /*
+ * If the address went un-reachable, we need to move to
+ * alternates for ALL chk's in queue
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ sctp_free_remote_addr(net);
+
+ /* mark the retran info */
+ if (strrst->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ strrst->sent = SCTP_DATAGRAM_RESEND;
+ strrst->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+
+ /* restart the timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, alt);
+ return (0);
+}
+
+int
+sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *asconf, *chk;
+
+ /* is this a first send, or a retransmission? */
+ if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
+ /* compose a new ASCONF chunk and send it */
+ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+ } else {
+ /*
+ * Retransmission of the existing ASCONF is needed
+ */
+
+ /* find the existing ASCONF */
+ asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
+ if (asconf == NULL) {
+ return (0);
+ }
+ net = asconf->whoTo;
+ /* do threshold management */
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ if (asconf->snd_count > stcb->asoc.max_send_times) {
+ /*
+ * Something is rotten: our peer is not responding to
+ * ASCONFs but apparently is to other chunks, i.e. it
+ * is not properly handling the chunk type upper bits.
+ * Mark this peer as ASCONF-incapable and clean up.
+ */
+ SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
+ sctp_asconf_cleanup(stcb);
+ return (0);
+ }
+ /*
+ * cleared threshold management, so now back off the net and
+ * select an alternate
+ */
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ if (asconf->whoTo != alt) {
+ asconf->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+
+ /* See if an ECN Echo is also stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
+ if (chk->whoTo != alt) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+ /*
+ * If the address went un-reachable, we need to move
+ * to the alternate for ALL chunks in queue
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ sctp_free_remote_addr(net);
+
+ /* mark the retran info */
+ if (asconf->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ asconf->sent = SCTP_DATAGRAM_RESEND;
+ asconf->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+
+ /* send another ASCONF if any remain and we are able to */
+ sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
+ }
+ return (0);
+}
+
+/* Mobility adaptation */
+void
+sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ if (stcb->asoc.deleted_primary == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ return;
+ }
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+ sctp_free_remote_addr(stcb->asoc.deleted_primary);
+ stcb->asoc.deleted_primary = NULL;
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ return;
+}
+
+/*
+ * For the shutdown and shutdown-ack, we do not keep one around on the
+ * control queue. This means we must generate a new one and call the general
+ * chunk output routine, AFTER having done threshold management.
+ * It is assumed that net is non-NULL.
+ */
+int
+sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+
+ /* first threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ /* second select an alternative */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+
+ /* third, generate a shutdown into the queue for our net */
+ sctp_send_shutdown(stcb, alt);
+
+ /* fourth restart timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
+ return (0);
+}
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+
+ /* first threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ /* second select an alternative */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+
+ /* third, generate a shutdown-ack into the queue for our net */
+ sctp_send_shutdown_ack(stcb, alt);
+
+ /* fourth restart timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
+ return (0);
+}
+
+static void
+sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_stream_queue_pending *sp;
+ unsigned int i, chks_in_queue = 0;
+ int being_filled = 0;
+ /*
+ * This function is ONLY called when the send/sent queues are empty.
+ */
+ if ((stcb == NULL) || (inp == NULL))
+ return;
+
+ if (stcb->asoc.sent_queue_retran_cnt) {
+ SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
+ stcb->asoc.sent_queue_retran_cnt);
+ stcb->asoc.sent_queue_retran_cnt = 0;
+ }
+ if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
+ /* No stream scheduler information, initialize scheduler */
+ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0);
+ if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
+ /* yep, we lost a stream or two */
+ SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
+ } else {
+ /* no streams lost */
+ stcb->asoc.total_output_queue_size = 0;
+ }
+ }
+ /* Check to see if some data queued, if so report it */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
+ if (sp->msg_is_complete)
+ being_filled++;
+ chks_in_queue++;
+ }
+ }
+ }
+ if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
+ SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
+ stcb->asoc.stream_queue_cnt, chks_in_queue);
+ }
+ if (chks_in_queue) {
+ /* call the output queue function */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+ (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ /*
+ * Probably should go in and make it go back through
+ * and add fragments allowed
+ */
+ if (being_filled == 0) {
+ SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
+ chks_in_queue);
+ }
+ }
+ } else {
+ SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
+ (u_long)stcb->asoc.total_output_queue_size);
+ stcb->asoc.total_output_queue_size = 0;
+ }
+}
+
+int
+sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ uint8_t net_was_pf;
+
+ if (net->dest_state & SCTP_ADDR_PF) {
+ net_was_pf = 1;
+ } else {
+ net_was_pf = 0;
+ }
+ if (net->hb_responded == 0) {
+ if (net->ro._s_addr) {
+ /* Invalidate the src address if we did not get
+ * a response last time.
+ */
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ }
+ /* Zero PBA, if it needs it */
+ if (net->partial_bytes_acked) {
+ net->partial_bytes_acked = 0;
+ }
+ if ((stcb->asoc.total_output_queue_size > 0) &&
+ (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+ (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ sctp_audit_stream_queues_for_size(inp, stcb);
+ }
+ if (!(net->dest_state & SCTP_ADDR_NOHB) &&
+ !((net_was_pf == 0) && (net->dest_state & SCTP_ADDR_PF))) {
+ /* when moving to PF during threshold management, an HB has
+ been queued in that routine */
+ uint32_t ms_gone_by;
+
+ if ((net->last_sent_time.tv_sec > 0) ||
+ (net->last_sent_time.tv_usec > 0)) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct timeval diff;
+
+ SCTP_GETTIME_TIMEVAL(&diff);
+ timevalsub(&diff, &net->last_sent_time);
+#else
+ struct timeval diff, now;
+
+ SCTP_GETTIME_TIMEVAL(&now);
+ timersub(&now, &net->last_sent_time, &diff);
+#endif
+ ms_gone_by = (uint32_t)(diff.tv_sec * 1000) +
+ (uint32_t)(diff.tv_usec / 1000);
+ } else {
+ ms_gone_by = 0xffffffff;
+ }
+ if ((ms_gone_by >= net->heart_beat_delay) ||
+ (net->dest_state & SCTP_ADDR_PF)) {
+ sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ return (0);
+}
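
The elapsed-time computation in sctp_heartbeat_timer() reduces to "convert a
timeval delta to milliseconds, and treat a never-sent net as infinitely old"
so that a heartbeat always goes out on a fresh path. A compact sketch
(ms_since() is an illustrative helper, not a usrsctp function):

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

/* ms since 'last', or UINT32_MAX if nothing was ever sent (mirrors the
 * 0xffffffff fallback above). */
static uint32_t ms_since(const struct timeval *last)
{
	struct timeval now, diff;

	if (last->tv_sec == 0 && last->tv_usec == 0)
		return UINT32_MAX;
	gettimeofday(&now, NULL);
	timersub(&now, last, &diff);
	return (uint32_t)(diff.tv_sec * 1000) + (uint32_t)(diff.tv_usec / 1000);
}

int main(void)
{
	struct timeval never = { 0, 0 };

	printf("never sent -> %u ms\n", ms_since(&never));
	return 0;
}
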
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ uint32_t next_mtu, mtu;
+
+ next_mtu = sctp_get_next_mtu(net->mtu);
+
+ if ((next_mtu > net->mtu) && (net->port == 0)) {
+ if ((net->src_addr_selected == 0) ||
+ (net->ro._s_addr == NULL) ||
+ (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
+ if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ } else if (net->ro._s_addr == NULL) {
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* KAME hack: embed scopeid */
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+ (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL);
+#else
+ (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL);
+#endif
+#elif defined(SCTP_KAME)
+ (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+#else
+ (void)in6_embedscope(&sin6->sin6_addr, sin6);
+#endif
+ }
+#endif
+
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb,
+ (sctp_route_t *)&net->ro,
+ net, 0, stcb->asoc.vrf_id);
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(sin6);
+#else
+ (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif /* SCTP_KAME */
+ }
+#endif /* INET6 */
+ }
+ if (net->ro._s_addr)
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_nh);
+#else
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
+#endif
+#if defined(INET) || defined(INET6)
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+#endif
+ if (mtu > next_mtu) {
+ net->mtu = next_mtu;
+ } else {
+ net->mtu = mtu;
+ }
+ }
+ }
+ /* restart the timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+}
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ struct timeval tn, *tim_touse;
+ struct sctp_association *asoc;
+ uint32_t ticks_gone_by;
+
+ (void)SCTP_GETTIME_TIMEVAL(&tn);
+ if (stcb->asoc.sctp_autoclose_ticks > 0 &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ /* Auto close is on */
+ asoc = &stcb->asoc;
+ /* pick the time to use */
+ if (asoc->time_last_rcvd.tv_sec >
+ asoc->time_last_sent.tv_sec) {
+ tim_touse = &asoc->time_last_rcvd;
+ } else {
+ tim_touse = &asoc->time_last_sent;
+ }
+ /* Now, has enough time transpired to autoclose? */
+ ticks_gone_by = sctp_secs_to_ticks((uint32_t)(tn.tv_sec - tim_touse->tv_sec));
+ if (ticks_gone_by >= asoc->sctp_autoclose_ticks) {
+ /*
+ * autoclose time has hit: call the output routine (which
+ * should do nothing) just to be SURE we don't have hanging
+ * data. We can then safely check the queues and know that
+ * we are clear to send shutdown.
+ */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
+ /* Are we clean? */
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue)) {
+ /*
+ * there is nothing queued to send, so I'm
+ * done...
+ */
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
+ /* only send SHUTDOWN 1st time thru */
+ struct sctp_nets *net;
+
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (stcb->asoc.alternate) {
+ net = stcb->asoc.alternate;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ sctp_send_shutdown(stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ }
+ } else {
+ /*
+ * No auto close at this time, reset t-o to check
+ * later
+ */
+ uint32_t tmp;
+
+ /* fool the timer startup to use the time left */
+ tmp = asoc->sctp_autoclose_ticks;
+ asoc->sctp_autoclose_ticks -= ticks_gone_by;
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
+ /* restore the real tick value */
+ asoc->sctp_autoclose_ticks = tmp;
+ }
+ }
+}
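
Autoclose measures idleness from whichever of last-receive/last-send is more
recent, and only starts shutdown once that gap reaches the configured limit;
otherwise the timer is restarted with the remaining time. A condensed sketch
in whole seconds (sctp_secs_to_ticks() is just a unit conversion and is
elided; the helper name is illustrative):

#include <stdio.h>
#include <time.h>

/* Nonzero when the association has been idle long enough to shut down. */
static int autoclose_due(time_t last_rcvd, time_t last_sent,
                         time_t now, unsigned int autoclose_secs)
{
	time_t tim_touse = (last_rcvd > last_sent) ? last_rcvd : last_sent;

	return (now - tim_touse) >= (time_t)autoclose_secs;
}

int main(void)
{
	time_t now = 1000;

	printf("%d\n", autoclose_due(900, 950, now, 30)); /* 1: 50 s idle */
	printf("%d\n", autoclose_due(990, 950, now, 30)); /* 0: 10 s idle */
	return 0;
}
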
+
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.h
new file mode 100644
index 000000000..ee1c49308
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_timer.h
@@ -0,0 +1,104 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.h 359195 2020-03-21 16:12:19Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_TIMER_H_
+#define _NETINET_SCTP_TIMER_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#define SCTP_RTT_SHIFT 3
+#define SCTP_RTT_VAR_SHIFT 2
+
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *, struct sctp_nets *, int);
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_t1init_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_shutdown_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_heartbeat_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_cookie_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_strreset_timer(struct sctp_inpcb *, struct sctp_tcb *);
+
+int
+sctp_asconf_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_delete_prim_timer(struct sctp_inpcb *, struct sctp_tcb *);
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *, struct sctp_tcb *);
+
+void sctp_audit_retranmission_queue(struct sctp_association *);
+
+void sctp_iterator_timer(struct sctp_iterator *it);
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+void sctp_slowtimo(void);
+#else
+void sctp_gc(struct inpcbinfo *);
+#endif
+#endif
+
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_uio.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_uio.h
new file mode 100644
index 000000000..6298f9334
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_uio.h
@@ -0,0 +1,1361 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_uio.h 362473 2020-06-21 23:12:56Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_UIO_H_
+#define _NETINET_SCTP_UIO_H_
+
+#if (defined(__APPLE__) && !defined(__Userspace__) && defined(KERNEL))
+#ifndef _KERNEL
+#define _KERNEL
+#endif
+#endif
+
+#if !defined(_WIN32)
+#if !defined(_KERNEL)
+#include <stdint.h>
+#endif
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+#pragma warning(push)
+#pragma warning(disable: 4200)
+#if defined(_KERNEL)
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+#endif
+
+typedef uint32_t sctp_assoc_t;
+
+#define SCTP_FUTURE_ASSOC 0
+#define SCTP_CURRENT_ASSOC 1
+#define SCTP_ALL_ASSOC 2
+
+struct sctp_event {
+ sctp_assoc_t se_assoc_id;
+ uint16_t se_type;
+ uint8_t se_on;
+};
+
+/* Compatibility with previous defines */
+#define sctp_stream_reset_events sctp_stream_reset_event
+
+/* On/Off setup for subscription to events */
+struct sctp_event_subscribe {
+ uint8_t sctp_data_io_event;
+ uint8_t sctp_association_event;
+ uint8_t sctp_address_event;
+ uint8_t sctp_send_failure_event;
+ uint8_t sctp_peer_error_event;
+ uint8_t sctp_shutdown_event;
+ uint8_t sctp_partial_delivery_event;
+ uint8_t sctp_adaptation_layer_event;
+ uint8_t sctp_authentication_event;
+ uint8_t sctp_sender_dry_event;
+ uint8_t sctp_stream_reset_event;
+};
+
+/* ancillary data types */
+#define SCTP_INIT 0x0001
+#define SCTP_SNDRCV 0x0002
+#define SCTP_EXTRCV 0x0003
+#define SCTP_SNDINFO 0x0004
+#define SCTP_RCVINFO 0x0005
+#define SCTP_NXTINFO 0x0006
+#define SCTP_PRINFO 0x0007
+#define SCTP_AUTHINFO 0x0008
+#define SCTP_DSTADDRV4 0x0009
+#define SCTP_DSTADDRV6 0x000a
+
+/*
+ * ancillary data structures
+ */
+struct sctp_initmsg {
+ uint16_t sinit_num_ostreams;
+ uint16_t sinit_max_instreams;
+ uint16_t sinit_max_attempts;
+ uint16_t sinit_max_init_timeo;
+};
+
+/* We add 96 bytes to the size of sctp_sndrcvinfo.
+ * This makes the current structure 128 bytes long
+ * which is nicely 64 bit aligned but also has room
+ * for us to add more and keep ABI compatibility.
+ * For example, already we have the sctp_extrcvinfo
+ * when enabled which is 48 bytes.
+ */
+
+/*
+ * The assoc up needs a verfid;
+ * all sendrcvinfo's need a verfid for SENDING only.
+ */
+
+
+#define SCTP_ALIGN_RESV_PAD 92
+#define SCTP_ALIGN_RESV_PAD_SHORT 76
+
+struct sctp_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint16_t sinfo_keynumber;
+ uint16_t sinfo_keynumber_valid;
+ uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD];
+};
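
The 128-byte layout claim above is easy to verify at compile time. A C11
sketch with a local replica of the struct (the total assumes the usual
4-byte alignment of uint32_t, so treat this as a sanity check rather than a
guarantee for every ABI):

#include <stdint.h>

typedef uint32_t sctp_assoc_t;

#define SCTP_ALIGN_RESV_PAD 92

/* Local replica of struct sctp_sndrcvinfo, just to check its size. */
struct sctp_sndrcvinfo {
	uint16_t sinfo_stream;
	uint16_t sinfo_ssn;
	uint16_t sinfo_flags;
	uint32_t sinfo_ppid;
	uint32_t sinfo_context;
	uint32_t sinfo_timetolive;
	uint32_t sinfo_tsn;
	uint32_t sinfo_cumtsn;
	sctp_assoc_t sinfo_assoc_id;
	uint16_t sinfo_keynumber;
	uint16_t sinfo_keynumber_valid;
	uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD];
};

_Static_assert(sizeof(struct sctp_sndrcvinfo) == 128,
               "sctp_sndrcvinfo is expected to be 128 bytes");

int main(void) { return 0; }
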
+
+struct sctp_extrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive; /* should have been sinfo_pr_value */
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint16_t serinfo_next_flags;
+ uint16_t serinfo_next_stream;
+ uint32_t serinfo_next_aid;
+ uint32_t serinfo_next_length;
+ uint32_t serinfo_next_ppid;
+ uint16_t sinfo_keynumber;
+ uint16_t sinfo_keynumber_valid;
+ uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD_SHORT];
+};
+#define sinfo_pr_value sinfo_timetolive
+#define sreinfo_next_flags serinfo_next_flags
+#define sreinfo_next_stream serinfo_next_stream
+#define sreinfo_next_aid serinfo_next_aid
+#define sreinfo_next_length serinfo_next_length
+#define sreinfo_next_ppid serinfo_next_ppid
+
+struct sctp_sndinfo {
+ uint16_t snd_sid;
+ uint16_t snd_flags;
+ uint32_t snd_ppid;
+ uint32_t snd_context;
+ sctp_assoc_t snd_assoc_id;
+};
+
+struct sctp_prinfo {
+ uint16_t pr_policy;
+ uint32_t pr_value;
+};
+
+struct sctp_default_prinfo {
+ uint16_t pr_policy;
+ uint32_t pr_value;
+ sctp_assoc_t pr_assoc_id;
+};
+
+struct sctp_authinfo {
+ uint16_t auth_keynumber;
+};
+
+struct sctp_rcvinfo {
+ uint16_t rcv_sid;
+ uint16_t rcv_ssn;
+ uint16_t rcv_flags;
+ uint32_t rcv_ppid;
+ uint32_t rcv_tsn;
+ uint32_t rcv_cumtsn;
+ uint32_t rcv_context;
+ sctp_assoc_t rcv_assoc_id;
+};
+
+struct sctp_nxtinfo {
+ uint16_t nxt_sid;
+ uint16_t nxt_flags;
+ uint32_t nxt_ppid;
+ uint32_t nxt_length;
+ sctp_assoc_t nxt_assoc_id;
+};
+
+#define SCTP_NO_NEXT_MSG 0x0000
+#define SCTP_NEXT_MSG_AVAIL 0x0001
+#define SCTP_NEXT_MSG_ISCOMPLETE 0x0002
+#define SCTP_NEXT_MSG_IS_UNORDERED 0x0004
+#define SCTP_NEXT_MSG_IS_NOTIFICATION 0x0008
+
+struct sctp_recvv_rn {
+ struct sctp_rcvinfo recvv_rcvinfo;
+ struct sctp_nxtinfo recvv_nxtinfo;
+};
+
+#define SCTP_RECVV_NOINFO 0
+#define SCTP_RECVV_RCVINFO 1
+#define SCTP_RECVV_NXTINFO 2
+#define SCTP_RECVV_RN 3
+
+#define SCTP_SENDV_NOINFO 0
+#define SCTP_SENDV_SNDINFO 1
+#define SCTP_SENDV_PRINFO 2
+#define SCTP_SENDV_AUTHINFO 3
+#define SCTP_SENDV_SPA 4
+
+struct sctp_sendv_spa {
+ uint32_t sendv_flags;
+ struct sctp_sndinfo sendv_sndinfo;
+ struct sctp_prinfo sendv_prinfo;
+ struct sctp_authinfo sendv_authinfo;
+};
+
+#define SCTP_SEND_SNDINFO_VALID 0x00000001
+#define SCTP_SEND_PRINFO_VALID 0x00000002
+#define SCTP_SEND_AUTHINFO_VALID 0x00000004
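
sctp_sendv_spa bundles send, PR-SCTP, and auth attributes into a single blob
for the sendv-style calls; only the sub-structs whose bits are set in
sendv_flags are honored. A sketch of filling one for a time-limited send on
stream 3 (assumes the declarations in this header are in scope; the stream
and lifetime values are made up):

#include <string.h>

static void fill_spa(struct sctp_sendv_spa *spa)
{
	memset(spa, 0, sizeof(*spa));
	spa->sendv_flags = SCTP_SEND_SNDINFO_VALID | SCTP_SEND_PRINFO_VALID;
	spa->sendv_sndinfo.snd_sid = 3;                 /* stream id */
	spa->sendv_prinfo.pr_policy = SCTP_PR_SCTP_TTL; /* time-based PR */
	spa->sendv_prinfo.pr_value = 5000;              /* ms to live */
	/* pass spa with infotype SCTP_SENDV_SPA to the sendv-style call */
}
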
+
+struct sctp_snd_all_completes {
+ uint16_t sall_stream;
+ uint16_t sall_flags;
+ uint32_t sall_ppid;
+ uint32_t sall_context;
+ uint32_t sall_num_sent;
+ uint32_t sall_num_failed;
+};
+
+/* Flags that go into the sinfo->sinfo_flags field */
+#define SCTP_NOTIFICATION 0x0010 /* next message is a notification */
+#define SCTP_COMPLETE 0x0020 /* next message is complete */
+#define SCTP_EOF 0x0100 /* Start shutdown procedures */
+#define SCTP_ABORT 0x0200 /* Send an ABORT to peer */
+#define SCTP_UNORDERED 0x0400 /* Message is un-ordered */
+#define SCTP_ADDR_OVER 0x0800 /* Override the primary-address */
+#define SCTP_SENDALL 0x1000 /* Send this on all associations */
+#define SCTP_EOR 0x2000 /* end of message signal */
+#define SCTP_SACK_IMMEDIATELY 0x4000 /* Set I-Bit */
+
+#define INVALID_SINFO_FLAG(x) (((x) & 0xfffffff0 \
+ & ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
+ SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR |\
+ SCTP_SACK_IMMEDIATELY)) != 0)
+/* for the endpoint */
+
+/* The lower four bits are an enumeration of PR-SCTP policies */
+#define SCTP_PR_SCTP_NONE 0x0000 /* Reliable transfer */
+#define SCTP_PR_SCTP_TTL 0x0001 /* Time based PR-SCTP */
+#define SCTP_PR_SCTP_PRIO 0x0002 /* Buffer based PR-SCTP */
+#define SCTP_PR_SCTP_BUF SCTP_PR_SCTP_PRIO /* For backwards compatibility */
+#define SCTP_PR_SCTP_RTX 0x0003 /* Number of retransmissions based PR-SCTP */
+#define SCTP_PR_SCTP_MAX SCTP_PR_SCTP_RTX
+#define SCTP_PR_SCTP_ALL 0x000f /* Used for aggregated stats */
+
+#define PR_SCTP_POLICY(x) ((x) & 0x0f)
+#define PR_SCTP_ENABLED(x) ((PR_SCTP_POLICY(x) != SCTP_PR_SCTP_NONE) && \
+ (PR_SCTP_POLICY(x) != SCTP_PR_SCTP_ALL))
+#define PR_SCTP_TTL_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL)
+#define PR_SCTP_BUF_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF)
+#define PR_SCTP_RTX_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX)
+#define PR_SCTP_INVALID_POLICY(x) (PR_SCTP_POLICY(x) > SCTP_PR_SCTP_MAX)
+#define PR_SCTP_VALID_POLICY(x) (PR_SCTP_POLICY(x) <= SCTP_PR_SCTP_MAX)
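
These macros pack the PR-SCTP policy into the low nibble of the sinfo_flags
word, alongside the flag bits defined earlier. A tiny self-contained
demonstration of extracting and testing a policy (the flags value is made
up; 0x2000 is SCTP_EOR per the table above):

#include <stdint.h>
#include <stdio.h>

#define SCTP_PR_SCTP_NONE 0x0000
#define SCTP_PR_SCTP_TTL  0x0001
#define SCTP_PR_SCTP_ALL  0x000f

#define PR_SCTP_POLICY(x)      ((x) & 0x0f)
#define PR_SCTP_ENABLED(x)     ((PR_SCTP_POLICY(x) != SCTP_PR_SCTP_NONE) && \
                                (PR_SCTP_POLICY(x) != SCTP_PR_SCTP_ALL))
#define PR_SCTP_TTL_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL)

int main(void)
{
	uint16_t flags = 0x2000 | SCTP_PR_SCTP_TTL; /* SCTP_EOR + TTL policy */

	printf("policy=%u enabled=%d ttl=%d\n",
	       (unsigned)PR_SCTP_POLICY(flags),
	       PR_SCTP_ENABLED(flags) ? 1 : 0,
	       PR_SCTP_TTL_ENABLED(flags) ? 1 : 0);
	return 0;
}
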
+
+/* Stats */
+struct sctp_pcbinfo {
+ uint32_t ep_count;
+ uint32_t asoc_count;
+ uint32_t laddr_count;
+ uint32_t raddr_count;
+ uint32_t chk_count;
+ uint32_t readq_count;
+ uint32_t free_chunks;
+ uint32_t stream_oque;
+};
+
+struct sctp_sockstat {
+ sctp_assoc_t ss_assoc_id;
+ uint32_t ss_total_sndbuf;
+ uint32_t ss_total_recv_buf;
+};
+
+/*
+ * notification event structures
+ */
+
+/*
+ * association change event
+ */
+struct sctp_assoc_change {
+ uint16_t sac_type;
+ uint16_t sac_flags;
+ uint32_t sac_length;
+ uint16_t sac_state;
+ uint16_t sac_error;
+ uint16_t sac_outbound_streams;
+ uint16_t sac_inbound_streams;
+ sctp_assoc_t sac_assoc_id;
+ uint8_t sac_info[];
+};
+
+/* sac_state values */
+#define SCTP_COMM_UP 0x0001
+#define SCTP_COMM_LOST 0x0002
+#define SCTP_RESTART 0x0003
+#define SCTP_SHUTDOWN_COMP 0x0004
+#define SCTP_CANT_STR_ASSOC 0x0005
+
+/* sac_info values */
+#define SCTP_ASSOC_SUPPORTS_PR 0x01
+#define SCTP_ASSOC_SUPPORTS_AUTH 0x02
+#define SCTP_ASSOC_SUPPORTS_ASCONF 0x03
+#define SCTP_ASSOC_SUPPORTS_MULTIBUF 0x04
+#define SCTP_ASSOC_SUPPORTS_RE_CONFIG 0x05
+#define SCTP_ASSOC_SUPPORTS_INTERLEAVING 0x06
+#define SCTP_ASSOC_SUPPORTS_MAX 0x06
+/*
+ * Address event
+ */
+struct sctp_paddr_change {
+ uint16_t spc_type;
+ uint16_t spc_flags;
+ uint32_t spc_length;
+ struct sockaddr_storage spc_aaddr;
+ uint32_t spc_state;
+ uint32_t spc_error;
+ sctp_assoc_t spc_assoc_id;
+};
+
+/* paddr state values */
+#define SCTP_ADDR_AVAILABLE 0x0001
+#define SCTP_ADDR_UNREACHABLE 0x0002
+#define SCTP_ADDR_REMOVED 0x0003
+#define SCTP_ADDR_ADDED 0x0004
+#define SCTP_ADDR_MADE_PRIM 0x0005
+#define SCTP_ADDR_CONFIRMED 0x0006
+
+#define SCTP_ACTIVE 0x0001 /* SCTP_ADDR_REACHABLE */
+#define SCTP_INACTIVE 0x0002 /* neither SCTP_ADDR_REACHABLE
+ nor SCTP_ADDR_UNCONFIRMED */
+#define SCTP_UNCONFIRMED 0x0200 /* SCTP_ADDR_UNCONFIRMED */
+
+/* remote error events */
+struct sctp_remote_error {
+ uint16_t sre_type;
+ uint16_t sre_flags;
+ uint32_t sre_length;
+ uint16_t sre_error;
+ sctp_assoc_t sre_assoc_id;
+ uint8_t sre_data[];
+};
+
+/* data send failure event (deprecated) */
+struct sctp_send_failed {
+ uint16_t ssf_type;
+ uint16_t ssf_flags;
+ uint32_t ssf_length;
+ uint32_t ssf_error;
+ struct sctp_sndrcvinfo ssf_info;
+ sctp_assoc_t ssf_assoc_id;
+ uint8_t ssf_data[];
+};
+
+/* data send failure event (not deprecated) */
+struct sctp_send_failed_event {
+ uint16_t ssfe_type;
+ uint16_t ssfe_flags;
+ uint32_t ssfe_length;
+ uint32_t ssfe_error;
+ struct sctp_sndinfo ssfe_info;
+ sctp_assoc_t ssfe_assoc_id;
+ uint8_t ssfe_data[];
+};
+
+/* flag that indicates state of data */
+#define SCTP_DATA_UNSENT 0x0001 /* inqueue never on wire */
+#define SCTP_DATA_SENT 0x0002 /* on wire at failure */
+
+/* shutdown event */
+struct sctp_shutdown_event {
+ uint16_t sse_type;
+ uint16_t sse_flags;
+ uint32_t sse_length;
+ sctp_assoc_t sse_assoc_id;
+};
+
+/* Adaptation layer indication stuff */
+struct sctp_adaptation_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaptation_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaptation {
+ uint32_t ssb_adaptation_ind;
+};
+
+/* compatible old spelling */
+struct sctp_adaption_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaption_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaption {
+ uint32_t ssb_adaption_ind;
+};
+
+
+/*
+ * Partial Delivery API event
+ */
+struct sctp_pdapi_event {
+ uint16_t pdapi_type;
+ uint16_t pdapi_flags;
+ uint32_t pdapi_length;
+ uint32_t pdapi_indication;
+ uint16_t pdapi_stream;
+ uint16_t pdapi_seq;
+ sctp_assoc_t pdapi_assoc_id;
+};
+
+/* indication values */
+#define SCTP_PARTIAL_DELIVERY_ABORTED 0x0001
+
+
+/*
+ * authentication key event
+ */
+struct sctp_authkey_event {
+ uint16_t auth_type;
+ uint16_t auth_flags;
+ uint32_t auth_length;
+ uint16_t auth_keynumber;
+ uint16_t auth_altkeynumber;
+ uint32_t auth_indication;
+ sctp_assoc_t auth_assoc_id;
+};
+
+/* indication values */
+#define SCTP_AUTH_NEW_KEY 0x0001
+#define SCTP_AUTH_NEWKEY SCTP_AUTH_NEW_KEY
+#define SCTP_AUTH_NO_AUTH 0x0002
+#define SCTP_AUTH_FREE_KEY 0x0003
+
+
+struct sctp_sender_dry_event {
+ uint16_t sender_dry_type;
+ uint16_t sender_dry_flags;
+ uint32_t sender_dry_length;
+ sctp_assoc_t sender_dry_assoc_id;
+};
+
+
+/*
+ * Stream reset event - subscribe to SCTP_STREAM_RESET_EVENT
+ */
+struct sctp_stream_reset_event {
+ uint16_t strreset_type;
+ uint16_t strreset_flags;
+ uint32_t strreset_length;
+ sctp_assoc_t strreset_assoc_id;
+ uint16_t strreset_stream_list[];
+};
+
+/* flags in stream_reset_event (strreset_flags) */
+#define SCTP_STREAM_RESET_INCOMING_SSN 0x0001
+#define SCTP_STREAM_RESET_OUTGOING_SSN 0x0002
+#define SCTP_STREAM_RESET_DENIED 0x0004
+#define SCTP_STREAM_RESET_FAILED 0x0008
+
+/*
+ * Assoc reset event - subscribe to SCTP_ASSOC_RESET_EVENT
+ */
+struct sctp_assoc_reset_event {
+ uint16_t assocreset_type;
+ uint16_t assocreset_flags;
+ uint32_t assocreset_length;
+ sctp_assoc_t assocreset_assoc_id;
+ uint32_t assocreset_local_tsn;
+ uint32_t assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_RESET_DENIED 0x0004
+#define SCTP_ASSOC_RESET_FAILED 0x0008
+
+/*
+ * Stream change event - subscribe to SCTP_STREAM_CHANGE_EVENT
+ */
+struct sctp_stream_change_event {
+ uint16_t strchange_type;
+ uint16_t strchange_flags;
+ uint32_t strchange_length;
+ sctp_assoc_t strchange_assoc_id;
+ uint16_t strchange_instrms;
+ uint16_t strchange_outstrms;
+};
+
+#define SCTP_STREAM_CHANGE_DENIED 0x0004
+#define SCTP_STREAM_CHANGE_FAILED 0x0008
+
+
+/* SCTP notification event */
+struct sctp_tlv {
+ uint16_t sn_type;
+ uint16_t sn_flags;
+ uint32_t sn_length;
+};
+
+union sctp_notification {
+ struct sctp_tlv sn_header;
+ struct sctp_assoc_change sn_assoc_change;
+ struct sctp_paddr_change sn_paddr_change;
+ struct sctp_remote_error sn_remote_error;
+ struct sctp_send_failed sn_send_failed;
+ struct sctp_shutdown_event sn_shutdown_event;
+ struct sctp_adaptation_event sn_adaptation_event;
+ /* compatibility same as above */
+ struct sctp_adaption_event sn_adaption_event;
+ struct sctp_pdapi_event sn_pdapi_event;
+ struct sctp_authkey_event sn_auth_event;
+ struct sctp_sender_dry_event sn_sender_dry_event;
+ struct sctp_send_failed_event sn_send_failed_event;
+ struct sctp_stream_reset_event sn_strreset_event;
+ struct sctp_assoc_reset_event sn_assocreset_event;
+ struct sctp_stream_change_event sn_strchange_event;
+};
+
+/* notification types */
+#define SCTP_ASSOC_CHANGE 0x0001
+#define SCTP_PEER_ADDR_CHANGE 0x0002
+#define SCTP_REMOTE_ERROR 0x0003
+#define SCTP_SEND_FAILED 0x0004
+#define SCTP_SHUTDOWN_EVENT 0x0005
+#define SCTP_ADAPTATION_INDICATION 0x0006
+/* same as above */
+#define SCTP_ADAPTION_INDICATION 0x0006
+#define SCTP_PARTIAL_DELIVERY_EVENT 0x0007
+#define SCTP_AUTHENTICATION_EVENT 0x0008
+#define SCTP_STREAM_RESET_EVENT 0x0009
+#define SCTP_SENDER_DRY_EVENT 0x000a
+#define SCTP_NOTIFICATIONS_STOPPED_EVENT 0x000b /* we don't send this */
+#define SCTP_ASSOC_RESET_EVENT 0x000c
+#define SCTP_STREAM_CHANGE_EVENT 0x000d
+#define SCTP_SEND_FAILED_EVENT 0x000e
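
A typical consumer reads a notification off the socket into a buffer, casts
it to union sctp_notification, and switches on sn_header.sn_type. A minimal
dispatch sketch (assumes the declarations above are in scope, e.g. by
including this header; the handler itself is illustrative):

#include <stdio.h>

static void handle_notification(const union sctp_notification *snp)
{
	switch (snp->sn_header.sn_type) {
	case SCTP_ASSOC_CHANGE:
		printf("assoc change, state=%u\n",
		       (unsigned)snp->sn_assoc_change.sac_state);
		break;
	case SCTP_PEER_ADDR_CHANGE:
		printf("peer address change, state=%u\n",
		       (unsigned)snp->sn_paddr_change.spc_state);
		break;
	case SCTP_SHUTDOWN_EVENT:
		printf("shutdown event\n");
		break;
	default:
		printf("unhandled notification type %u\n",
		       (unsigned)snp->sn_header.sn_type);
		break;
	}
}
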
+/*
+ * socket option structs
+ */
+
+struct sctp_paddrparams {
+ struct sockaddr_storage spp_address;
+ sctp_assoc_t spp_assoc_id;
+ uint32_t spp_hbinterval;
+ uint32_t spp_pathmtu;
+ uint32_t spp_flags;
+ uint32_t spp_ipv6_flowlabel;
+ uint16_t spp_pathmaxrxt;
+ uint8_t spp_dscp;
+};
+#define spp_ipv4_tos spp_dscp
+
+#define SPP_HB_ENABLE 0x00000001
+#define SPP_HB_DISABLE 0x00000002
+#define SPP_HB_DEMAND 0x00000004
+#define SPP_PMTUD_ENABLE 0x00000008
+#define SPP_PMTUD_DISABLE 0x00000010
+#define SPP_HB_TIME_IS_ZERO 0x00000080
+#define SPP_IPV6_FLOWLABEL 0x00000100
+#define SPP_DSCP 0x00000200
+#define SPP_IPV4_TOS SPP_DSCP
+
+struct sctp_paddrthlds {
+ struct sockaddr_storage spt_address;
+ sctp_assoc_t spt_assoc_id;
+ uint16_t spt_pathmaxrxt;
+ uint16_t spt_pathpfthld;
+ uint16_t spt_pathcpthld;
+};
+
+struct sctp_paddrinfo {
+ struct sockaddr_storage spinfo_address;
+ sctp_assoc_t spinfo_assoc_id;
+ int32_t spinfo_state;
+ uint32_t spinfo_cwnd;
+ uint32_t spinfo_srtt;
+ uint32_t spinfo_rto;
+ uint32_t spinfo_mtu;
+};
+
+struct sctp_rtoinfo {
+ sctp_assoc_t srto_assoc_id;
+ uint32_t srto_initial;
+ uint32_t srto_max;
+ uint32_t srto_min;
+};
+
+struct sctp_assocparams {
+ sctp_assoc_t sasoc_assoc_id;
+ uint32_t sasoc_peer_rwnd;
+ uint32_t sasoc_local_rwnd;
+ uint32_t sasoc_cookie_life;
+ uint16_t sasoc_asocmaxrxt;
+ uint16_t sasoc_number_peer_destinations;
+};
+
+struct sctp_setprim {
+ struct sockaddr_storage ssp_addr;
+ sctp_assoc_t ssp_assoc_id;
+ uint8_t ssp_padding[4];
+};
+
+struct sctp_setpeerprim {
+ struct sockaddr_storage sspp_addr;
+ sctp_assoc_t sspp_assoc_id;
+ uint8_t sspp_padding[4];
+};
+
+union sctp_sockstore {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+#if defined(__Userspace__)
+ struct sockaddr_conn sconn;
+#endif
+ struct sockaddr sa;
+};
+
+struct sctp_getaddresses {
+ sctp_assoc_t sget_assoc_id;
+ union sctp_sockstore addr[];
+};
+
+struct sctp_status {
+ sctp_assoc_t sstat_assoc_id;
+ int32_t sstat_state;
+ uint32_t sstat_rwnd;
+ uint16_t sstat_unackdata;
+ uint16_t sstat_penddata;
+ uint16_t sstat_instrms;
+ uint16_t sstat_outstrms;
+ uint32_t sstat_fragmentation_point;
+ struct sctp_paddrinfo sstat_primary;
+};
+
+/*
+ * AUTHENTICATION support
+ */
+/* SCTP_AUTH_CHUNK */
+struct sctp_authchunk {
+ uint8_t sauth_chunk;
+};
+
+/* SCTP_AUTH_KEY */
+struct sctp_authkey {
+ sctp_assoc_t sca_assoc_id;
+ uint16_t sca_keynumber;
+ uint16_t sca_keylength;
+ uint8_t sca_key[];
+};
+
+/* SCTP_HMAC_IDENT */
+struct sctp_hmacalgo {
+ uint32_t shmac_number_of_idents;
+ uint16_t shmac_idents[];
+};
+
+/* AUTH hmac_id */
+#define SCTP_AUTH_HMAC_ID_RSVD 0x0000
+#define SCTP_AUTH_HMAC_ID_SHA1 0x0001 /* default, mandatory */
+#define SCTP_AUTH_HMAC_ID_SHA256 0x0003
+
+/* SCTP_AUTH_ACTIVE_KEY / SCTP_AUTH_DELETE_KEY */
+struct sctp_authkeyid {
+ sctp_assoc_t scact_assoc_id;
+ uint16_t scact_keynumber;
+};
+
+/* SCTP_PEER_AUTH_CHUNKS / SCTP_LOCAL_AUTH_CHUNKS */
+struct sctp_authchunks {
+ sctp_assoc_t gauth_assoc_id;
+ uint32_t gauth_number_of_chunks;
+ uint8_t gauth_chunks[];
+};
+
+struct sctp_assoc_value {
+ sctp_assoc_t assoc_id;
+ uint32_t assoc_value;
+};
+
+struct sctp_cc_option {
+ int option;
+ struct sctp_assoc_value aid_value;
+};
+
+struct sctp_stream_value {
+ sctp_assoc_t assoc_id;
+ uint16_t stream_id;
+ uint16_t stream_value;
+};
+
+struct sctp_assoc_ids {
+ uint32_t gaids_number_of_ids;
+ sctp_assoc_t gaids_assoc_id[];
+};
+
+struct sctp_sack_info {
+ sctp_assoc_t sack_assoc_id;
+ uint32_t sack_delay;
+ uint32_t sack_freq;
+};
+
+struct sctp_timeouts {
+ sctp_assoc_t stimo_assoc_id;
+ uint32_t stimo_init;
+ uint32_t stimo_data;
+ uint32_t stimo_sack;
+ uint32_t stimo_shutdown;
+ uint32_t stimo_heartbeat;
+ uint32_t stimo_cookie;
+ uint32_t stimo_shutdownack;
+};
+
+struct sctp_udpencaps {
+ struct sockaddr_storage sue_address;
+ sctp_assoc_t sue_assoc_id;
+ uint16_t sue_port;
+};
+
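sctp_udpencaps is how SCTP-over-UDP (RFC 6951) learns the peer's encapsulation port; a port of 0 disables encapsulation for that destination. A sketch using the SCTP_REMOTE_UDP_ENCAPS_PORT option (option name defined elsewhere in these headers; peer_addr is a filled-in sockaddr_in assumed from the caller):

    struct sctp_udpencaps encaps;

    memset(&encaps, 0, sizeof(encaps));
    memcpy(&encaps.sue_address, &peer_addr, sizeof(struct sockaddr_in));
    encaps.sue_port = htons(9899);    /* IANA port for SCTP over UDP */
    (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_REMOTE_UDP_ENCAPS_PORT,
                             &encaps, (socklen_t)sizeof(encaps));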
+struct sctp_prstatus {
+ sctp_assoc_t sprstat_assoc_id;
+ uint16_t sprstat_sid;
+ uint16_t sprstat_policy;
+ uint64_t sprstat_abandoned_unsent;
+ uint64_t sprstat_abandoned_sent;
+};
+
+struct sctp_cwnd_args {
+ struct sctp_nets *net; /* network to */ /* FIXME: LP64 issue */
+ uint32_t cwnd_new_value;/* cwnd in k */
+ uint32_t pseudo_cumack;
+ uint16_t inflight; /* flightsize in k */
+ uint16_t cwnd_augment; /* increment to it */
+ uint8_t meets_pseudo_cumack;
+ uint8_t need_new_pseudo_cumack;
+ uint8_t cnt_in_send;
+ uint8_t cnt_in_str;
+};
+
+struct sctp_blk_args {
+ uint32_t onsb; /* in 1k bytes */
+ uint32_t sndlen; /* len of send being attempted */
+ uint32_t peer_rwnd; /* rwnd of peer */
+ uint16_t send_sent_qcnt;/* chnk cnt */
+ uint16_t stream_qcnt; /* chnk cnt */
+ uint16_t chunks_on_oque;/* chunks out */
+ uint16_t flight_size; /* flight size in k */
+};
+
+/*
+ * Max number of streams we can reset in one request. Note this is dictated
+ * not by the define but by the size of an mbuf cluster, so don't change this
+ * define and think you can specify more. You must issue multiple resets if
+ * you want to reset more than SCTP_MAX_EXPLICT_STR_RESET (sic) streams.
+ */
+#define SCTP_MAX_EXPLICT_STR_RESET 1000
+
+struct sctp_reset_streams {
+ sctp_assoc_t srs_assoc_id;
+ uint16_t srs_flags;
+ uint16_t srs_number_streams; /* 0 == ALL */
+	uint16_t srs_stream_list[]; /* list, used if srs_number_streams is not 0 */
+};
+
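Because srs_stream_list[] is a flexible array member, the caller sizes the request buffer for the number of streams being reset (at most SCTP_MAX_EXPLICT_STR_RESET per request, per the comment above). A sketch resetting two outgoing streams via the RFC 6525 SCTP_RESET_STREAMS option (SCTP_STREAM_RESET_OUTGOING is defined elsewhere in these headers):

    char buf[sizeof(struct sctp_reset_streams) + 2 * sizeof(uint16_t)];
    struct sctp_reset_streams *srs = (struct sctp_reset_streams *)buf;

    memset(buf, 0, sizeof(buf));
    srs->srs_assoc_id = assoc_id;
    srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
    srs->srs_number_streams = 2;
    srs->srs_stream_list[0] = 0;
    srs->srs_stream_list[1] = 1;
    (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_RESET_STREAMS,
                             srs, (socklen_t)sizeof(buf));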
+struct sctp_add_streams {
+ sctp_assoc_t sas_assoc_id;
+ uint16_t sas_instrms;
+ uint16_t sas_outstrms;
+};
+
+struct sctp_get_nonce_values {
+ sctp_assoc_t gn_assoc_id;
+ uint32_t gn_peers_tag;
+ uint32_t gn_local_tag;
+};
+
+/* Debugging logs */
+struct sctp_str_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t n_tsn;
+ uint32_t e_tsn;
+ uint16_t n_sseq;
+ uint16_t e_sseq;
+ uint16_t strm;
+};
+
+struct sctp_sb_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t so_sbcc;
+ uint32_t stcb_sbcc;
+ uint32_t incr;
+};
+
+struct sctp_fr_log {
+ uint32_t largest_tsn;
+ uint32_t largest_new_tsn;
+ uint32_t tsn;
+};
+
+struct sctp_fr_map {
+ uint32_t base;
+ uint32_t cum;
+ uint32_t high;
+};
+
+struct sctp_rwnd_log {
+ uint32_t rwnd;
+ uint32_t send_size;
+ uint32_t overhead;
+ uint32_t new_rwnd;
+};
+
+struct sctp_mbcnt_log {
+ uint32_t total_queue_size;
+ uint32_t size_change;
+ uint32_t total_queue_mb_size;
+ uint32_t mbcnt_change;
+};
+
+struct sctp_sack_log {
+ uint32_t cumack;
+ uint32_t oldcumack;
+ uint32_t tsn;
+ uint16_t numGaps;
+ uint16_t numDups;
+};
+
+struct sctp_lock_log {
+ void *sock; /* FIXME: LP64 issue */
+ void *inp; /* FIXME: LP64 issue */
+ uint8_t tcb_lock;
+ uint8_t inp_lock;
+ uint8_t info_lock;
+ uint8_t sock_lock;
+ uint8_t sockrcvbuf_lock;
+ uint8_t socksndbuf_lock;
+ uint8_t create_lock;
+ uint8_t resv;
+};
+
+struct sctp_rto_log {
+ void * net; /* FIXME: LP64 issue */
+ uint32_t rtt;
+};
+
+struct sctp_nagle_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t total_flight;
+ uint32_t total_in_queue;
+ uint16_t count_in_queue;
+ uint16_t count_in_flight;
+};
+
+struct sctp_sbwake_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint16_t send_q;
+ uint16_t sent_q;
+ uint16_t flight;
+ uint16_t wake_cnt;
+ uint8_t stream_qcnt; /* chnk cnt */
+ uint8_t chunks_on_oque;/* chunks out */
+ uint8_t sbflags;
+ uint8_t sctpflags;
+};
+
+struct sctp_misc_info {
+ uint32_t log1;
+ uint32_t log2;
+ uint32_t log3;
+ uint32_t log4;
+};
+
+struct sctp_log_closing {
+ void *inp; /* FIXME: LP64 issue */
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t sctp_flags;
+ uint16_t state;
+ int16_t loc;
+};
+
+struct sctp_mbuf_log {
+ struct mbuf *mp; /* FIXME: LP64 issue */
+ caddr_t ext;
+ caddr_t data;
+ uint16_t size;
+ uint8_t refcnt;
+ uint8_t mbuf_flags;
+};
+
+struct sctp_cwnd_log {
+ uint64_t time_event;
+ uint8_t from;
+ uint8_t event_type;
+ uint8_t resv[2];
+ union {
+ struct sctp_log_closing close;
+ struct sctp_blk_args blk;
+ struct sctp_cwnd_args cwnd;
+ struct sctp_str_log strlog;
+ struct sctp_fr_log fr;
+ struct sctp_fr_map map;
+ struct sctp_rwnd_log rwnd;
+ struct sctp_mbcnt_log mbcnt;
+ struct sctp_sack_log sack;
+ struct sctp_lock_log lock;
+ struct sctp_rto_log rto;
+ struct sctp_sb_log sb;
+ struct sctp_nagle_log nagle;
+ struct sctp_sbwake_log wake;
+ struct sctp_mbuf_log mb;
+ struct sctp_misc_info misc;
+ } x;
+};
+
+struct sctp_cwnd_log_req {
+ int32_t num_in_log; /* Number in log */
+ int32_t num_ret; /* Number returned */
+ int32_t start_at; /* start at this one */
+ int32_t end_at; /* end at this one */
+ struct sctp_cwnd_log log[];
+};
+
+struct sctp_timeval {
+ uint32_t tv_sec;
+ uint32_t tv_usec;
+};
+
+struct sctpstat {
+ struct sctp_timeval sctps_discontinuitytime; /* sctpStats 18 (TimeStamp) */
+ /* MIB according to RFC 3873 */
+ uint32_t sctps_currestab; /* sctpStats 1 (Gauge32) */
+ uint32_t sctps_activeestab; /* sctpStats 2 (Counter32) */
+ uint32_t sctps_restartestab;
+ uint32_t sctps_collisionestab;
+ uint32_t sctps_passiveestab; /* sctpStats 3 (Counter32) */
+ uint32_t sctps_aborted; /* sctpStats 4 (Counter32) */
+ uint32_t sctps_shutdown; /* sctpStats 5 (Counter32) */
+ uint32_t sctps_outoftheblue; /* sctpStats 6 (Counter32) */
+ uint32_t sctps_checksumerrors; /* sctpStats 7 (Counter32) */
+ uint32_t sctps_outcontrolchunks; /* sctpStats 8 (Counter64) */
+ uint32_t sctps_outorderchunks; /* sctpStats 9 (Counter64) */
+ uint32_t sctps_outunorderchunks; /* sctpStats 10 (Counter64) */
+ uint32_t sctps_incontrolchunks; /* sctpStats 11 (Counter64) */
+ uint32_t sctps_inorderchunks; /* sctpStats 12 (Counter64) */
+ uint32_t sctps_inunorderchunks; /* sctpStats 13 (Counter64) */
+ uint32_t sctps_fragusrmsgs; /* sctpStats 14 (Counter64) */
+ uint32_t sctps_reasmusrmsgs; /* sctpStats 15 (Counter64) */
+ uint32_t sctps_outpackets; /* sctpStats 16 (Counter64) */
+ uint32_t sctps_inpackets; /* sctpStats 17 (Counter64) */
+
+ /* input statistics: */
+ uint32_t sctps_recvpackets; /* total input packets */
+ uint32_t sctps_recvdatagrams; /* total input datagrams */
+ uint32_t sctps_recvpktwithdata; /* total packets that had data */
+ uint32_t sctps_recvsacks; /* total input SACK chunks */
+ uint32_t sctps_recvdata; /* total input DATA chunks */
+ uint32_t sctps_recvdupdata; /* total input duplicate DATA chunks */
+ uint32_t sctps_recvheartbeat; /* total input HB chunks */
+ uint32_t sctps_recvheartbeatack; /* total input HB-ACK chunks */
+ uint32_t sctps_recvecne; /* total input ECNE chunks */
+ uint32_t sctps_recvauth; /* total input AUTH chunks */
+ uint32_t sctps_recvauthmissing; /* total input chunks missing AUTH */
+ uint32_t sctps_recvivalhmacid; /* total number of invalid HMAC ids received */
+ uint32_t sctps_recvivalkeyid; /* total number of invalid secret ids received */
+ uint32_t sctps_recvauthfailed; /* total number of auth failed */
+ uint32_t sctps_recvexpress; /* total fast path receives all one chunk */
+ uint32_t sctps_recvexpressm; /* total fast path multi-part data */
+ uint32_t sctps_recv_spare; /* formerly sctps_recvnocrc */
+ uint32_t sctps_recvswcrc;
+ uint32_t sctps_recvhwcrc;
+
+ /* output statistics: */
+ uint32_t sctps_sendpackets; /* total output packets */
+ uint32_t sctps_sendsacks; /* total output SACKs */
+ uint32_t sctps_senddata; /* total output DATA chunks */
+ uint32_t sctps_sendretransdata; /* total output retransmitted DATA chunks */
+ uint32_t sctps_sendfastretrans; /* total output fast retransmitted DATA chunks */
+ uint32_t sctps_sendmultfastretrans; /* total FR's that happened more than once
+ * to same chunk (u-del multi-fr algo).
+ */
+ uint32_t sctps_sendheartbeat; /* total output HB chunks */
+ uint32_t sctps_sendecne; /* total output ECNE chunks */
+ uint32_t sctps_sendauth; /* total output AUTH chunks FIXME */
+ uint32_t sctps_senderrors; /* ip_output error counter */
+ uint32_t sctps_send_spare; /* formerly sctps_sendnocrc */
+ uint32_t sctps_sendswcrc;
+ uint32_t sctps_sendhwcrc;
+ /* PCKDROPREP statistics: */
+ uint32_t sctps_pdrpfmbox; /* Packet drop from middle box */
+ uint32_t sctps_pdrpfehos; /* P-drop from end host */
+ uint32_t sctps_pdrpmbda; /* P-drops with data */
+ uint32_t sctps_pdrpmbct; /* P-drops, non-data, non-endhost */
+ uint32_t sctps_pdrpbwrpt; /* P-drop, non-endhost, bandwidth rep only */
+ uint32_t sctps_pdrpcrupt; /* P-drop, not enough for chunk header */
+ uint32_t sctps_pdrpnedat; /* P-drop, not enough data to confirm */
+ uint32_t sctps_pdrppdbrk; /* P-drop, where process_chunk_drop said break */
+ uint32_t sctps_pdrptsnnf; /* P-drop, could not find TSN */
+ uint32_t sctps_pdrpdnfnd; /* P-drop, attempt reverse TSN lookup */
+ uint32_t sctps_pdrpdiwnp; /* P-drop, e-host confirms zero-rwnd */
+ uint32_t sctps_pdrpdizrw; /* P-drop, midbox confirms no space */
+ uint32_t sctps_pdrpbadd; /* P-drop, data did not match TSN */
+ uint32_t sctps_pdrpmark; /* P-drop, TSN's marked for Fast Retran */
+ /* timeouts */
+ uint32_t sctps_timoiterator; /* Number of iterator timers that fired */
+ uint32_t sctps_timodata; /* Number of T3 data time outs */
+ uint32_t sctps_timowindowprobe; /* Number of window probe (T3) timers that fired */
+ uint32_t sctps_timoinit; /* Number of INIT timers that fired */
+ uint32_t sctps_timosack; /* Number of sack timers that fired */
+ uint32_t sctps_timoshutdown; /* Number of shutdown timers that fired */
+ uint32_t sctps_timoheartbeat; /* Number of heartbeat timers that fired */
+ uint32_t sctps_timocookie; /* Number of times a cookie timeout fired */
+ uint32_t sctps_timosecret; /* Number of times an endpoint changed its cookie secret*/
+ uint32_t sctps_timopathmtu; /* Number of PMTU timers that fired */
+ uint32_t sctps_timoshutdownack; /* Number of shutdown ack timers that fired */
+ uint32_t sctps_timoshutdownguard; /* Number of shutdown guard timers that fired */
+ uint32_t sctps_timostrmrst; /* Number of stream reset timers that fired */
+ uint32_t sctps_timoearlyfr; /* Number of early FR timers that fired */
+ uint32_t sctps_timoasconf; /* Number of times an asconf timer fired */
+ uint32_t sctps_timodelprim; /* Number of times a prim_deleted timer fired */
+ uint32_t sctps_timoautoclose; /* Number of times auto close timer fired */
+ uint32_t sctps_timoassockill; /* Number of asoc free timers expired */
+ uint32_t sctps_timoinpkill; /* Number of inp free timers expired */
+ /* former early FR counters */
+ uint32_t sctps_spare[11];
+ /* others */
+ uint32_t sctps_hdrops; /* packet shorter than header */
+ uint32_t sctps_badsum; /* checksum error */
+ uint32_t sctps_noport; /* no endpoint for port */
+ uint32_t sctps_badvtag; /* bad v-tag */
+ uint32_t sctps_badsid; /* bad SID */
+ uint32_t sctps_nomem; /* no memory */
+ uint32_t sctps_fastretransinrtt; /* number of multiple FR in a RTT window */
+ uint32_t sctps_markedretrans;
+ uint32_t sctps_naglesent; /* nagle allowed sending */
+ uint32_t sctps_naglequeued; /* nagle doesn't allow sending */
+ uint32_t sctps_maxburstqueued; /* max burst doesn't allow sending */
+ uint32_t sctps_ifnomemqueued; /* look ahead tells us no memory in
+ * interface ring buffer OR we had a
+ * send error and are queuing one send.
+ */
+ uint32_t sctps_windowprobed; /* total number of window probes sent */
+ uint32_t sctps_lowlevelerr; /* total times an output error causes us
+ * to clamp down on next user send.
+ */
+ uint32_t sctps_lowlevelerrusr; /* total times sctp_senderrors were caused from
+ * a user send from a user invoked send not
+ * a sack response
+ */
+ uint32_t sctps_datadropchklmt; /* Number of in data drops due to chunk limit reached */
+ uint32_t sctps_datadroprwnd; /* Number of in data drops due to rwnd limit reached */
+ uint32_t sctps_ecnereducedcwnd; /* Number of times a ECN reduced the cwnd */
+ uint32_t sctps_vtagexpress; /* Used express lookup via vtag */
+ uint32_t sctps_vtagbogus; /* Collision in express lookup. */
+ uint32_t sctps_primary_randry; /* Number of times the sender ran dry of user data on primary */
+	uint32_t sctps_cmt_randry; /* Same as above, for CMT */
+ uint32_t sctps_slowpath_sack; /* Sacks the slow way */
+ uint32_t sctps_wu_sacks_sent; /* Window Update only sacks sent */
+ uint32_t sctps_sends_with_flags; /* number of sends with sinfo_flags !=0 */
+ uint32_t sctps_sends_with_unord; /* number of unordered sends */
+ uint32_t sctps_sends_with_eof; /* number of sends with EOF flag set */
+ uint32_t sctps_sends_with_abort; /* number of sends with ABORT flag set */
+ uint32_t sctps_protocol_drain_calls; /* number of times protocol drain called */
+ uint32_t sctps_protocol_drains_done; /* number of times we did a protocol drain */
+ uint32_t sctps_read_peeks; /* Number of times recv was called with peek */
+ uint32_t sctps_cached_chk; /* Number of cached chunks used */
+ uint32_t sctps_cached_strmoq; /* Number of cached stream oq's used */
+ uint32_t sctps_left_abandon; /* Number of unread messages abandoned by close */
+ uint32_t sctps_send_burst_avoid; /* Unused */
+ uint32_t sctps_send_cwnd_avoid; /* Send cwnd full avoidance, already max burst inflight to net */
+ uint32_t sctps_fwdtsn_map_over; /* number of map array over-runs via fwd-tsn's */
+ uint32_t sctps_queue_upd_ecne; /* Number of times we queued or updated an ECN chunk on send queue */
+ uint32_t sctps_reserved[31]; /* Future ABI compat - remove int's from here when adding new */
+};
+
+#define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1)
+#define SCTP_STAT_DECR(_x) SCTP_STAT_DECR_BY(_x,1)
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+#define SCTP_STAT_INCR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x += _d)
+#define SCTP_STAT_DECR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x -= _d)
+#else
+#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d)
+#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d)
+#endif
+#else
+#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d)
+#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d)
+#endif
+/* The following macros are for handling MIB values. */
+#define SCTP_STAT_INCR_COUNTER32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_COUNTER64(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_GAUGE32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_DECR_COUNTER32(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_COUNTER64(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_GAUGE32(_x) SCTP_STAT_DECR(_x)
+
+
+/***********************************/
+/* And something for us old timers */
+/***********************************/
+
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+#if !defined(__Userspace__)
+#ifndef ntohll
+#if defined(__linux__)
+#ifndef _BSD_SOURCE
+#define _BSD_SOURCE
+#endif
+#include <endian.h>
+#else
+#include <sys/endian.h>
+#endif
+#define ntohll(x) be64toh(x)
+#endif
+
+#ifndef htonll
+#if defined(__linux__)
+#ifndef _BSD_SOURCE
+#define _BSD_SOURCE
+#endif
+#include <endian.h>
+#else
+#include <sys/endian.h>
+#endif
+#define htonll(x) htobe64(x)
+#endif
+#endif
+#endif
+/***********************************/
+
+
+struct xsctp_inpcb {
+ uint32_t last;
+ uint32_t flags;
+ uint64_t features;
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ uint32_t total_nospaces;
+ uint32_t fragmentation_point;
+ uint16_t local_port;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint16_t qlen_old;
+ uint16_t maxqlen_old;
+#else
+ uint16_t qlen;
+ uint16_t maxqlen;
+#endif
+ uint16_t __spare16;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ kvaddr_t socket;
+#else
+ void *socket;
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint32_t qlen;
+ uint32_t maxqlen;
+#endif
+ uint32_t extra_padding[26]; /* future */
+};
+
+struct xsctp_tcb {
+ union sctp_sockstore primary_addr; /* sctpAssocEntry 5/6 */
+ uint32_t last;
+ uint32_t heartbeat_interval; /* sctpAssocEntry 7 */
+ uint32_t state; /* sctpAssocEntry 8 */
+ uint32_t in_streams; /* sctpAssocEntry 9 */
+ uint32_t out_streams; /* sctpAssocEntry 10 */
+ uint32_t max_nr_retrans; /* sctpAssocEntry 11 */
+ uint32_t primary_process; /* sctpAssocEntry 12 */
+ uint32_t T1_expireries; /* sctpAssocEntry 13 */
+ uint32_t T2_expireries; /* sctpAssocEntry 14 */
+ uint32_t retransmitted_tsns; /* sctpAssocEntry 15 */
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ uint32_t local_tag;
+ uint32_t remote_tag;
+ uint32_t initial_tsn;
+ uint32_t highest_tsn;
+ uint32_t cumulative_tsn;
+ uint32_t cumulative_tsn_ack;
+ uint32_t mtu;
+ uint32_t refcnt;
+ uint16_t local_port; /* sctpAssocEntry 3 */
+ uint16_t remote_port; /* sctpAssocEntry 4 */
+ struct sctp_timeval start_time; /* sctpAssocEntry 16 */
+ struct sctp_timeval discontinuity_time; /* sctpAssocEntry 17 */
+ uint32_t peers_rwnd;
+ sctp_assoc_t assoc_id; /* sctpAssocEntry 1 */
+ uint32_t extra_padding[32]; /* future */
+};
+
+struct xsctp_laddr {
+ union sctp_sockstore address; /* sctpAssocLocalAddrEntry 1/2 */
+ uint32_t last;
+ struct sctp_timeval start_time; /* sctpAssocLocalAddrEntry 3 */
+ uint32_t extra_padding[32]; /* future */
+};
+
+struct xsctp_raddr {
+ union sctp_sockstore address; /* sctpAssocLocalRemEntry 1/2 */
+ uint32_t last;
+ uint32_t rto; /* sctpAssocLocalRemEntry 5 */
+ uint32_t max_path_rtx; /* sctpAssocLocalRemEntry 6 */
+ uint32_t rtx; /* sctpAssocLocalRemEntry 7 */
+ uint32_t error_counter; /* */
+ uint32_t cwnd; /* */
+ uint32_t flight_size; /* */
+ uint32_t mtu; /* */
+ uint8_t active; /* sctpAssocLocalRemEntry 3 */
+ uint8_t confirmed; /* */
+ uint8_t heartbeat_enabled; /* sctpAssocLocalRemEntry 4 */
+ uint8_t potentially_failed;
+ struct sctp_timeval start_time; /* sctpAssocLocalRemEntry 8 */
+ uint32_t rtt;
+ uint32_t heartbeat_interval;
+ uint32_t ssthresh;
+ uint16_t encaps_port;
+ uint16_t state;
+ uint32_t extra_padding[29]; /* future */
+};
+
+#define SCTP_MAX_LOGGING_SIZE 30000
+#define SCTP_TRACE_PARAMS 6 /* This number MUST be even */
+
+struct sctp_log_entry {
+ uint64_t timestamp;
+ uint32_t subsys;
+ uint32_t padding;
+ uint32_t params[SCTP_TRACE_PARAMS];
+};
+
+struct sctp_log {
+ struct sctp_log_entry entry[SCTP_MAX_LOGGING_SIZE];
+ uint32_t index;
+ uint32_t padding;
+};
+
+/*
+ * Kernel defined for sctp_send
+ */
+#if defined(_KERNEL) || defined(__Userspace__)
+int
+sctp_lower_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *i_pak,
+ struct mbuf *control,
+ int flags,
+ struct sctp_sndrcvinfo *srcv
+#if !defined(__Userspace__)
+#if defined(__FreeBSD__)
+ ,struct thread *p
+#elif defined(_WIN32)
+ , PKTHREAD p
+#else
+ ,struct proc *p
+#endif
+#endif
+);
+
+int
+sctp_sorecvmsg(struct socket *so,
+ struct uio *uio,
+ struct mbuf **mp,
+ struct sockaddr *from,
+ int fromlen,
+ int *msg_flags,
+ struct sctp_sndrcvinfo *sinfo,
+ int filling_sinfo);
+#endif
+
+/*
+ * API system calls
+ */
+#if !(defined(_KERNEL)) && !(defined(__Userspace__))
+
+__BEGIN_DECLS
+int sctp_peeloff(int, sctp_assoc_t);
+int sctp_bindx(int, struct sockaddr *, int, int);
+int sctp_connectx(int, const struct sockaddr *, int, sctp_assoc_t *);
+int sctp_getaddrlen(sa_family_t);
+int sctp_getpaddrs(int, sctp_assoc_t, struct sockaddr **);
+void sctp_freepaddrs(struct sockaddr *);
+int sctp_getladdrs(int, sctp_assoc_t, struct sockaddr **);
+void sctp_freeladdrs(struct sockaddr *);
+int sctp_opt_info(int, sctp_assoc_t, int, void *, socklen_t *);
+
+/* deprecated */
+ssize_t sctp_sendmsg(int, const void *, size_t, const struct sockaddr *,
+ socklen_t, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t);
+
+/* deprecated */
+ssize_t sctp_send(int, const void *, size_t,
+ const struct sctp_sndrcvinfo *, int);
+
+/* deprecated */
+ssize_t sctp_sendx(int, const void *, size_t, struct sockaddr *,
+ int, struct sctp_sndrcvinfo *, int);
+
+/* deprecated */
+ssize_t sctp_sendmsgx(int sd, const void *, size_t, struct sockaddr *,
+ int, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t);
+
+sctp_assoc_t sctp_getassocid(int, struct sockaddr *);
+
+/* deprecated */
+ssize_t sctp_recvmsg(int, void *, size_t, struct sockaddr *, socklen_t *,
+ struct sctp_sndrcvinfo *, int *);
+
+ssize_t sctp_sendv(int, const struct iovec *, int, struct sockaddr *,
+ int, void *, socklen_t, unsigned int, int);
+
+ssize_t sctp_recvv(int, const struct iovec *, int, struct sockaddr *,
+ socklen_t *, void *, socklen_t *, unsigned int *, int *);
+__END_DECLS
+
+#endif /* !_KERNEL */
+#endif /* !__sctp_uio_h__ */
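For reference, sctp_sendv()/sctp_recvv() declared above are the RFC 6458 replacements for the deprecated senders; the info-type argument says what the opaque info pointer carries. A hedged sketch sending one message on stream 3 (SCTP_SENDV_SNDINFO and struct sctp_sndinfo are defined elsewhere in these headers; fd is assumed to be a connected one-to-one style SCTP socket):

    struct sctp_sndinfo snd;
    struct iovec iov;
    char data[] = "hello";

    memset(&snd, 0, sizeof(snd));
    snd.snd_sid = 3;                  /* outgoing stream id */
    iov.iov_base = data;
    iov.iov_len = sizeof(data) - 1;
    if (sctp_sendv(fd, &iov, 1, NULL, 0, &snd, (socklen_t)sizeof(snd),
                   SCTP_SENDV_SNDINFO, 0) < 0) {
        perror("sctp_sendv");
    }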
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_userspace.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_userspace.c
new file mode 100644
index 000000000..8e2ebf462
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_userspace.c
@@ -0,0 +1,406 @@
+/*-
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+
+#ifdef _WIN32
+#include <netinet/sctp_pcb.h>
+#include <sys/timeb.h>
+#include <iphlpapi.h>
+#if !defined(__MINGW32__)
+#pragma comment(lib, "IPHLPAPI.lib")
+#endif
+#endif
+#include <netinet/sctp_os_userspace.h>
+#if defined(__FreeBSD__)
+#include <pthread_np.h>
+#endif
+
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
+#if defined(_WIN32)
+/* Adapter to translate Unix thread start routines to Windows thread start
+ * routines.
+ */
+#if defined(__MINGW32__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+static DWORD WINAPI
+sctp_create_thread_adapter(void *arg) {
+ start_routine_t start_routine = (start_routine_t)arg;
+ return start_routine(NULL) == NULL;
+}
+
+int
+sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_routine)
+{
+ *thread = CreateThread(NULL, 0, sctp_create_thread_adapter,
+ (void *)start_routine, 0, NULL);
+ if (*thread == NULL)
+ return GetLastError();
+ return 0;
+}
+
+#if defined(__MINGW32__)
+#pragma GCC diagnostic pop
+#endif
+
+#else
+int
+sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_routine)
+{
+ return pthread_create(thread, NULL, start_routine, NULL);
+}
+#endif
+
+void
+sctp_userspace_set_threadname(const char *name)
+{
+#if defined(__APPLE__)
+ pthread_setname_np(name);
+#endif
+#if defined(__linux__)
+ prctl(PR_SET_NAME, name);
+#endif
+#if defined(__FreeBSD__)
+ pthread_set_name_np(pthread_self(), name);
+#endif
+}
+
+#if !defined(_WIN32) && !defined(__native_client__)
+int
+sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af)
+{
+ struct ifreq ifr;
+ int fd;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ if (if_indextoname(if_index, ifr.ifr_name) != NULL) {
+ /* TODO can I use the raw socket here and not have to open a new one with each query? */
+ if ((fd = socket(af, SOCK_DGRAM, 0)) < 0)
+ return (0);
+ if (ioctl(fd, SIOCGIFMTU, &ifr) < 0) {
+ close(fd);
+ return (0);
+ }
+ close(fd);
+ return ifr.ifr_mtu;
+ } else {
+ return (0);
+ }
+}
+#endif
+
+#if defined(__native_client__)
+int
+sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af)
+{
+ return 1280;
+}
+#endif
+
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__linux__) || defined(__native_client__) || defined(__NetBSD__) || defined(_WIN32) || defined(__Fuchsia__)
+int
+timingsafe_bcmp(const void *b1, const void *b2, size_t n)
+{
+ const unsigned char *p1 = b1, *p2 = b2;
+ int ret = 0;
+
+ for (; n > 0; n--)
+ ret |= *p1++ ^ *p2++;
+ return (ret != 0);
+}
+#endif
+
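timingsafe_bcmp() above XOR-folds every byte before its single data-dependent comparison, so the running time does not depend on where the buffers first differ; the AUTH code relies on that when validating digests. Intended use, sketched:

    /* Reject without leaking, via early exit, how many leading bytes matched. */
    if (timingsafe_bcmp(computed_hmac, received_hmac, hmac_len) != 0) {
        /* bad digest: drop the chunk */
    }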
+#ifdef _WIN32
+int
+sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af)
+{
+ PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt;
+ DWORD AdapterAddrsSize, Err;
+ int ret;
+
+ ret = 0;
+ AdapterAddrsSize = 0;
+ pAdapterAddrs = NULL;
+ if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+ if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+ SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() sizing failed with error code %d, AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+ ret = -1;
+ goto cleanup;
+ }
+ }
+ if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+ SCTPDBG(SCTP_DEBUG_USR, "Memory allocation error!\n");
+ ret = -1;
+ goto cleanup;
+ }
+ if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+ SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() failed with error code %d\n", Err);
+ ret = -1;
+ goto cleanup;
+ }
+ for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+ if (pAdapt->IfIndex == if_index) {
+ ret = pAdapt->Mtu;
+ break;
+ }
+ }
+cleanup:
+ if (pAdapterAddrs != NULL) {
+ GlobalFree(pAdapterAddrs);
+ }
+ return (ret);
+}
+
+void
+getwintimeofday(struct timeval *tv)
+{
+ FILETIME filetime;
+ ULARGE_INTEGER ularge;
+
+ GetSystemTimeAsFileTime(&filetime);
+ ularge.LowPart = filetime.dwLowDateTime;
+ ularge.HighPart = filetime.dwHighDateTime;
+ /* Change base from Jan 1 1601 00:00:00 to Jan 1 1970 00:00:00 */
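+	/*
+	 * 116444736000000000 = 134774 days (369 years, including 89 leap
+	 * days) * 86400 s/day * 10^7 100-ns units per second.
+	 */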
+#if defined(__MINGW32__)
+ ularge.QuadPart -= 116444736000000000ULL;
+#else
+ ularge.QuadPart -= 116444736000000000UI64;
+#endif
+ /*
+ * ularge.QuadPart is now the number of 100-nanosecond intervals
+ * since Jan 1 1970 00:00:00.
+ */
+#if defined(__MINGW32__)
+ tv->tv_sec = (long)(ularge.QuadPart / 10000000ULL);
+ tv->tv_usec = (long)((ularge.QuadPart % 10000000ULL) / 10ULL);
+#else
+ tv->tv_sec = (long)(ularge.QuadPart / 10000000UI64);
+ tv->tv_usec = (long)((ularge.QuadPart % 10000000UI64) / 10UI64);
+#endif
+}
+
+int
+win_if_nametoindex(const char *ifname)
+{
+ IP_ADAPTER_ADDRESSES *addresses, *addr;
+ ULONG status, size;
+ int index = 0;
+
+ if (!ifname) {
+ return 0;
+ }
+
+ size = 0;
+ status = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &size);
+ if (status != ERROR_BUFFER_OVERFLOW) {
+ return 0;
+ }
+	addresses = malloc(size);
+	if (addresses == NULL) {
+		return 0;
+	}
+ status = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, addresses, &size);
+ if (status == ERROR_SUCCESS) {
+ for (addr = addresses; addr; addr = addr->Next) {
+ if (addr->AdapterName && !strcmp(ifname, addr->AdapterName)) {
+ index = addr->IfIndex;
+ break;
+ }
+ }
+ }
+
+ free(addresses);
+ return index;
+}
+
+#if WINVER < 0x0600
+/* These functions are written based on the code at
+ * http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
+ * Therefore, for the rest of the file the following applies:
+ *
+ *
+ * Copyright and Licensing Information for ACE(TM), TAO(TM), CIAO(TM),
+ * DAnCE(TM), and CoSMIC(TM)
+ *
+ * [1]ACE(TM), [2]TAO(TM), [3]CIAO(TM), DAnCE(TM), and [4]CoSMIC(TM)
+ * (henceforth referred to as "DOC software") are copyrighted by
+ * [5]Douglas C. Schmidt and his [6]research group at [7]Washington
+ * University, [8]University of California, Irvine, and [9]Vanderbilt
+ * University, Copyright (c) 1993-2012, all rights reserved. Since DOC
+ * software is open-source, freely available software, you are free to
+ * use, modify, copy, and distribute--perpetually and irrevocably--the
+ * DOC software source code and object code produced from the source, as
+ * well as copy and distribute modified versions of this software. You
+ * must, however, include this copyright statement along with any code
+ * built using DOC software that you release. No copyright statement
+ * needs to be provided if you just ship binary executables of your
+ * software products.
+ *
+ * You can use DOC software in commercial and/or binary software releases
+ * and are under no obligation to redistribute any of your source code
+ * that is built using DOC software. Note, however, that you may not
+ * misappropriate the DOC software code, such as copyrighting it yourself
+ * or claiming authorship of the DOC software code, in a way that will
+ * prevent DOC software from being distributed freely using an
+ * open-source development model. You needn't inform anyone that you're
+ * using DOC software in your software, though we encourage you to let
+ * [10]us know so we can promote your project in the [11]DOC software
+ * success stories.
+ *
+ * The [12]ACE, [13]TAO, [14]CIAO, [15]DAnCE, and [16]CoSMIC web sites
+ * are maintained by the [17]DOC Group at the [18]Institute for Software
+ * Integrated Systems (ISIS) and the [19]Center for Distributed Object
+ * Computing of Washington University, St. Louis for the development of
+ * open-source software as part of the open-source software community.
+ * Submissions are provided by the submitter ``as is'' with no warranties
+ * whatsoever, including any warranty of merchantability, noninfringement
+ * of third party intellectual property, or fitness for any particular
+ * purpose. In no event shall the submitter be liable for any direct,
+ * indirect, special, exemplary, punitive, or consequential damages,
+ * including without limitation, lost profits, even if advised of the
+ * possibility of such damages. Likewise, DOC software is provided as is
+ * with no warranties of any kind, including the warranties of design,
+ * merchantability, and fitness for a particular purpose,
+ * noninfringement, or arising from a course of dealing, usage or trade
+ * practice. Washington University, UC Irvine, Vanderbilt University,
+ * their employees, and students shall have no liability with respect to
+ * the infringement of copyrights, trade secrets or any patents by DOC
+ * software or any part thereof. Moreover, in no event will Washington
+ * University, UC Irvine, or Vanderbilt University, their employees, or
+ * students be liable for any lost revenue or profits or other special,
+ * indirect and consequential damages.
+ *
+ * DOC software is provided with no support and without any obligation on
+ * the part of Washington University, UC Irvine, Vanderbilt University,
+ * their employees, or students to assist in its use, correction,
+ * modification, or enhancement. A [20]number of companies around the
+ * world provide commercial support for DOC software, however. DOC
+ * software is Y2K-compliant, as long as the underlying OS platform is
+ * Y2K-compliant. Likewise, DOC software is compliant with the new US
+ * daylight savings rule passed by Congress as "The Energy Policy Act of
+ * 2005," which established new daylight savings times (DST) rules for
+ * the United States that expand DST as of March 2007. Since DOC software
+ * obtains time/date and calendaring information from operating systems
+ * users will not be affected by the new DST rules as long as they
+ * upgrade their operating systems accordingly.
+ *
+ * The names ACE(TM), TAO(TM), CIAO(TM), DAnCE(TM), CoSMIC(TM),
+ * Washington University, UC Irvine, and Vanderbilt University, may not
+ * be used to endorse or promote products or services derived from this
+ * source without express written permission from Washington University,
+ * UC Irvine, or Vanderbilt University. This license grants no permission
+ * to call products or services derived from this source ACE(TM),
+ * TAO(TM), CIAO(TM), DAnCE(TM), or CoSMIC(TM), nor does it grant
+ * permission for the name Washington University, UC Irvine, or
+ * Vanderbilt University to appear in their names.
+ *
+ * If you have any suggestions, additions, comments, or questions, please
+ * let [21]me know.
+ *
+ * [22]Douglas C. Schmidt
+ *
+ * References
+ *
+ * 1. http://www.cs.wustl.edu/~schmidt/ACE.html
+ * 2. http://www.cs.wustl.edu/~schmidt/TAO.html
+ * 3. http://www.dre.vanderbilt.edu/CIAO/
+ * 4. http://www.dre.vanderbilt.edu/cosmic/
+ * 5. http://www.dre.vanderbilt.edu/~schmidt/
+ * 6. http://www.cs.wustl.edu/~schmidt/ACE-members.html
+ * 7. http://www.wustl.edu/
+ * 8. http://www.uci.edu/
+ * 9. http://www.vanderbilt.edu/
+ * 10. mailto:doc_group@cs.wustl.edu
+ * 11. http://www.cs.wustl.edu/~schmidt/ACE-users.html
+ * 12. http://www.cs.wustl.edu/~schmidt/ACE.html
+ * 13. http://www.cs.wustl.edu/~schmidt/TAO.html
+ * 14. http://www.dre.vanderbilt.edu/CIAO/
+ * 15. http://www.dre.vanderbilt.edu/~schmidt/DOC_ROOT/DAnCE/
+ * 16. http://www.dre.vanderbilt.edu/cosmic/
+ * 17. http://www.dre.vanderbilt.edu/
+ * 18. http://www.isis.vanderbilt.edu/
+ * 19. http://www.cs.wustl.edu/~schmidt/doc-center.html
+ * 20. http://www.cs.wustl.edu/~schmidt/commercial-support.html
+ * 21. mailto:d.schmidt@vanderbilt.edu
+ * 22. http://www.dre.vanderbilt.edu/~schmidt/
+ * 23. http://www.cs.wustl.edu/ACE.html
+ */
+
+void
+InitializeXPConditionVariable(userland_cond_t *cv)
+{
+ cv->waiters_count = 0;
+ InitializeCriticalSection(&(cv->waiters_count_lock));
+ cv->events_[C_SIGNAL] = CreateEvent (NULL, FALSE, FALSE, NULL);
+ cv->events_[C_BROADCAST] = CreateEvent (NULL, TRUE, FALSE, NULL);
+}
+
+void
+DeleteXPConditionVariable(userland_cond_t *cv)
+{
+ CloseHandle(cv->events_[C_BROADCAST]);
+ CloseHandle(cv->events_[C_SIGNAL]);
+ DeleteCriticalSection(&(cv->waiters_count_lock));
+}
+
+int
+SleepXPConditionVariable(userland_cond_t *cv, userland_mutex_t *mtx)
+{
+ int result, last_waiter;
+
+ EnterCriticalSection(&cv->waiters_count_lock);
+ cv->waiters_count++;
+ LeaveCriticalSection(&cv->waiters_count_lock);
+ LeaveCriticalSection (mtx);
+ result = WaitForMultipleObjects(2, cv->events_, FALSE, INFINITE);
+	if (result == -1) {
+		result = GetLastError();
+	}
+	EnterCriticalSection(&cv->waiters_count_lock);
+	cv->waiters_count--;
+	/* Only the last waiter woken by a broadcast resets the broadcast event. */
+	last_waiter = (result == (int)(WAIT_OBJECT_0 + C_BROADCAST)) &&
+	    (cv->waiters_count == 0);
+	LeaveCriticalSection(&cv->waiters_count_lock);
+ if (last_waiter)
+ ResetEvent(cv->events_[C_BROADCAST]);
+ EnterCriticalSection (mtx);
+ return result;
+}
+
+void
+WakeAllXPConditionVariable(userland_cond_t *cv)
+{
+ int have_waiters;
+ EnterCriticalSection(&cv->waiters_count_lock);
+ have_waiters = cv->waiters_count > 0;
+ LeaveCriticalSection(&cv->waiters_count_lock);
+ if (have_waiters)
+ SetEvent (cv->events_[C_BROADCAST]);
+}
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_usrreq.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_usrreq.c
new file mode 100644
index 000000000..bf66b3620
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_usrreq.c
@@ -0,0 +1,9071 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 363275 2020-07-17 15:09:49Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_var.h>
+#ifdef INET6
+#include <netinet6/sctp6_var.h>
+#endif
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_bsd_addr.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_callout.h>
+#else
+#include <netinet/udp.h>
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/eventhandler.h>
+#endif
+
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+#include <netinet/sctp_peeloff.h>
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+
+extern const struct sctp_cc_functions sctp_cc_functions[];
+extern const struct sctp_ss_functions sctp_ss_functions[];
+
+void
+#if defined(__Userspace__)
+sctp_init(uint16_t port,
+ int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+ void (*debug_printf)(const char *format, ...), int start_threads)
+#elif defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) &&!defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+sctp_init(struct protosw *pp SCTP_UNUSED, struct domain *dp SCTP_UNUSED)
+#else
+sctp_init(void)
+#endif
+{
+#if !defined(__Userspace__)
+ u_long sb_max_adj;
+
+#else
+ init_random();
+#endif
+ /* Initialize and modify the sysctled variables */
+ sctp_init_sysctls();
+#if defined(__Userspace__)
+ SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = port;
+#else
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sb_max_adj = (u_long)((u_quad_t) (sb_max) * MCLBYTES / (MSIZE + MCLBYTES));
+ SCTP_BASE_SYSCTL(sctp_sendspace) = sb_max_adj;
+#else
+ if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
+ SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
+ /*
+ * Allow a user to take no more than 1/2 the number of clusters or
+ * the SB_MAX, whichever is smaller, for the send window.
+ */
+ sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
+ SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
+ (((uint32_t)nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
+#endif
+ /*
+	 * Now for the recv window: should we take the same amount, or
+	 * should it be 1/2 of SB_MAX instead in the min above? For now,
+	 * just copy the send window.
+ */
+ SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
+#endif
+ SCTP_BASE_VAR(first_time) = 0;
+ SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
+#if defined(__Userspace__)
+#if !defined(_WIN32)
+#if defined(INET) || defined(INET6)
+ SCTP_BASE_VAR(userspace_route) = -1;
+#endif
+#endif
+#ifdef INET
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+#endif
+#ifdef INET6
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+#endif
+ SCTP_BASE_VAR(timer_thread_should_exit) = 0;
+ SCTP_BASE_VAR(conn_output) = conn_output;
+ SCTP_BASE_VAR(debug_printf) = debug_printf;
+ SCTP_BASE_VAR(crc32c_offloaded) = 0;
+ SCTP_BASE_VAR(iterator_thread_started) = 0;
+ SCTP_BASE_VAR(timer_thread_started) = 0;
+#endif
+#if defined(__Userspace__)
+ sctp_pcb_init(start_threads);
+ if (start_threads) {
+ sctp_start_timer_thread();
+ }
+#else
+ sctp_pcb_init();
+#endif
+#if defined(SCTP_PACKET_LOGGING)
+ SCTP_BASE_VAR(packet_log_writers) = 0;
+ SCTP_BASE_VAR(packet_log_end) = 0;
+ memset(&SCTP_BASE_VAR(packet_log_buffer), 0, SCTP_PACKET_LOG_SIZE);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_BASE_VAR(sctp_main_timer_ticks) = 0;
+ sctp_start_main_timer();
+ timeout(sctp_delayed_startup, NULL, 1);
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_BASE_VAR(eh_tag) = EVENTHANDLER_REGISTER(rt_addrmsg,
+ sctp_addr_change_event_handler, NULL, EVENTHANDLER_PRI_FIRST);
+#endif
+}
+
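In the userspace build, the port, conn_output, and debug_printf parameters of sctp_init() surface through usrsctp_init() in the public header. A hedged sketch of bringing the stack up for UDP encapsulation and tearing it down again (usrsctp_init()/usrsctp_finish() come from usrsctp.h, not this file):

    #include <usrsctp.h>

    int main(void) {
        /* Encapsulate in UDP port 9899; a NULL conn_output means the stack
         * opens its own UDP sockets; NULL debug_printf keeps it quiet. */
        usrsctp_init(9899, NULL, NULL);
        /* ... create sockets with usrsctp_socket(), run the application ... */
        while (usrsctp_finish() != 0) {
            /* returns -1 while associations still exist; retry */
        }
        return 0;
    }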
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#ifdef VIMAGE
+static void
+sctp_finish(void *unused __unused)
+{
+ EVENTHANDLER_DEREGISTER(rt_addrmsg, SCTP_BASE_VAR(eh_tag));
+ sctp_pcb_finish();
+}
+VNET_SYSUNINIT(sctp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, sctp_finish, NULL);
+#endif
+#else
+void
+sctp_finish(void)
+{
+#if defined(__APPLE__) && !defined(__Userspace__)
+ untimeout(sctp_delayed_startup, NULL);
+ sctp_over_udp_stop();
+ sctp_address_monitor_stop();
+ sctp_stop_main_timer();
+#endif
+#if defined(__Userspace__)
+#if defined(INET) || defined(INET6)
+ recv_thread_destroy();
+#endif
+ sctp_stop_timer_thread();
+#endif
+ sctp_pcb_finish();
+#if defined(_WIN32) && !defined(__Userspace__)
+ sctp_finish_sysctls();
+#endif
+}
+#endif
+
+void
+sctp_pathmtu_adjustment(struct sctp_tcb *stcb, uint16_t nxtsz)
+{
+ struct sctp_tmit_chunk *chk;
+ uint16_t overhead;
+
+ /* Adjust that too */
+ stcb->asoc.smallest_mtu = nxtsz;
+ /* now off to subtract IP_DF flag if needed */
+ overhead = IP_HDR_SIZE + sizeof(struct sctphdr);
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
+ if ((chk->send_size + overhead) > nxtsz) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((chk->send_size + overhead) > nxtsz) {
+ /*
+ * For this guy we also mark for immediate resend
+			 * since we sent too big a chunk
+ */
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ chk->rec.data.doing_fast_retransmit = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
+ chk->whoTo->flight_size,
+ chk->book_size,
+ (uint32_t)(uintptr_t)chk->whoTo,
+ chk->rec.data.tsn);
+ }
+ /* Clear any time so NO RTT is being done */
+ if (chk->do_rtt == 1) {
+ chk->do_rtt = 0;
+ chk->whoTo->rto_needed = 1;
+ }
+ }
+ }
+ }
+}
+
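Worked example of the check above: overhead is IP_HDR_SIZE plus the 12-byte SCTP common header, plus the AUTH chunk length when DATA must be authenticated. If that sums to, say, 52 bytes and the path MTU drops to 1280, every queued chunk with send_size above 1228 is flagged CHUNK_FLAGS_FRAGMENT_OK, and chunks already in the sent queue are additionally marked SCTP_DATAGRAM_RESEND with their flight-size accounting rolled back.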
+#ifdef INET
+#if !defined(__Userspace__)
+void
+sctp_notify(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint8_t icmp_type,
+ uint8_t icmp_code,
+ uint16_t ip_len,
+ uint32_t next_mtu)
+{
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+ int timer_stopped;
+
+ if (icmp_type != ICMP_UNREACH) {
+ /* We only care about unreachable */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if ((icmp_code == ICMP_UNREACH_NET) ||
+ (icmp_code == ICMP_UNREACH_HOST) ||
+ (icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
+ (icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
+ (icmp_code == ICMP_UNREACH_ISOLATED) ||
+ (icmp_code == ICMP_UNREACH_NET_PROHIB) ||
+ (icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
+#if defined(__NetBSD__)
+ (icmp_code == ICMP_UNREACH_ADMIN_PROHIBIT)) {
+#else
+ (icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
+#endif
+ /* Mark the net unreachable. */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* OK, that destination is NOT reachable. */
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state &= ~SCTP_ADDR_PF;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, 0,
+ (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else if ((icmp_code == ICMP_UNREACH_PROTOCOL) ||
+ (icmp_code == ICMP_UNREACH_PORT)) {
+ /* Treat it like an ABORT. */
+ sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+	/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
+#endif
+ /* no need to unlock here, since the TCB is gone */
+ } else if (icmp_code == ICMP_UNREACH_NEEDFRAG) {
+ if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* Find the next (smaller) MTU */
+ if (next_mtu == 0) {
+ /*
+ * Old type router that does not tell us what the next
+ * MTU is.
+			 * Rats, we will have to guess (in an educated
+			 * fashion, of course).
+ */
+ next_mtu = sctp_get_prev_mtu(ip_len);
+ }
+ /* Stop the PMTU timer. */
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ timer_stopped = 1;
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
+ } else {
+ timer_stopped = 0;
+ }
+ /* Update the path MTU. */
+ if (net->port) {
+ next_mtu -= sizeof(struct udphdr);
+ }
+ if (net->mtu > next_mtu) {
+ net->mtu = next_mtu;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (net->port) {
+ sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu + sizeof(struct udphdr));
+ } else {
+ sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu);
+ }
+#endif
+ }
+ /* Update the association MTU */
+ if (stcb->asoc.smallest_mtu > next_mtu) {
+ sctp_pathmtu_adjustment(stcb, next_mtu);
+ }
+ /* Finally, start the PMTU timer if it was running before. */
+ if (timer_stopped) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+#endif
+
+#if !defined(__Userspace__)
+void
+#if defined(__APPLE__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN)
+sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip, struct ifnet *ifp SCTP_UNUSED)
+#else
+sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+#endif
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct ip *outer_ip;
+#endif
+ struct ip *inner_ip;
+ struct sctphdr *sh;
+ struct icmp *icmp;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct sctp_init_chunk *ch;
+#endif
+ struct sockaddr_in src, dst;
+
+ if (sa->sa_family != AF_INET ||
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
+ return;
+ }
+ if (PRC_IS_REDIRECT(cmd)) {
+ vip = NULL;
+ } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
+ return;
+ }
+ if (vip != NULL) {
+ inner_ip = (struct ip *)vip;
+ icmp = (struct icmp *)((caddr_t)inner_ip -
+ (sizeof(struct icmp) - sizeof(struct ip)));
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
+#endif
+ sh = (struct sctphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
+ memset(&src, 0, sizeof(struct sockaddr_in));
+ src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ src.sin_len = sizeof(struct sockaddr_in);
+#endif
+ src.sin_port = sh->src_port;
+ src.sin_addr = inner_ip->ip_src;
+ memset(&dst, 0, sizeof(struct sockaddr_in));
+ dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ dst.sin_port = sh->dest_port;
+ dst.sin_addr = inner_ip->ip_dst;
+ /*
+ * 'dst' holds the dest of the packet that failed to be sent.
+ * 'src' holds our local endpoint address. Thus we reverse
+ * the dst and the src in the lookup.
+ */
+ inp = NULL;
+ net = NULL;
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+ (struct sockaddr *)&src,
+ &inp, &net, 1,
+ SCTP_DEFAULT_VRFID);
+ if ((stcb != NULL) &&
+ (net != NULL) &&
+ (inp != NULL)) {
+ /* Check the verification tag */
+ if (ntohl(sh->v_tag) != 0) {
+ /*
+ * This must be the verification tag used for
+ * sending out packets. We don't consider
+ * packets reflecting the verification tag.
+ */
+ if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ntohs(outer_ip->ip_len) >=
+ sizeof(struct ip) +
+ 8 + (inner_ip->ip_hl << 2) + 20) {
+ /*
+ * In this case we can check if we
+ * got an INIT chunk and if the
+ * initiate tag matches.
+ */
+ ch = (struct sctp_init_chunk *)(sh + 1);
+ if ((ch->ch.chunk_type != SCTP_INITIATION) ||
+ (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+#else
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+#endif
+ }
+ sctp_notify(inp, stcb, net,
+ icmp->icmp_type,
+ icmp->icmp_code,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ ntohs(inner_ip->ip_len),
+#else
+ inner_ip->ip_len,
+#endif
+ (uint32_t)ntohs(icmp->icmp_nextmtu));
+#if defined(__Userspace__)
+ if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb->sctp_socket != NULL)) {
+ struct socket *upcall_socket;
+
+ upcall_socket = stcb->sctp_socket;
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ if ((upcall_socket->so_upcall != NULL) &&
+ (upcall_socket->so_error != 0)) {
+ (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(upcall_socket);
+ sorele(upcall_socket);
+ }
+#endif
+ } else {
+ if ((stcb == NULL) && (inp != NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ }
+ return;
+}
+#endif
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static int
+sctp_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in addrs[2];
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+ int error;
+ uint32_t vrf_id;
+
+ /* FIX, for non-bsd is this right? */
+ vrf_id = SCTP_DEFAULT_VRFID;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+
+ if (error)
+ return (error);
+
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+
+ stcb = sctp_findassociation_addr_sa(sintosa(&addrs[1]),
+ sintosa(&addrs[0]),
+ &inp, &net, 1, vrf_id);
+ if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+ if ((inp != NULL) && (stcb == NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ goto cred_can_cont;
+ }
+
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ goto out;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+	/*
+	 * We use the write lock here only because the error leg needs it.
+	 * If we used RLOCK, we would have to wlock/decr/unlock/rlock,
+	 * which in theory could create a hole. Better to take the
+	 * stronger wlock up front.
+	 */
+ SCTP_INP_WLOCK(inp);
+ cred_can_cont:
+ error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
+ if (error) {
+ SCTP_INP_WUNLOCK(inp);
+ goto out;
+ }
+ cru2x(inp->sctp_socket->so_cred, &xuc);
+ SCTP_INP_WUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+out:
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred,
+ CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ 0, 0, sctp_getcred, "S,ucred",
+ "Get the ucred of a SCTP connection");
+#endif
+
+
+#ifdef INET
+#if defined(_WIN32) || defined(__Userspace__)
+int
+#elif defined(__FreeBSD__)
+static void
+#else
+static int
+#endif
+sctp_abort(struct socket *so)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ return;
+#else
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+#endif
+ }
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 16);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ SOCK_LOCK(so);
+ SCTP_SB_CLEAR(so->so_snd);
+ /* same for the rcv ones, they are only
+ * here for the accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so->so_usecount--;
+#else
+ /* Now null out the reference, we are completely detached. */
+ so->so_pcb = NULL;
+#endif
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+ return;
+#else
+ return (0);
+#endif
+}
+
+#if defined(__Userspace__)
+int
+#else
+static int
+#endif
+#if defined(__Userspace__)
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id)
+#elif defined(__FreeBSD__)
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED)
+#elif defined(_WIN32)
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, PKTHREAD p SCTP_UNUSED)
+#else
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSED)
+#endif
+{
+ struct sctp_inpcb *inp;
+ struct inpcb *ip_inp;
+ int error;
+#if !defined(__Userspace__)
+ uint32_t vrf_id = SCTP_DEFAULT_VRFID;
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+ if (error) {
+ return (error);
+ }
+ }
+ error = sctp_inpcb_alloc(so, vrf_id);
+ if (error) {
+ return (error);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
+ ip_inp = &inp->ip_inp.inp;
+ ip_inp->inp_vflag |= INP_IPV4;
+ ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+ SCTP_INP_WUNLOCK(inp);
+ return (0);
+}
+
+#if defined(__Userspace__)
+int
+sctp_bind(struct socket *so, struct sockaddr *addr) {
+ void *p = NULL;
+#elif defined(__FreeBSD__)
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+#elif defined(__APPLE__)
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, struct proc *p) {
+#elif defined(_WIN32)
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, PKTHREAD p) {
+#else
+static int
+sctp_bind(struct socket *so, struct mbuf *nam, struct proc *p)
+{
+ struct sockaddr *addr = nam ? mtod(nam, struct sockaddr *): NULL;
+
+#endif
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (addr != NULL) {
+#ifdef HAVE_SA_LEN
+ if ((addr->sa_family != AF_INET) ||
+ (addr->sa_len != sizeof(struct sockaddr_in))) {
+#else
+ if (addr->sa_family != AF_INET) {
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ }
+ return (sctp_inpcb_bind(so, addr, NULL, p));
+}
+
+#endif
+#if defined(__Userspace__)
+
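+/*
+ * Attach variant for AF_CONN sockets, where the userspace stack runs
+ * over a user-provided lower layer instead of an IP interface.
+ */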
+int
+sctpconn_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id)
+{
+ struct sctp_inpcb *inp;
+ struct inpcb *ip_inp;
+ int error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+ if (error) {
+ return (error);
+ }
+ }
+ error = sctp_inpcb_alloc(so, vrf_id);
+ if (error) {
+ return (error);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_CONN;
+ ip_inp = &inp->ip_inp.inp;
+ ip_inp->inp_vflag |= INP_CONN;
+ ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+ SCTP_INP_WUNLOCK(inp);
+ return (0);
+}
+
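+/* bind(2) handler for AF_CONN sockets; accepts only AF_CONN addresses. */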
+int
+sctpconn_bind(struct socket *so, struct sockaddr *addr)
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (addr != NULL) {
+#ifdef HAVE_SA_LEN
+ if ((addr->sa_family != AF_CONN) ||
+ (addr->sa_len != sizeof(struct sockaddr_conn))) {
+#else
+ if (addr->sa_family != AF_CONN) {
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ }
+ return (sctp_inpcb_bind(so, addr, NULL, NULL));
+}
+
+#endif
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
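+/*
+ * Close the socket: abort the associations if SO_LINGER with a zero
+ * timeout is set or unread data remains, otherwise close gracefully.
+ * The atomic_cmpset on sctp_flags ensures that only one caller marks
+ * the socket gone.
+ */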
+void
+sctp_close(struct socket *so)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL)
+ return;
+
+ /* Inform all the lower layer assocs that we
+ * are done.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#if defined(__Userspace__)
+ if (((so->so_options & SCTP_SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#else
+ if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#endif
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 13);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ } else {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 14);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ }
+ /* The socket is now detached, no matter what
+ * the state of the SCTP association is.
+ */
+ SOCK_LOCK(so);
+ SCTP_SB_CLEAR(so->so_snd);
+ /* same for the rcv ones; they are only
+ * here for the accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ /* Now null out the reference, we are completely detached. */
+ so->so_pcb = NULL;
+#endif
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return;
+}
+
+#else
+
+int
+sctp_detach(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ return;
+#else
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+#endif
+ }
+ sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#if defined(__Userspace__)
+ if (((so->so_options & SCTP_SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#else
+ if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#endif
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 13);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ } else {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 14);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ }
+ /* The socket is now detached, no matter what
+ * the state of the SCTP association is.
+ */
+ SCTP_SB_CLEAR(so->so_snd);
+ /* same for the rcv ones; they are only
+ * here for the accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ /* Now disconnect */
+ so->so_pcb = NULL;
+#endif
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ return;
+#else
+ return (0);
+#endif
+}
+#endif
+
+#if defined(__Userspace__)
+/* The __Userspace__ build does not call sctp_sendm. */
+#endif
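+/*
+ * Legacy packet-style send path: chain the mbufs onto the endpoint
+ * and, unless the caller signalled PRUS_MORETOCOME (FreeBSD/Apple
+ * only), push the queued packet out through sctp_output().
+ */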
+#if !(defined(_WIN32) && !defined(__Userspace__))
+int
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p);
+
+#else
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct proc *p);
+
+#endif
+
+int
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p)
+{
+#else
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct proc *p)
+{
+#endif
+ struct sctp_inpcb *inp;
+ int error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ sctp_m_freem(m);
+ return (EINVAL);
+ }
+ /* Must have a destination address if we are NOT a connected socket */
+ if ((addr == NULL) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) {
+ goto connected_type;
+ } else if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
+ error = EDESTADDRREQ;
+ sctp_m_freem(m);
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ return (error);
+ }
+#ifdef INET6
+ if (addr->sa_family != AF_INET) {
+ /* must be a v4 address! */
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
+ sctp_m_freem(m);
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ error = EDESTADDRREQ;
+ return (error);
+ }
+#endif /* INET6 */
+connected_type:
+ /* now what about control */
+ if (control) {
+ if (inp->control) {
+ sctp_m_freem(inp->control);
+ inp->control = NULL;
+ }
+ inp->control = control;
+ }
+ /* Place the data */
+ if (inp->pkt) {
+ SCTP_BUF_NEXT(inp->pkt_last) = m;
+ inp->pkt_last = m;
+ } else {
+ inp->pkt_last = inp->pkt = m;
+ }
+ if (
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ /* FreeBSD uses a flag passed */
+ ((flags & PRUS_MORETOCOME) == 0)
+#else
+ 1 /* OpenBSD does not have any "more to come"
+ * indication */
+#endif
+ ) {
+ /*
+ * Note: with the current version this code is only used by
+ * OpenBSD; NetBSD, FreeBSD, and macOS have methods for
+ * re-defining sosend to use sctp_sosend. One can optionally
+ * switch back to this code (by changing back the definitions),
+ * but this is not advisable. FreeBSD does still use this code
+ * when sending a file with sendfile(), though.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ int ret;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ inp->pkt = NULL;
+ inp->control = NULL;
+ return (ret);
+ } else {
+ return (0);
+ }
+}
+#endif
+
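+/*
+ * disconnect(2) is only meaningful for one-to-one (TCP model) sockets.
+ * Depending on the linger settings and queued data it either aborts
+ * the association or initiates a graceful SHUTDOWN sequence.
+ */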
+int
+sctp_disconnect(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+ /* No connection */
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct sctp_association *asoc;
+ struct sctp_tcb *stcb;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* We are about to be freed, out of here */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+#if defined(__Userspace__)
+ if (((so->so_options & SCTP_SO_LINGER) &&
+ (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#else
+ if (((so->so_options & SO_LINGER) &&
+ (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#endif
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) {
+ /* Left with Data unread */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
+ /* No TCB unlock needed, the assoc is gone */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return (0);
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ /* there is nothing queued to send, so done */
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ goto abort_anyway;
+ }
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN 1st time thru */
+ struct sctp_nets *netp;
+
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ if (stcb->asoc.alternate) {
+ netp = stcb->asoc.alternate;
+ } else {
+ netp = stcb->asoc.primary_destination;
+ }
+ sctp_send_shutdown(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, NULL);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send,
+ * so set SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF
+ * should be sent with no data. currently,
+ * we will allow user data to be sent first
+ * and move to SHUTDOWN-PENDING
+ */
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL);
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+ abort_anyway:
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
+ sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return (0);
+ } else {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+ }
+ }
+ soisdisconnecting(so);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ /* not reached */
+ } else {
+ /* UDP model does not support this */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ return (EOPNOTSUPP);
+ }
+}
+
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+int
+sctp_flush(struct socket *so, int how)
+{
+ /*
+ * We will just clear out the values and let the
+ * subsequent close clear out the data, if any.
+ * Note that if the user did a shutdown(SHUT_RD), they
+ * will not be able to read the data; the socket
+ * will block that from happening.
+ */
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_INP_RLOCK(inp);
+ /* For the 1 to many model this does nothing */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
+ /* First make sure the sb will be happy; we don't
+ * use these fields except maybe for the count.
+ */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_READ_LOCK(inp);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
+ SCTP_INP_READ_UNLOCK(inp);
+ SCTP_INP_WUNLOCK(inp);
+ so->so_rcv.sb_cc = 0;
+ so->so_rcv.sb_mbcnt = 0;
+ so->so_rcv.sb_mb = NULL;
+ }
+ if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
+ /* First make sure the sb will be happy; we don't
+ * use these fields except maybe for the count.
+ */
+ so->so_snd.sb_cc = 0;
+ so->so_snd.sb_mbcnt = 0;
+ so->so_snd.sb_mb = NULL;
+
+ }
+ return (0);
+}
+#endif
+
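+/*
+ * shutdown(2) handler. Only the TCP model supports it (with SHUT_WR or
+ * SHUT_RDWR semantics); for the one-to-many model the receive state
+ * that soshutdown() took away is restored and EOPNOTSUPP is returned.
+ */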
+int
+sctp_shutdown(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_INP_RLOCK(inp);
+ /* For the UDP model this is an invalid call */
+ if (!((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+ /* Restore the flags that the soshutdown took away. */
+#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+#else
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_CANTRCVMORE;
+ SOCK_UNLOCK(so);
+#endif
+ /* This proc will wake up for reads and do nothing (I hope) */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ return (EOPNOTSUPP);
+ } else {
+ /*
+ * OK, if we reach here it is the TCP model and the request is
+ * either SHUT_WR or SHUT_RDWR, so we set the shutdown flag
+ * against the association.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+ struct sctp_nets *netp;
+
+ if ((so->so_state &
+ (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ return (ENOTCONN);
+ }
+ socantsendmore(so);
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ /*
+ * Ok, we hit the case that the shutdown call was
+ * made after an abort or something. Nothing to do
+ * now.
+ */
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_ECHOED) &&
+ (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN)) {
+ /* If we are not in or before ESTABLISHED, there is
+ * no protocol action required.
+ */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ if (stcb->asoc.alternate) {
+ netp = stcb->asoc.alternate;
+ } else {
+ netp = stcb->asoc.primary_destination;
+ }
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb, netp);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, netp);
+ } else {
+ /*
+ * We still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING.
+ */
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
+ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+ abort_anyway:
+ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
+ SCTP_INP_RUNLOCK(inp);
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ op_err, SCTP_SO_LOCKED);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return (0);
+ }
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL);
+ /* XXX: Why do this in the case where we still have data queued? */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return (0);
+ }
+}
+
+/*
+ * copies a "user" presentable address and removes embedded scope, etc.
+ * returns 0 on success; it currently cannot fail, addresses of unknown
+ * families are simply not copied.
+ */
+static uint32_t
+sctp_fill_user_address(struct sockaddr *dst, struct sockaddr *src)
+{
+#ifdef INET6
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+ struct sockaddr_in6 lsa6;
+
+ src = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)src,
+ &lsa6);
+#endif
+#endif
+#ifdef HAVE_SA_LEN
+ memcpy(dst, src, src->sa_len);
+#else
+ switch (src->sa_family) {
+#ifdef INET
+ case AF_INET:
+ memcpy(dst, src, sizeof(struct sockaddr_in));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ memcpy(dst, src, sizeof(struct sockaddr_in6));
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(dst, src, sizeof(struct sockaddr_conn));
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+#endif
+ return (0);
+}
+
+
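+/*
+ * Pack the local addresses visible to an endpoint (or association)
+ * into the caller-supplied buffer, honouring the relevant scoping
+ * rules. At most 'limit' bytes are written; the number of bytes
+ * actually packed is returned.
+ */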
+static size_t
+sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ size_t limit,
+ struct sockaddr *addr,
+ uint32_t vrf_id)
+{
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ size_t actual;
+ int loopback_scope;
+#if defined(INET)
+ int ipv4_local_scope, ipv4_addr_legal;
+#endif
+#if defined(INET6)
+ int local_scope, site_scope, ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+ int conn_addr_legal;
+#endif
+ struct sctp_vrf *vrf;
+
+ SCTP_IPI_ADDR_LOCK_ASSERT();
+ actual = 0;
+ if (limit == 0)
+ return (actual);
+
+ if (stcb) {
+ /* Turn on all the appropriate scope */
+ loopback_scope = stcb->asoc.scope.loopback_scope;
+#if defined(INET)
+ ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+ ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+#endif
+#if defined(INET6)
+ local_scope = stcb->asoc.scope.local_scope;
+ site_scope = stcb->asoc.scope.site_scope;
+ ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+ conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+ } else {
+ /* Use generic values for endpoints. */
+ loopback_scope = 1;
+#if defined(INET)
+ ipv4_local_scope = 1;
+#endif
+#if defined(INET6)
+ local_scope = 1;
+ site_scope = 1;
+#endif
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+#if defined(INET6)
+ ipv6_addr_legal = 1;
+#endif
+#if defined(INET)
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ ipv4_addr_legal = 0;
+ } else {
+ ipv4_addr_legal = 1;
+ }
+#endif
+#if defined(__Userspace__)
+ conn_addr_legal = 0;
+#endif
+ } else {
+#if defined(INET6)
+ ipv6_addr_legal = 0;
+#endif
+#if defined(__Userspace__)
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+ conn_addr_legal = 1;
+#if defined(INET)
+ ipv4_addr_legal = 0;
+#endif
+ } else {
+ conn_addr_legal = 0;
+#if defined(INET)
+ ipv4_addr_legal = 1;
+#endif
+ }
+#else
+#if defined(INET)
+ ipv4_addr_legal = 1;
+#endif
+#endif
+ }
+ }
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ return (0);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* Skip loopback if loopback_scope not set */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (stcb) {
+ /*
+ * For the BOUND-ALL case, the list
+ * associated with a TCB is always
+ * considered a reverse list, i.e.
+ * it lists addresses that are NOT
+ * part of the association. If this
+ * is one of those we must skip it.
+ */
+ if (sctp_is_addr_restricted(stcb,
+ sctp_ifa)) {
+ continue;
+ }
+ }
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = &sctp_ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+ /*
+ * we skip unspecified
+ * addresses
+ */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ continue;
+ }
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ if (actual + sizeof(struct sockaddr_in6) > limit) {
+ return (actual);
+ }
+ in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)addr);
+ ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport;
+ addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in6));
+ actual += sizeof(struct sockaddr_in6);
+ } else {
+#endif
+ if (actual + sizeof(struct sockaddr_in) > limit) {
+ return (actual);
+ }
+ memcpy(addr, sin, sizeof(struct sockaddr_in));
+ ((struct sockaddr_in *)addr)->sin_port = inp->sctp_lport;
+ addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in));
+ actual += sizeof(struct sockaddr_in);
+#ifdef INET6
+ }
+#endif
+ } else {
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
+ struct sockaddr_in6 lsa6;
+#endif
+ sin6 = &sctp_ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /*
+ * we skip unspecified
+ * addresses
+ */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (sin6->sin6_scope_id == 0) {
+#ifdef SCTP_KAME
+ if (sa6_recoverscope(sin6) != 0)
+ /*
+ * bad link
+ * local
+ * address
+ */
+ continue;
+#else
+ lsa6 = *sin6;
+ if (in6_recoverscope(&lsa6,
+ &lsa6.sin6_addr,
+ NULL))
+ /*
+ * bad link
+ * local
+ * address
+ */
+ continue;
+ sin6 = &lsa6;
+#endif /* SCTP_KAME */
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ if (actual + sizeof(struct sockaddr_in6) > limit) {
+ return (actual);
+ }
+ memcpy(addr, sin6, sizeof(struct sockaddr_in6));
+ ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport;
+ addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in6));
+ actual += sizeof(struct sockaddr_in6);
+ } else {
+ continue;
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (conn_addr_legal) {
+ if (actual + sizeof(struct sockaddr_conn) > limit) {
+ return (actual);
+ }
+ memcpy(addr, &sctp_ifa->address.sconn, sizeof(struct sockaddr_conn));
+ ((struct sockaddr_conn *)addr)->sconn_port = inp->sctp_lport;
+ addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_conn));
+ actual += sizeof(struct sockaddr_conn);
+ } else {
+ continue;
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+ size_t sa_len;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (stcb) {
+ if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+ continue;
+ }
+ }
+#ifdef HAVE_SA_LEN
+ sa_len = laddr->ifa->address.sa.sa_len;
+#else
+ switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa_len = sizeof(struct sockaddr_in);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ sa_len = sizeof(struct sockaddr_conn);
+ break;
+#endif
+ default:
+ /* TSNH */
+ sa_len = 0;
+ break;
+ }
+#endif
+ if (actual + sa_len > limit) {
+ return (actual);
+ }
+ if (sctp_fill_user_address(addr, &laddr->ifa->address.sa))
+ continue;
+ switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct sockaddr_in *)addr)->sin_port = inp->sctp_lport;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ ((struct sockaddr_conn *)addr)->sconn_port = inp->sctp_lport;
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ addr = (struct sockaddr *)((caddr_t)addr + sa_len);
+ actual += sa_len;
+ }
+ }
+ return (actual);
+}
+
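+/* Lock-taking wrapper around sctp_fill_up_addresses_vrf(). */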
+static size_t
+sctp_fill_up_addresses(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ size_t limit,
+ struct sockaddr *addr)
+{
+ size_t size = 0;
+#ifdef SCTP_MVRF
+ uint32_t id;
+#endif
+
+ SCTP_IPI_ADDR_RLOCK();
+#ifdef SCTP_MVRF
+/*
+ * FIX ME: ?? this WILL report duplicate addresses if they appear
+ * in more than one VRF.
+ */
+ /* fill up addresses for all VRFs on the endpoint */
+ for (id = 0; (id < inp->num_vrfs) && (size < limit); id++) {
+ size_t added;
+
+ added = sctp_fill_up_addresses_vrf(inp, stcb, limit - size, addr,
+ inp->m_vrf_ids[id]);
+ addr = (struct sockaddr *)((caddr_t)addr + added);
+ size += added;
+ }
+#else
+ /* fill up addresses for the endpoint's default vrf */
+ size = sctp_fill_up_addresses_vrf(inp, stcb, limit, addr,
+ inp->def_vrf_id);
+#endif
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (size);
+}
+
+static int
+sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
+{
+ int cnt = 0;
+ struct sctp_vrf *vrf = NULL;
+
+ /*
+ * In both the sub-set bound and bound-all cases we return the MAXIMUM
+ * number of addresses that you COULD get. In reality the sub-set
+ * bound may have an exclusion list for a given TCB OR in the
+ * bound-all case a TCB may NOT include the loopback or other
+ * addresses as well.
+ */
+ SCTP_IPI_ADDR_LOCK_ASSERT();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ return (0);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ /* Count them if they are the right type */
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
+ cnt += sizeof(struct sockaddr_in6);
+ else
+ cnt += sizeof(struct sockaddr_in);
+#else
+ cnt += sizeof(struct sockaddr_in);
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ cnt += sizeof(struct sockaddr_in6);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ cnt += sizeof(struct sockaddr_conn);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
+ cnt += sizeof(struct sockaddr_in6);
+ else
+ cnt += sizeof(struct sockaddr_in);
+#else
+ cnt += sizeof(struct sockaddr_in);
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ cnt += sizeof(struct sockaddr_in6);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ cnt += sizeof(struct sockaddr_conn);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ return (cnt);
+}
+
+static int
+sctp_count_max_addresses(struct sctp_inpcb *inp)
+{
+ int cnt = 0;
+#ifdef SCTP_MVRF
+ int id;
+#endif
+
+ SCTP_IPI_ADDR_RLOCK();
+#ifdef SCTP_MVRF
+/*
+ * FIX ME: ?? this WILL count duplicate addresses if they appear
+ * in more than one VRF.
+ */
+ /* count addresses for all VRFs on the endpoint */
+ for (id = 0; id < inp->num_vrfs; id++) {
+ cnt += sctp_count_max_addresses_vrf(inp, inp->m_vrf_ids[id]);
+ }
+#else
+ /* count addresses for the endpoint's default VRF */
+ cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
+#endif
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (cnt);
+}
+
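+/*
+ * Common implementation behind the sctp_connectx() socket options. The
+ * option buffer starts with the address count, followed by the packed
+ * sockaddrs; on success the association id is written back over the
+ * start of that same buffer. With 'delay' set, sending the INIT is
+ * deferred until the delayed-connection timer fires.
+ */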
+static int
+sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
+ size_t optsize, void *p, int delay)
+{
+ int error;
+ int creat_lock_on = 0;
+ struct sctp_tcb *stcb = NULL;
+ struct sockaddr *sa;
+ unsigned int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
+ uint32_t vrf_id;
+ sctp_assoc_t *a_id;
+
+ SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ return (EALREADY);
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_ASOC_CREATE_LOCK(inp);
+ creat_lock_on = 1;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+ error = EFAULT;
+ goto out_now;
+ }
+ totaddrp = (unsigned int *)optval;
+ totaddr = *totaddrp;
+ sa = (struct sockaddr *)(totaddrp + 1);
+ error = sctp_connectx_helper_find(inp, sa, totaddr, &num_v4, &num_v6, (unsigned int)(optsize - sizeof(int)));
+ if (error != 0) {
+ /* Already have or am bringing up an association */
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ creat_lock_on = 0;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ goto out_now;
+ }
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (num_v6 > 0)) {
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (num_v4 > 0)) {
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ /*
+ * if IPV6_V6ONLY flag, ignore connections destined
+ * to a v4 addr or v4-mapped addr
+ */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ }
+#endif /* INET6 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+ /* Bind an ephemeral port */
+ error = sctp_inpcb_bind(so, NULL, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ }
+
+ /* FIX ME: do we want to pass in a vrf on the connect call? */
+ vrf_id = inp->def_vrf_id;
+
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
+ inp->sctp_ep.pre_open_stream_count,
+ inp->sctp_ep.port,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ (struct thread *)p,
+#elif defined(_WIN32) && !defined(__Userspace__)
+ (PKTHREAD)p,
+#else
+ (struct proc *)p,
+#endif
+ SCTP_INITIALIZE_AUTH_PARAMS);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ goto out_now;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ /* move to second address */
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+ break;
+#endif
+ default:
+ break;
+ }
+
+ error = 0;
+ sctp_connectx_helper_add(stcb, sa, (totaddr-1), &error);
+ /* Fill in the return id */
+ if (error) {
+ goto out_now;
+ }
+ a_id = (sctp_assoc_t *)optval;
+ *a_id = sctp_get_associd(stcb);
+
+ if (delay) {
+ /* doing delayed connection */
+ stcb->asoc.delayed_connection = 1;
+ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
+ } else {
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ out_now:
+ if (creat_lock_on) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ }
+ SCTP_INP_DECR_REF(inp);
+ return (error);
+}
+
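+/*
+ * Resolve an assoc id to a locked stcb (or NULL). This is a macro
+ * rather than a function because it uses 'break' and 'error', so it
+ * must be expanded directly inside the option-processing switch below.
+ */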
+#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
+ SCTP_INP_RLOCK(inp); \
+ stcb = LIST_FIRST(&inp->sctp_asoc_list); \
+ if (stcb) { \
+ SCTP_TCB_LOCK(stcb); \
+ } \
+ SCTP_INP_RUNLOCK(inp); \
+ } else if (assoc_id > SCTP_ALL_ASSOC) { \
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
+ if (stcb == NULL) { \
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
+ error = ENOENT; \
+ break; \
+ } \
+ } else { \
+ stcb = NULL; \
+ } \
+}
+
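+/*
+ * Verify that the option buffer is at least sizeof(type) before
+ * casting it; otherwise set error = EINVAL and break out of the
+ * enclosing switch case.
+ */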
+#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
+ if (size < sizeof(type)) { \
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
+ error = EINVAL; \
+ break; \
+ } else { \
+ destp = (type *)srcp; \
+ } \
+}
+
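+/*
+ * getsockopt() workhorse. On entry *optsize holds the size of the
+ * caller's buffer; on return it holds the number of bytes written.
+ */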
+#if defined(__Userspace__)
+int
+#else
+static int
+#endif
+sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
+ void *p)
+{
+ struct sctp_inpcb *inp = NULL;
+ int error, val = 0;
+ struct sctp_tcb *stcb = NULL;
+
+ if (optval == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = 0;
+
+ switch (optname) {
+ case SCTP_NODELAY:
+ case SCTP_AUTOCLOSE:
+ case SCTP_EXPLICIT_EOR:
+ case SCTP_AUTO_ASCONF:
+ case SCTP_DISABLE_FRAGMENTS:
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ case SCTP_USE_EXT_RCVINFO:
+ SCTP_INP_RLOCK(inp);
+ switch (optname) {
+ case SCTP_DISABLE_FRAGMENTS:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
+ break;
+ case SCTP_AUTO_ASCONF:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* only valid for bound all sockets */
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto flags_out;
+ }
+ break;
+ case SCTP_EXPLICIT_EOR:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ break;
+ case SCTP_NODELAY:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
+ break;
+ case SCTP_USE_EXT_RCVINFO:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
+ break;
+ case SCTP_AUTOCLOSE:
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
+ val = sctp_ticks_to_secs(inp->sctp_ep.auto_close_time);
+ else
+ val = 0;
+ break;
+
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ } /* end switch (optname) */
+ if (*optsize < sizeof(val)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ flags_out:
+ SCTP_INP_RUNLOCK(inp);
+ if (error == 0) {
+ /* return the option value */
+ *(int *)optval = val;
+ *optsize = sizeof(val);
+ }
+ break;
+ case SCTP_GET_PACKET_LOG:
+ {
+#ifdef SCTP_PACKET_LOGGING
+ uint8_t *target;
+ int ret;
+
+ SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
+ ret = sctp_copy_out_packet_log(target, (int)*optsize);
+ *optsize = ret;
+#else
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+#endif
+ break;
+ }
+ case SCTP_REUSE_PORT:
+ {
+ uint32_t *value;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ /* Can't do this for a 1-m socket */
+ error = EINVAL;
+ break;
+ }
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ *value = inp->partial_delivery_point;
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_FRAGMENT_INTERLEAVE:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
+ *value = SCTP_FRAG_LEVEL_2;
+ } else {
+ *value = SCTP_FRAG_LEVEL_1;
+ }
+ } else {
+ *value = SCTP_FRAG_LEVEL_0;
+ }
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_INTERLEAVING_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.idata_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->idata_supported) {
+ av->assoc_value = 1;
+ } else {
+ av->assoc_value = 0;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_CMT_ON_OFF:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ av->assoc_value = stcb->asoc.sctp_cmt_on_off;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_cmt_on_off;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_PLUGGABLE_CC:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ av->assoc_value = stcb->asoc.congestion_control_module;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_CC_OPTION:
+ {
+ struct sctp_cc_option *cc_opt;
+
+ SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, *optsize);
+ SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id);
+ if (stcb == NULL) {
+ error = EINVAL;
+ } else {
+ if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) {
+ error = ENOTSUP;
+ } else {
+ error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option)(stcb, 0, cc_opt);
+ *optsize = sizeof(struct sctp_cc_option);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ }
+ case SCTP_PLUGGABLE_SS:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ av->assoc_value = stcb->asoc.stream_scheduling_module;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_ep.sctp_default_ss_module;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_SS_VALUE:
+ {
+ struct sctp_stream_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ if ((av->stream_id >= stcb->asoc.streamoutcnt) ||
+ (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id],
+ &av->stream_value) < 0)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ } else {
+ *optsize = sizeof(struct sctp_stream_value);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* Can't get stream value without association */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ break;
+ }
+ case SCTP_GET_ADDR_LEN:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ error = EINVAL;
+#ifdef INET
+ if (av->assoc_value == AF_INET) {
+ av->assoc_value = sizeof(struct sockaddr_in);
+ error = 0;
+ }
+#endif
+#ifdef INET6
+ if (av->assoc_value == AF_INET6) {
+ av->assoc_value = sizeof(struct sockaddr_in6);
+ error = 0;
+ }
+#endif
+#if defined(__Userspace__)
+ if (av->assoc_value == AF_CONN) {
+ av->assoc_value = sizeof(struct sockaddr_conn);
+ error = 0;
+ }
+#endif
+ if (error) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_GET_ASSOC_NUMBER:
+ {
+ uint32_t *value, cnt;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* Can't do this for a 1-1 socket */
+ error = EINVAL;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ cnt = 0;
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ cnt++;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ *value = cnt;
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_GET_ASSOC_ID_LIST:
+ {
+ struct sctp_assoc_ids *ids;
+ uint32_t at;
+ size_t limit;
+
+ SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* Can't do this for a 1-1 socket */
+ error = EINVAL;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ at = 0;
+ limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ if (at < limit) {
+ ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
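+ /* 'at' can only be 0 here if the uint32_t wrapped around; treat that as an error. */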
+ if (at == 0) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (error == 0) {
+ ids->gaids_number_of_ids = at;
+ *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
+ }
+ break;
+ }
+ case SCTP_CONTEXT:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.context;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_context;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_VRF_ID:
+ {
+ uint32_t *default_vrfid;
+
+ SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
+ *default_vrfid = inp->def_vrf_id;
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_GET_ASOC_VRF:
+ {
+ struct sctp_assoc_value *id;
+
+ SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, id->assoc_id);
+ if (stcb == NULL) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+ id->assoc_value = stcb->asoc.vrf_id;
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_GET_VRF_IDS:
+ {
+#ifdef SCTP_MVRF
+ int siz_needed;
+ uint32_t *vrf_ids;
+
+ SCTP_CHECK_AND_CAST(vrf_ids, optval, uint32_t, *optsize);
+ siz_needed = inp->num_vrfs * sizeof(uint32_t);
+ if (*optsize < siz_needed) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+ memcpy(vrf_ids, inp->m_vrf_ids, siz_needed);
+ *optsize = siz_needed;
+ }
+#else
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+#endif
+ break;
+ }
+ case SCTP_GET_NONCE_VALUES:
+ {
+ struct sctp_get_nonce_values *gnv;
+
+ SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
+ SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
+
+ if (stcb) {
+ gnv->gn_peers_tag = stcb->asoc.peer_vtag;
+ gnv->gn_local_tag = stcb->asoc.my_vtag;
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_get_nonce_values);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ break;
+ }
+ case SCTP_DELAYED_SACK:
+ {
+ struct sctp_sack_info *sack;
+
+ SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
+ if (stcb) {
+ sack->sack_delay = stcb->asoc.delayed_ack;
+ sack->sack_freq = stcb->asoc.sack_freq;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (sack->sack_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ sack->sack_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_sack_info);
+ }
+ break;
+ }
+ case SCTP_GET_SNDBUF_USE:
+ {
+ struct sctp_sockstat *ss;
+
+ SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
+ SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
+
+ if (stcb) {
+ ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
+ ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
+ stcb->asoc.size_on_all_streams);
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_sockstat);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ break;
+ }
+ case SCTP_MAX_BURST:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.max_burst;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_ep.max_burst;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_MAXSEG:
+ {
+ struct sctp_assoc_value *av;
+ int ovh;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+ if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
+ av->assoc_value = 0;
+ else
+ av->assoc_value = inp->sctp_frag_point - ovh;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_GET_STAT_LOG:
+ error = sctp_fill_stat_log(optval, optsize);
+ break;
+ case SCTP_EVENTS:
+ {
+ struct sctp_event_subscribe *events;
+
+ SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
+ memset(events, 0, sizeof(struct sctp_event_subscribe));
+ SCTP_INP_RLOCK(inp);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
+ events->sctp_data_io_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
+ events->sctp_association_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
+ events->sctp_address_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
+ events->sctp_send_failure_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
+ events->sctp_peer_error_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
+ events->sctp_shutdown_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
+ events->sctp_partial_delivery_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
+ events->sctp_adaptation_layer_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
+ events->sctp_authentication_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
+ events->sctp_sender_dry_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
+ events->sctp_stream_reset_event = 1;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(struct sctp_event_subscribe);
+ break;
+ }
+ case SCTP_ADAPTATION_LAYER:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+
+ SCTP_INP_RLOCK(inp);
+ *value = inp->sctp_ep.adaptation_layer_indicator;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_SET_INITIAL_DBG_SEQ:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ SCTP_INP_RLOCK(inp);
+ *value = inp->sctp_ep.initial_sequence_debug;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_GET_LOCAL_ADDR_SIZE:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ SCTP_INP_RLOCK(inp);
+ *value = sctp_count_max_addresses(inp);
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint32_t);
+ break;
+ }
+ case SCTP_GET_REMOTE_ADDR_SIZE:
+ {
+ uint32_t *value;
+ size_t size;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ /* FIXME MT: change to sctp_assoc_value? */
+ SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) *value);
+
+ if (stcb) {
+ size = 0;
+ /* Count the sizes */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ size += sizeof(struct sockaddr_in6);
+ } else {
+ size += sizeof(struct sockaddr_in);
+ }
+#else
+ size += sizeof(struct sockaddr_in);
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ size += sizeof(struct sockaddr_in6);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ size += sizeof(struct sockaddr_conn);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ *value = (uint32_t) size;
+ *optsize = sizeof(uint32_t);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ break;
+ }
+ case SCTP_GET_PEER_ADDRESSES:
+ /*
+ * Get the address information; an array is passed in
+ * and we pack the addresses into it.
+ */
+ {
+ size_t cpsz, left;
+ struct sockaddr *addr;
+ struct sctp_nets *net;
+ struct sctp_getaddresses *saddr;
+
+ SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
+ SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
+
+ if (stcb) {
+ left = *optsize - offsetof(struct sctp_getaddresses, addr);
+ *optsize = offsetof(struct sctp_getaddresses, addr);
+ addr = &saddr->addr[0].sa;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ cpsz = sizeof(struct sockaddr_in6);
+ } else {
+ cpsz = sizeof(struct sockaddr_in);
+ }
+#else
+ cpsz = sizeof(struct sockaddr_in);
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ cpsz = sizeof(struct sockaddr_in6);
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ cpsz = sizeof(struct sockaddr_conn);
+ break;
+#endif
+ default:
+ cpsz = 0;
+ break;
+ }
+ if (cpsz == 0) {
+ break;
+ }
+ if (left < cpsz) {
+ /* not enough room. */
+ break;
+ }
+#if defined(INET) && defined(INET6)
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
+ (net->ro._l_addr.sa.sa_family == AF_INET)) {
+ /* Must map the address */
+ in6_sin_2_v4mapsin6(&net->ro._l_addr.sin,
+ (struct sockaddr_in6 *)addr);
+ } else {
+ memcpy(addr, &net->ro._l_addr, cpsz);
+ }
+#else
+ memcpy(addr, &net->ro._l_addr, cpsz);
+#endif
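+ /* The port field is at the same offset in sockaddr_in,
+ * sockaddr_in6 and sockaddr_conn, so this single store
+ * works for every family packed above.
+ */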
+ ((struct sockaddr_in *)addr)->sin_port = stcb->rport;
+
+ addr = (struct sockaddr *)((caddr_t)addr + cpsz);
+ left -= cpsz;
+ *optsize += cpsz;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ }
+ break;
+ }
+ case SCTP_GET_LOCAL_ADDRESSES:
+ {
+ size_t limit, actual;
+ struct sctp_getaddresses *saddr;
+
+ SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
+ SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
+
+ limit = *optsize - offsetof(struct sctp_getaddresses, addr);
+ actual = sctp_fill_up_addresses(inp, stcb, limit, &saddr->addr[0].sa);
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ *optsize = offsetof(struct sctp_getaddresses, addr) + actual;
+ break;
+ }
+ case SCTP_PEER_ADDR_PARAMS:
+ {
+ struct sctp_paddrparams *paddrp;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
+ SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (paddrp->spp_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&paddrp->spp_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&paddrp->spp_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&paddrp->spp_address;
+ }
+#else
+ addr = (struct sockaddr *)&paddrp->spp_address;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+ /* We increment the refcount here since sctp_findassociation_ep_addr()
+ * will do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL.
+ */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)addr;
+ if (sconn->sconn_addr != NULL) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+
+ if (stcb != NULL) {
+ /* Applies to the specific association */
+ paddrp->spp_flags = 0;
+ if (net != NULL) {
+ paddrp->spp_hbinterval = net->heart_beat_delay;
+ paddrp->spp_pathmaxrxt = net->failure_threshold;
+ paddrp->spp_pathmtu = net->mtu;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ paddrp->spp_pathmtu -= SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ paddrp->spp_pathmtu -= SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ paddrp->spp_pathmtu -= sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+ /* get flags for HB */
+ if (net->dest_state & SCTP_ADDR_NOHB) {
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ } else {
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ }
+ /* get flags for PMTU */
+ if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
+ paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+ } else {
+ paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+ }
+ if (net->dscp & 0x01) {
+ paddrp->spp_dscp = net->dscp & 0xfc;
+ paddrp->spp_flags |= SPP_DSCP;
+ }
+#ifdef INET6
+ if ((net->ro._l_addr.sa.sa_family == AF_INET6) &&
+ (net->flowlabel & 0x80000000)) {
+ paddrp->spp_ipv6_flowlabel = net->flowlabel & 0x000fffff;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+ } else {
+ /*
+ * No destination so return default
+ * value
+ */
+ paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
+ paddrp->spp_pathmtu = stcb->asoc.default_mtu;
+ if (stcb->asoc.default_dscp & 0x01) {
+ paddrp->spp_dscp = stcb->asoc.default_dscp & 0xfc;
+ paddrp->spp_flags |= SPP_DSCP;
+ }
+#ifdef INET6
+ if (stcb->asoc.default_flowlabel & 0x80000000) {
+ paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel & 0x000fffff;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+ /* default settings should be these */
+ if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ } else {
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ }
+ if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
+ paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+ } else {
+ paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+ }
+ paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
+ }
+ paddrp->spp_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC))) {
+ /* Use endpoint defaults */
+ SCTP_INP_RLOCK(inp);
+ paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
+ paddrp->spp_hbinterval = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+ paddrp->spp_assoc_id = SCTP_FUTURE_ASSOC;
+ /* get inp's default */
+ if (inp->sctp_ep.default_dscp & 0x01) {
+ paddrp->spp_dscp = inp->sctp_ep.default_dscp & 0xfc;
+ paddrp->spp_flags |= SPP_DSCP;
+ }
+#ifdef INET6
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (inp->sctp_ep.default_flowlabel & 0x80000000)) {
+ paddrp->spp_ipv6_flowlabel = inp->sctp_ep.default_flowlabel & 0x000fffff;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+ paddrp->spp_pathmtu = inp->sctp_ep.default_mtu;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ } else {
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ }
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
+ paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+ } else {
+ paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_paddrparams);
+ }
+ break;
+ }
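+ /*
+ * Usage sketch (illustrative, not part of the imported sources): the
+ * getter above is driven from application code roughly like this, where
+ * `sock` is an assumed usrsctp socket:
+ *
+ *   struct sctp_paddrparams params;
+ *   socklen_t len = (socklen_t)sizeof(params);
+ *
+ *   memset(&params, 0, sizeof(params));
+ *   params.spp_assoc_id = SCTP_FUTURE_ASSOC;
+ *   params.spp_address.ss_family = AF_INET; // wildcard address -> defaults
+ *   if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
+ *                          &params, &len) == 0) {
+ *       // spp_hbinterval, spp_pathmaxrxt and spp_pathmtu are now valid
+ *   }
+ */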
+ case SCTP_GET_PEER_ADDR_INFO:
+ {
+ struct sctp_paddrinfo *paddri;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (paddri->spinfo_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&paddri->spinfo_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&paddri->spinfo_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&paddri->spinfo_address;
+ }
+#else
+ addr = (struct sockaddr *)&paddri->spinfo_address;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+ /* We increment the refcount here since sctp_findassociation_ep_addr()
+ * will do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL.
+ */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if ((stcb != NULL) && (net != NULL)) {
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /* It's unconfirmed */
+ paddri->spinfo_state = SCTP_UNCONFIRMED;
+ } else if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* It's active */
+ paddri->spinfo_state = SCTP_ACTIVE;
+ } else {
+ /* It's inactive */
+ paddri->spinfo_state = SCTP_INACTIVE;
+ }
+ paddri->spinfo_cwnd = net->cwnd;
+ paddri->spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT;
+ paddri->spinfo_rto = net->RTO;
+ paddri->spinfo_assoc_id = sctp_get_associd(stcb);
+ paddri->spinfo_mtu = net->mtu;
+ switch (addr->sa_family) {
+#if defined(INET)
+ case AF_INET:
+ paddri->spinfo_mtu -= SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ paddri->spinfo_mtu -= SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ paddri->spinfo_mtu -= sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_paddrinfo);
+ } else {
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ }
+ break;
+ }
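+ /*
+ * Usage sketch (illustrative): `sock`, `peer` and `peer_len` are assumed
+ * to describe a connected usrsctp socket and one of its peer addresses:
+ *
+ *   struct sctp_paddrinfo info;
+ *   socklen_t len = (socklen_t)sizeof(info);
+ *
+ *   memset(&info, 0, sizeof(info));
+ *   memcpy(&info.spinfo_address, peer, peer_len);
+ *   if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
+ *                          &info, &len) == 0) {
+ *       // spinfo_state is SCTP_ACTIVE, SCTP_INACTIVE or SCTP_UNCONFIRMED;
+ *       // spinfo_cwnd, spinfo_srtt, spinfo_rto and spinfo_mtu are filled in
+ *   }
+ */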
+ case SCTP_PCB_STATUS:
+ {
+ struct sctp_pcbinfo *spcb;
+
+ SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
+ sctp_fill_pcbinfo(spcb);
+ *optsize = sizeof(struct sctp_pcbinfo);
+ break;
+ }
+ case SCTP_STATUS:
+ {
+ struct sctp_nets *net;
+ struct sctp_status *sstat;
+
+ SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
+
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ sstat->sstat_state = sctp_map_assoc_state(stcb->asoc.state);
+ sstat->sstat_assoc_id = sctp_get_associd(stcb);
+ sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
+ sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
+ /*
+ * We can't include chunks that have been passed to
+ * the socket layer. Only things in queue.
+ */
+ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
+ stcb->asoc.cnt_on_all_streams);
+
+ sstat->sstat_instrms = stcb->asoc.streamincnt;
+ sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
+ sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ net = stcb->asoc.primary_destination;
+ if (net != NULL) {
+#ifdef HAVE_SA_LEN
+ memcpy(&sstat->sstat_primary.spinfo_address,
+ &net->ro._l_addr,
+ ((struct sockaddr *)(&net->ro._l_addr))->sa_len);
+#else
+ switch (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family) {
+#if defined(INET)
+ case AF_INET:
+ memcpy(&sstat->sstat_primary.spinfo_address,
+ &net->ro._l_addr,
+ sizeof(struct sockaddr_in));
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ memcpy(&sstat->sstat_primary.spinfo_address,
+ &net->ro._l_addr,
+ sizeof(struct sockaddr_in6));
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(&sstat->sstat_primary.spinfo_address,
+ &net->ro._l_addr,
+ sizeof(struct sockaddr_conn));
+ break;
+#endif
+ default:
+ break;
+ }
+#endif
+ ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
+ /*
+ * Again, the user can consult sctp_constants.h for the
+ * meaning of the network state.
+ */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /* It's unconfirmed */
+ sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED;
+ } else if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* It's active */
+ sstat->sstat_primary.spinfo_state = SCTP_ACTIVE;
+ } else {
+ /* It's inactive */
+ sstat->sstat_primary.spinfo_state = SCTP_INACTIVE;
+ }
+ sstat->sstat_primary.spinfo_cwnd = net->cwnd;
+ sstat->sstat_primary.spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT;
+ sstat->sstat_primary.spinfo_rto = net->RTO;
+ sstat->sstat_primary.spinfo_mtu = net->mtu;
+ switch (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family) {
+#if defined(INET)
+ case AF_INET:
+ sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ sstat->sstat_primary.spinfo_mtu -= sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+ } else {
+ memset(&sstat->sstat_primary, 0, sizeof(struct sctp_paddrinfo));
+ }
+ sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_status);
+ break;
+ }
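+ /*
+ * Usage sketch (illustrative): on an assumed one-to-one style usrsctp
+ * socket `sock`, sstat_assoc_id can be left as 0:
+ *
+ *   struct sctp_status status;
+ *   socklen_t len = (socklen_t)sizeof(status);
+ *
+ *   memset(&status, 0, sizeof(status));
+ *   if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_STATUS,
+ *                          &status, &len) == 0) {
+ *       // sstat_state, sstat_rwnd, sstat_unackdata and sstat_primary
+ *       // mirror the fields filled in above
+ *   }
+ */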
+ case SCTP_RTOINFO:
+ {
+ struct sctp_rtoinfo *srto;
+
+ SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
+
+ if (stcb) {
+ srto->srto_initial = stcb->asoc.initial_rto;
+ srto->srto_max = stcb->asoc.maxrto;
+ srto->srto_min = stcb->asoc.minrto;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (srto->srto_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ srto->srto_initial = inp->sctp_ep.initial_rto;
+ srto->srto_max = inp->sctp_ep.sctp_maxrto;
+ srto->srto_min = inp->sctp_ep.sctp_minrto;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_rtoinfo);
+ }
+ break;
+ }
+ case SCTP_TIMEOUTS:
+ {
+ struct sctp_timeouts *stimo;
+
+ SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize);
+ SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id);
+
+ if (stcb) {
+ stimo->stimo_init = stcb->asoc.timoinit;
+ stimo->stimo_data = stcb->asoc.timodata;
+ stimo->stimo_sack = stcb->asoc.timosack;
+ stimo->stimo_shutdown = stcb->asoc.timoshutdown;
+ stimo->stimo_heartbeat = stcb->asoc.timoheartbeat;
+ stimo->stimo_cookie = stcb->asoc.timocookie;
+ stimo->stimo_shutdownack = stcb->asoc.timoshutdownack;
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_timeouts);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ break;
+ }
+ case SCTP_ASSOCINFO:
+ {
+ struct sctp_assocparams *sasoc;
+
+ SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
+
+ if (stcb) {
+ sasoc->sasoc_cookie_life = sctp_ticks_to_msecs(stcb->asoc.cookie_life);
+ sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
+ sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
+ sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
+ sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ sasoc->sasoc_cookie_life = sctp_ticks_to_msecs(inp->sctp_ep.def_cookie_life);
+ sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
+ sasoc->sasoc_number_peer_destinations = 0;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assocparams);
+ }
+ break;
+ }
+ case SCTP_DEFAULT_SEND_PARAM:
+ {
+ struct sctp_sndrcvinfo *s_info;
+
+ SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
+
+ if (stcb) {
+ memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_sndrcvinfo);
+ }
+ break;
+ }
+ case SCTP_INITMSG:
+ {
+ struct sctp_initmsg *sinit;
+
+ SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
+ SCTP_INP_RLOCK(inp);
+ sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
+ sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
+ sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
+ sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(struct sctp_initmsg);
+ break;
+ }
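+ /*
+ * Usage sketch (illustrative): SCTP_INITMSG is more commonly *set*
+ * before connecting, using the same structure this getter fills in;
+ * `sock` is an assumed usrsctp socket:
+ *
+ *   struct sctp_initmsg init;
+ *
+ *   memset(&init, 0, sizeof(init));
+ *   init.sinit_num_ostreams = 10;
+ *   init.sinit_max_instreams = 10;
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_INITMSG,
+ *                      &init, (socklen_t)sizeof(init));
+ */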
+ case SCTP_PRIMARY_ADDR:
+ /* we allow a "get" operation on this */
+ {
+ struct sctp_setprim *ssp;
+
+ SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
+ SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
+
+ if (stcb) {
+ union sctp_sockstore *addr;
+
+ addr = &stcb->asoc.primary_destination->ro._l_addr;
+ switch (addr->sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ in6_sin_2_v4mapsin6(&addr->sin,
+ (struct sockaddr_in6 *)&ssp->ssp_addr);
+ } else {
+ memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in));
+ }
+#else
+ memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in));
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ memcpy(&ssp->ssp_addr, &addr->sin6, sizeof(struct sockaddr_in6));
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(&ssp->ssp_addr, &addr->sconn, sizeof(struct sockaddr_conn));
+ break;
+#endif
+ default:
+ break;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(struct sctp_setprim);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ break;
+ }
+ case SCTP_HMAC_IDENT:
+ {
+ struct sctp_hmacalgo *shmac;
+ sctp_hmaclist_t *hmaclist;
+ uint32_t size;
+ int i;
+
+ SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
+
+ SCTP_INP_RLOCK(inp);
+ hmaclist = inp->sctp_ep.local_hmacs;
+ if (hmaclist == NULL) {
+ /* no HMACs to return */
+ *optsize = sizeof(*shmac);
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ /* is there room for all of the hmac ids? */
+ size = sizeof(*shmac) + (hmaclist->num_algo *
+ sizeof(shmac->shmac_idents[0]));
+ if ((size_t)(*optsize) < size) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ /* copy in the list */
+ shmac->shmac_number_of_idents = hmaclist->num_algo;
+ for (i = 0; i < hmaclist->num_algo; i++) {
+ shmac->shmac_idents[i] = hmaclist->hmac[i];
+ }
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = size;
+ break;
+ }
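+ /*
+ * Usage sketch (illustrative): SCTP_HMAC_IDENT returns a variable-sized
+ * result, so the caller over-provisions the buffer; the bound of 8 ids
+ * below is an arbitrary assumption:
+ *
+ *   char buf[sizeof(struct sctp_hmacalgo) + 8 * sizeof(uint16_t)];
+ *   struct sctp_hmacalgo *algo = (struct sctp_hmacalgo *)buf;
+ *   socklen_t len = (socklen_t)sizeof(buf);
+ *
+ *   if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_HMAC_IDENT,
+ *                          algo, &len) == 0) {
+ *       // algo->shmac_number_of_idents entries in algo->shmac_idents[]
+ *   }
+ */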
+ case SCTP_AUTH_ACTIVE_KEY:
+ {
+ struct sctp_authkeyid *scact;
+
+ SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
+ SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
+
+ if (stcb) {
+ /* get the active key on the assoc */
+ scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (scact->scact_assoc_id == SCTP_FUTURE_ASSOC))) {
+ /* get the endpoint active key */
+ SCTP_INP_RLOCK(inp);
+ scact->scact_keynumber = inp->sctp_ep.default_keyid;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_authkeyid);
+ }
+ break;
+ }
+ case SCTP_LOCAL_AUTH_CHUNKS:
+ {
+ struct sctp_authchunks *sac;
+ sctp_auth_chklist_t *chklist = NULL;
+ size_t size = 0;
+
+ SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
+
+ if (stcb) {
+ /* get off the assoc */
+ chklist = stcb->asoc.local_auth_chunks;
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+ /* copy in the chunks */
+ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ sac->gauth_number_of_chunks = (uint32_t)size;
+ *optsize = sizeof(struct sctp_authchunks) + size;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (sac->gauth_assoc_id == SCTP_FUTURE_ASSOC))) {
+ /* get off the endpoint */
+ SCTP_INP_RLOCK(inp);
+ chklist = inp->sctp_ep.local_auth_chunks;
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+ /* copy in the chunks */
+ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ sac->gauth_number_of_chunks = (uint32_t)size;
+ *optsize = sizeof(struct sctp_authchunks) + size;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_PEER_AUTH_CHUNKS:
+ {
+ struct sctp_authchunks *sac;
+ sctp_auth_chklist_t *chklist = NULL;
+ size_t size = 0;
+
+ SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
+
+ if (stcb) {
+ /* get off the assoc */
+ chklist = stcb->asoc.peer_auth_chunks;
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+ /* copy in the chunks */
+ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ sac->gauth_number_of_chunks = (uint32_t)size;
+ *optsize = sizeof(struct sctp_authchunks) + size;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ }
+ break;
+ }
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+ case SCTP_PEELOFF:
+ {
+ struct sctp_peeloff_opt *peeloff;
+
+ SCTP_CHECK_AND_CAST(peeloff, optval, struct sctp_peeloff_opt, *optsize);
+ /* do the peeloff */
+ error = sctp_peeloff_option(p, peeloff);
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_peeloff_opt);
+ }
+ }
+ break;
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+ case SCTP_EVENT:
+ {
+ struct sctp_event *event;
+ uint32_t event_type;
+
+ SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, *optsize);
+ SCTP_FIND_STCB(inp, stcb, event->se_assoc_id);
+
+ switch (event->se_type) {
+ case SCTP_ASSOC_CHANGE:
+ event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT;
+ break;
+ case SCTP_PEER_ADDR_CHANGE:
+ event_type = SCTP_PCB_FLAGS_RECVPADDREVNT;
+ break;
+ case SCTP_REMOTE_ERROR:
+ event_type = SCTP_PCB_FLAGS_RECVPEERERR;
+ break;
+ case SCTP_SEND_FAILED:
+ event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
+ break;
+ case SCTP_SHUTDOWN_EVENT:
+ event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
+ break;
+ case SCTP_ADAPTATION_INDICATION:
+ event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT;
+ break;
+ case SCTP_PARTIAL_DELIVERY_EVENT:
+ event_type = SCTP_PCB_FLAGS_PDAPIEVNT;
+ break;
+ case SCTP_AUTHENTICATION_EVENT:
+ event_type = SCTP_PCB_FLAGS_AUTHEVNT;
+ break;
+ case SCTP_STREAM_RESET_EVENT:
+ event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT;
+ break;
+ case SCTP_SENDER_DRY_EVENT:
+ event_type = SCTP_PCB_FLAGS_DRYEVNT;
+ break;
+ case SCTP_NOTIFICATIONS_STOPPED_EVENT:
+ event_type = 0;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP);
+ error = ENOTSUP;
+ break;
+ case SCTP_ASSOC_RESET_EVENT:
+ event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT;
+ break;
+ case SCTP_STREAM_CHANGE_EVENT:
+ event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT;
+ break;
+ case SCTP_SEND_FAILED_EVENT:
+ event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT;
+ break;
+ default:
+ event_type = 0;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (event_type > 0) {
+ if (stcb) {
+ event->se_on = sctp_stcb_is_feature_on(inp, stcb, event_type);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (event->se_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ event->se_on = sctp_is_feature_on(inp, event_type);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ }
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_event);
+ }
+ break;
+ }
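+ /*
+ * Usage sketch (illustrative): the same struct sctp_event is used with
+ * setsockopt() to subscribe to a notification; the getter above reports
+ * the current subscription state. `sock` is an assumed usrsctp socket:
+ *
+ *   struct sctp_event event;
+ *
+ *   memset(&event, 0, sizeof(event));
+ *   event.se_assoc_id = SCTP_FUTURE_ASSOC;
+ *   event.se_type = SCTP_ASSOC_CHANGE;
+ *   event.se_on = 1;
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_EVENT,
+ *                      &event, (socklen_t)sizeof(event));
+ */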
+ case SCTP_RECVRCVINFO:
+ {
+ int onoff;
+
+ if (*optsize < sizeof(int)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ } else {
+ SCTP_INP_RLOCK(inp);
+ onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (error == 0) {
+ /* return the option value */
+ *(int *)optval = onoff;
+ *optsize = sizeof(int);
+ }
+ break;
+ }
+ case SCTP_RECVNXTINFO:
+ {
+ int onoff;
+
+ if (*optsize < sizeof(int)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ } else {
+ SCTP_INP_RLOCK(inp);
+ onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (error == 0) {
+ /* return the option value */
+ *(int *)optval = onoff;
+ *optsize = sizeof(int);
+ }
+ break;
+ }
+ case SCTP_DEFAULT_SNDINFO:
+ {
+ struct sctp_sndinfo *info;
+
+ SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id);
+
+ if (stcb) {
+ info->snd_sid = stcb->asoc.def_send.sinfo_stream;
+ info->snd_flags = stcb->asoc.def_send.sinfo_flags;
+ info->snd_flags &= 0xfff0;
+ info->snd_ppid = stcb->asoc.def_send.sinfo_ppid;
+ info->snd_context = stcb->asoc.def_send.sinfo_context;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (info->snd_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ info->snd_sid = inp->def_send.sinfo_stream;
+ info->snd_flags = inp->def_send.sinfo_flags;
+ info->snd_flags &= 0xfff0;
+ info->snd_ppid = inp->def_send.sinfo_ppid;
+ info->snd_context = inp->def_send.sinfo_context;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_sndinfo);
+ }
+ break;
+ }
+ case SCTP_DEFAULT_PRINFO:
+ {
+ struct sctp_default_prinfo *info;
+
+ SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id);
+
+ if (stcb) {
+ info->pr_policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
+ info->pr_value = stcb->asoc.def_send.sinfo_timetolive;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (info->pr_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ info->pr_policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags);
+ info->pr_value = inp->def_send.sinfo_timetolive;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_default_prinfo);
+ }
+ break;
+ }
+ case SCTP_PEER_ADDR_THLDS:
+ {
+ struct sctp_paddrthlds *thlds;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, *optsize);
+ SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (thlds->spt_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&thlds->spt_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&thlds->spt_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&thlds->spt_address;
+ }
+#else
+ addr = (struct sockaddr *)&thlds->spt_address;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+ /* We increment the refcount here since sctp_findassociation_ep_addr()
+ * will do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL.
+ */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)addr;
+ if (sconn->sconn_addr != NULL) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+
+ if (stcb != NULL) {
+ if (net != NULL) {
+ thlds->spt_pathmaxrxt = net->failure_threshold;
+ thlds->spt_pathpfthld = net->pf_threshold;
+ thlds->spt_pathcpthld = 0xffff;
+ } else {
+ thlds->spt_pathmaxrxt = stcb->asoc.def_net_failure;
+ thlds->spt_pathpfthld = stcb->asoc.def_net_pf_threshold;
+ thlds->spt_pathcpthld = 0xffff;
+ }
+ thlds->spt_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC))) {
+ /* Use endpoint defaults */
+ SCTP_INP_RLOCK(inp);
+ thlds->spt_pathmaxrxt = inp->sctp_ep.def_net_failure;
+ thlds->spt_pathpfthld = inp->sctp_ep.def_net_pf_threshold;
+ thlds->spt_pathcpthld = 0xffff;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_paddrthlds);
+ }
+ break;
+ }
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ {
+ struct sctp_udpencaps *encaps;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, *optsize);
+ SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (encaps->sue_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&encaps->sue_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&encaps->sue_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&encaps->sue_address;
+ }
+#else
+ addr = (struct sockaddr *)&encaps->sue_address;
+#endif
+ if (stcb) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+ /* We increment the refcount here since sctp_findassociation_ep_addr()
+ * will do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL.
+ */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)addr;
+ if (sconn->sconn_addr != NULL) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+
+ if (stcb != NULL) {
+ if (net) {
+ encaps->sue_port = net->port;
+ } else {
+ encaps->sue_port = stcb->asoc.port;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ encaps->sue_port = inp->sctp_ep.port;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_udpencaps);
+ }
+ break;
+ }
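+ /*
+ * Usage sketch (illustrative): for SCTP over UDP (RFC 6951) the remote
+ * encapsulation port is usually set once per endpoint; 9899 is the
+ * IANA-registered SCTP-over-UDP port, and sue_port is in network byte
+ * order. `sock` is an assumed usrsctp socket:
+ *
+ *   struct sctp_udpencaps encaps;
+ *
+ *   memset(&encaps, 0, sizeof(encaps));
+ *   encaps.sue_assoc_id = SCTP_FUTURE_ASSOC;
+ *   encaps.sue_port = htons(9899);
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_REMOTE_UDP_ENCAPS_PORT,
+ *                      &encaps, (socklen_t)sizeof(encaps));
+ */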
+ case SCTP_ECN_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.ecn_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->ecn_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_PR_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.prsctp_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->prsctp_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_AUTH_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.auth_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->auth_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_ASCONF_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.asconf_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->asconf_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_RECONFIG_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.reconfig_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->reconfig_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_NRSACK_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.nrsack_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->nrsack_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_PKTDROP_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.pktdrop_supported;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->pktdrop_supported;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_ENABLE_STREAM_RESET:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = (uint32_t)stcb->asoc.local_strreset_support;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = (uint32_t)inp->local_strreset_support;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ case SCTP_PR_STREAM_STATUS:
+ {
+ struct sctp_prstatus *sprstat;
+ uint16_t sid;
+ uint16_t policy;
+
+ SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id);
+
+ sid = sprstat->sprstat_sid;
+ policy = sprstat->sprstat_policy;
+#if defined(SCTP_DETAILED_STR_STATS)
+ if ((stcb != NULL) &&
+ (sid < stcb->asoc.streamoutcnt) &&
+ (policy != SCTP_PR_SCTP_NONE) &&
+ ((policy <= SCTP_PR_SCTP_MAX) ||
+ (policy == SCTP_PR_SCTP_ALL))) {
+ if (policy == SCTP_PR_SCTP_ALL) {
+ sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0];
+ sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0];
+ } else {
+ sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[policy];
+ sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[policy];
+ }
+#else
+ if ((stcb != NULL) &&
+ (sid < stcb->asoc.streamoutcnt) &&
+ (policy == SCTP_PR_SCTP_ALL)) {
+ sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0];
+ sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0];
+#endif
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_prstatus);
+ }
+ break;
+ }
+ case SCTP_PR_ASSOC_STATUS:
+ {
+ struct sctp_prstatus *sprstat;
+ uint16_t policy;
+
+ SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id);
+
+ policy = sprstat->sprstat_policy;
+ if ((stcb != NULL) &&
+ (policy != SCTP_PR_SCTP_NONE) &&
+ ((policy <= SCTP_PR_SCTP_MAX) ||
+ (policy == SCTP_PR_SCTP_ALL))) {
+ if (policy == SCTP_PR_SCTP_ALL) {
+ sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[0];
+ sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[0];
+ } else {
+ sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[policy];
+ sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[policy];
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_prstatus);
+ }
+ break;
+ }
+ case SCTP_MAX_CWND:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.max_cwnd;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->max_cwnd;
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ if (error == 0) {
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ }
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ break;
+ } /* end switch (sopt->sopt_name) */
+ if (error) {
+ *optsize = 0;
+ }
+ return (error);
+}
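+/*
+ * Note (sketch, not part of the imported sources): every branch above
+ * follows the same in/out contract. *optsize carries the caller's buffer
+ * size on entry, is validated by SCTP_CHECK_AND_CAST(), and on success
+ * is rewritten to the number of bytes actually filled in; on failure it
+ * is forced to 0. From application code, with an assumed socket `sock`:
+ *
+ *   struct sctp_assoc_value av;
+ *   socklen_t len = (socklen_t)sizeof(av);
+ *
+ *   memset(&av, 0, sizeof(av));
+ *   av.assoc_id = SCTP_FUTURE_ASSOC;
+ *   if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_ECN_SUPPORTED,
+ *                          &av, &len) == 0) {
+ *       // av.assoc_value: 1 if ECN will be offered on new associations
+ *   }
+ */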
+
+#if defined(__Userspace__)
+int
+#else
+static int
+#endif
+sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
+ void *p)
+{
+ int error, set_opt;
+ uint32_t *mopt;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_inpcb *inp = NULL;
+ uint32_t vrf_id;
+
+ if (optval == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ vrf_id = inp->def_vrf_id;
+
+ error = 0;
+ switch (optname) {
+ case SCTP_NODELAY:
+ case SCTP_AUTOCLOSE:
+ case SCTP_AUTO_ASCONF:
+ case SCTP_EXPLICIT_EOR:
+ case SCTP_DISABLE_FRAGMENTS:
+ case SCTP_USE_EXT_RCVINFO:
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ /* copy in the option value */
+ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+ set_opt = 0;
+ if (error)
+ break;
+ switch (optname) {
+ case SCTP_DISABLE_FRAGMENTS:
+ set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
+ break;
+ case SCTP_AUTO_ASCONF:
+ /*
+ * NOTE: we don't really support this flag
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* only valid for bound all sockets */
+ if ((SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) &&
+ (*mopt != 0)) {
+ /* forbidden by admin */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPERM);
+ return (EPERM);
+ }
+ set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ break;
+ case SCTP_EXPLICIT_EOR:
+ set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
+ break;
+ case SCTP_USE_EXT_RCVINFO:
+ set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ break;
+ case SCTP_NODELAY:
+ set_opt = SCTP_PCB_FLAGS_NODELAY;
+ break;
+ case SCTP_AUTOCLOSE:
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
+ /*
+ * The value is stored in ticks. Note this does not affect
+ * old associations, only new ones.
+ */
+ inp->sctp_ep.auto_close_time = sctp_secs_to_ticks(*mopt);
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ if (*mopt != 0) {
+ sctp_feature_on(inp, set_opt);
+ } else {
+ sctp_feature_off(inp, set_opt);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ case SCTP_REUSE_PORT:
+ {
+ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* Can't set it after we are bound */
+ error = EINVAL;
+ break;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ /* Can't do this for a 1-m socket */
+ error = EINVAL;
+ break;
+ }
+ if (*mopt != 0)
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ else
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ break;
+ }
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
+ if (*value > SCTP_SB_LIMIT_RCV(so)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ inp->partial_delivery_point = *value;
+ break;
+ }
+ case SCTP_FRAGMENT_INTERLEAVE:
+ /* not yet until we re-write sctp_recvmsg() */
+ {
+ uint32_t *level;
+
+ SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
+ if (*level == SCTP_FRAG_LEVEL_2) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (*level == SCTP_FRAG_LEVEL_1) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (*level == SCTP_FRAG_LEVEL_0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ break;
+ }
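+ /*
+ * Usage sketch (illustrative; `sock` is an assumed usrsctp socket):
+ *
+ *   uint32_t level = SCTP_FRAG_LEVEL_2; // full message interleaving
+ *
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
+ *                      &level, (socklen_t)sizeof(level));
+ */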
+ case SCTP_INTERLEAVING_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->idata_supported = 0;
+ } else {
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS))) {
+ inp->idata_supported = 1;
+ } else {
+ /* Must have Frag interleave and stream interleave on */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_CMT_ON_OFF:
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ if (av->assoc_value > SCTP_CMT_MAX) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ stcb->asoc.sctp_cmt_on_off = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_FUTURE_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_cmt_on_off = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.sctp_cmt_on_off = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ }
+ break;
+ case SCTP_PLUGGABLE_CC:
+ {
+ struct sctp_assoc_value *av;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ if ((av->assoc_value != SCTP_CC_RFC2581) &&
+ (av->assoc_value != SCTP_CC_HSTCP) &&
+ (av->assoc_value != SCTP_CC_HTCP) &&
+ (av->assoc_value != SCTP_CC_RTCC)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value];
+ stcb->asoc.congestion_control_module = av->assoc_value;
+ if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_FUTURE_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value];
+ stcb->asoc.congestion_control_module = av->assoc_value;
+ if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
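+ /*
+ * Usage sketch (illustrative): selecting one of the four congestion
+ * control modules accepted above for all future associations; `sock`
+ * is an assumed usrsctp socket:
+ *
+ *   struct sctp_assoc_value av;
+ *
+ *   memset(&av, 0, sizeof(av));
+ *   av.assoc_id = SCTP_FUTURE_ASSOC;
+ *   av.assoc_value = SCTP_CC_HTCP;
+ *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
+ *                      &av, (socklen_t)sizeof(av));
+ */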
+ case SCTP_CC_OPTION:
+ {
+ struct sctp_cc_option *cc_opt;
+
+ SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, optsize);
+ SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id);
+ if (stcb == NULL) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (cc_opt->aid_value.assoc_id == SCTP_CURRENT_ASSOC)) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.cc_functions.sctp_cwnd_socket_option) {
+ (*stcb->asoc.cc_functions.sctp_cwnd_socket_option)(stcb, 1, cc_opt);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ error = EINVAL;
+ }
+ } else {
+ if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) {
+ error = ENOTSUP;
+ } else {
+ error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option)(stcb, 1,
+ cc_opt);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ }
+ case SCTP_PLUGGABLE_SS:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ if ((av->assoc_value != SCTP_SS_DEFAULT) &&
+ (av->assoc_value != SCTP_SS_ROUND_ROBIN) &&
+ (av->assoc_value != SCTP_SS_ROUND_ROBIN_PACKET) &&
+ (av->assoc_value != SCTP_SS_PRIORITY) &&
+ (av->assoc_value != SCTP_SS_FAIR_BANDWITH) &&
+ (av->assoc_value != SCTP_SS_FIRST_COME)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1);
+ stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value];
+ stcb->asoc.stream_scheduling_module = av->assoc_value;
+ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_FUTURE_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.sctp_default_ss_module = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ SCTP_TCB_SEND_LOCK(stcb);
+ stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1);
+ stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value];
+ stcb->asoc.stream_scheduling_module = av->assoc_value;
+ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
+ case SCTP_SS_VALUE:
+ {
+ struct sctp_stream_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ if ((av->stream_id >= stcb->asoc.streamoutcnt) ||
+ (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id],
+ av->stream_value) < 0)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_CURRENT_ASSOC)) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (av->stream_id < stcb->asoc.streamoutcnt) {
+ stcb->asoc.ss_functions.sctp_ss_set_value(stcb,
+ &stcb->asoc,
+ &stcb->asoc.strmout[av->stream_id],
+ av->stream_value);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ /* Can't set stream value without association */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_CLR_STAT_LOG:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ break;
+ case SCTP_CONTEXT:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ stcb->asoc.context = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_FUTURE_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_context = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.context = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
+ case SCTP_VRF_ID:
+ {
+ uint32_t *default_vrfid;
+#ifdef SCTP_MVRF
+ int i;
+#endif
+ SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
+ if (*default_vrfid > SCTP_MAX_VRF_ID) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ /* The VRF must be in the VRF list */
+ if (*default_vrfid == inp->m_vrf_ids[i]) {
+ SCTP_INP_WLOCK(inp);
+ inp->def_vrf_id = *default_vrfid;
+ SCTP_INP_WUNLOCK(inp);
+ goto sctp_done;
+ }
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+#else
+ inp->def_vrf_id = *default_vrfid;
+#endif
+#ifdef SCTP_MVRF
+ sctp_done:
+#endif
+ break;
+ }
+ case SCTP_DEL_VRF_ID:
+ {
+#ifdef SCTP_MVRF
+ uint32_t *del_vrfid;
+ int i, fnd = 0;
+
+ SCTP_CHECK_AND_CAST(del_vrfid, optval, uint32_t, optsize);
+ if (*del_vrfid > SCTP_MAX_VRF_ID) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (inp->num_vrfs == 1) {
+ /* Can't delete last one */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* Can't add more once you are bound */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (*del_vrfid == inp->m_vrf_ids[i]) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ if (i != (inp->num_vrfs - 1)) {
+ /* Take the bottom one and move it into this slot */
+ inp->m_vrf_ids[i] = inp->m_vrf_ids[(inp->num_vrfs - 1)];
+ }
+ if (*del_vrfid == inp->def_vrf_id) {
+ /* Take the first one as the new default */
+ inp->def_vrf_id = inp->m_vrf_ids[0];
+ }
+ /* Drop the count by one, killing the last slot */
+ inp->num_vrfs--;
+ SCTP_INP_WUNLOCK(inp);
+#else
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+#endif
+ break;
+ }
+ case SCTP_ADD_VRF_ID:
+ {
+#ifdef SCTP_MVRF
+ uint32_t *add_vrfid;
+ int i;
+
+ SCTP_CHECK_AND_CAST(add_vrfid, optval, uint32_t, optsize);
+ if (*add_vrfid > SCTP_MAX_VRF_ID) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* Can't add more once you are bound */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ /* Verify it's not already here */
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (*add_vrfid == inp->m_vrf_ids[i]) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ }
+ if (error != 0) {
+ /* Duplicate was found above; the write lock is already released. */
+ break;
+ }
+ if ((inp->num_vrfs + 1) > inp->vrf_size) {
+ /* need to grow array */
+ uint32_t *tarray;
+ SCTP_MALLOC(tarray, uint32_t *,
+ (sizeof(uint32_t) * (inp->vrf_size + SCTP_DEFAULT_VRF_SIZE)),
+ SCTP_M_MVRF);
+ if (tarray == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ memcpy(tarray, inp->m_vrf_ids, (sizeof(uint32_t) * inp->vrf_size));
+ SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF);
+ inp->m_vrf_ids = tarray;
+ inp->vrf_size += SCTP_DEFAULT_VRF_SIZE;
+ }
+ inp->m_vrf_ids[inp->num_vrfs] = *add_vrfid;
+ inp->num_vrfs++;
+ SCTP_INP_WUNLOCK(inp);
+#else
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+#endif
+ break;
+ }
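+ /*
+ * SCTP_DELAYED_SACK (cf. RFC 6458): configure the delayed-SACK
+ * timer (sack_delay, in ms, at most SCTP_MAX_SACK_DELAY) and the
+ * SACK frequency (sack_freq); a value of zero leaves the
+ * corresponding setting unchanged.
+ */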
+ case SCTP_DELAYED_SACK:
+ {
+ struct sctp_sack_info *sack;
+
+ SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
+ SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
+ if (sack->sack_delay) {
+ if (sack->sack_delay > SCTP_MAX_SACK_DELAY) {
+ error = EINVAL;
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ }
+ }
+ if (stcb) {
+ if (sack->sack_delay) {
+ stcb->asoc.delayed_ack = sack->sack_delay;
+ }
+ if (sack->sack_freq) {
+ stcb->asoc.sack_freq = sack->sack_freq;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((sack->sack_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (sack->sack_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ if (sack->sack_delay) {
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = sctp_msecs_to_ticks(sack->sack_delay);
+ }
+ if (sack->sack_freq) {
+ inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((sack->sack_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (sack->sack_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (sack->sack_delay) {
+ stcb->asoc.delayed_ack = sack->sack_delay;
+ }
+ if (sack->sack_freq) {
+ stcb->asoc.sack_freq = sack->sack_freq;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
+ case SCTP_AUTH_CHUNK:
+ {
+ struct sctp_authchunk *sauth;
+
+ SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
+
+ SCTP_INP_WLOCK(inp);
+ if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ } else {
+ inp->auth_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
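+ /*
+ * SCTP_AUTH_KEY (RFC 4895): install or replace a shared key.
+ * A zero sca_keylength means the key material is everything
+ * following the header up to optsize. Cached keys for this
+ * key id are invalidated first.
+ */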
+ case SCTP_AUTH_KEY:
+ {
+ struct sctp_authkey *sca;
+ struct sctp_keyhead *shared_keys;
+ sctp_sharedkey_t *shared_key;
+ sctp_key_t *key = NULL;
+ size_t size;
+
+ SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
+ if (sca->sca_keylength == 0) {
+ size = optsize - sizeof(struct sctp_authkey);
+ } else {
+ if (sca->sca_keylength + sizeof(struct sctp_authkey) <= optsize) {
+ size = sca->sca_keylength;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ }
+ SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
+
+ if (stcb) {
+ shared_keys = &stcb->asoc.shared_keys;
+ /* clear the cached keys for this key id */
+ sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((sca->sca_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (sca->sca_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ shared_keys = &inp->sctp_ep.shared_keys;
+ /*
+ * clear the cached keys on all assocs for
+ * this key id
+ */
+ sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((sca->sca_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (sca->sca_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ shared_keys = &stcb->asoc.shared_keys;
+ /* clear the cached keys for this key id */
+ sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
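+ /*
+ * SCTP_HMAC_IDENT (RFC 4895): set the preference-ordered list of
+ * HMAC algorithms used for chunk authentication. SHA-1 is
+ * mandatory to implement and must be part of the list.
+ */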
+ case SCTP_HMAC_IDENT:
+ {
+ struct sctp_hmacalgo *shmac;
+ sctp_hmaclist_t *hmaclist;
+ uint16_t hmacid;
+ uint32_t i;
+
+ SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
+ if ((optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) ||
+ (shmac->shmac_number_of_idents > 0xffff)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+
+ hmaclist = sctp_alloc_hmaclist((uint16_t)shmac->shmac_number_of_idents);
+ if (hmaclist == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ break;
+ }
+ for (i = 0; i < shmac->shmac_number_of_idents; i++) {
+ hmacid = shmac->shmac_idents[i];
+ if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
+ /* an invalid HMAC id was found */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ sctp_free_hmaclist(hmaclist);
+ goto sctp_set_hmac_done;
+ }
+ }
+ for (i = 0; i < hmaclist->num_algo; i++) {
+ if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
+ /* already in list */
+ break;
+ }
+ }
+ if (i == hmaclist->num_algo) {
+ /* not found in list */
+ sctp_free_hmaclist(hmaclist);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ /* set it on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+ inp->sctp_ep.local_hmacs = hmaclist;
+ SCTP_INP_WUNLOCK(inp);
+ sctp_set_hmac_done:
+ break;
+ }
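+ /*
+ * SCTP_AUTH_ACTIVE_KEY / SCTP_AUTH_DELETE_KEY /
+ * SCTP_AUTH_DEACTIVATE_KEY (RFC 4895): select which shared key
+ * is active for sending, and delete or deactivate keys that are
+ * no longer needed.
+ */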
+ case SCTP_AUTH_ACTIVE_KEY:
+ {
+ struct sctp_authkeyid *scact;
+
+ SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
+ SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
+
+ /* set the active key on the right place */
+ if (stcb) {
+ /* set the active key on the assoc */
+ if (sctp_auth_setactivekey(stcb,
+ scact->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((scact->scact_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (scact->scact_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((scact->scact_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (scact->scact_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_auth_setactivekey(stcb, scact->scact_keynumber);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
+ case SCTP_AUTH_DELETE_KEY:
+ {
+ struct sctp_authkeyid *scdel;
+
+ SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
+ SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
+
+ /* delete the key from the right place */
+ if (stcb) {
+ if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((scdel->scact_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (scdel->scact_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((scdel->scact_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (scdel->scact_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_delete_sharedkey(stcb, scdel->scact_keynumber);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
+ case SCTP_AUTH_DEACTIVATE_KEY:
+ {
+ struct sctp_authkeyid *keyid;
+
+ SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, optsize);
+ SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
+
+ /* deactivate the key from the right place */
+ if (stcb) {
+ if (sctp_deact_sharedkey(stcb, keyid->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((keyid->scact_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (keyid->scact_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_deact_sharedkey_ep(inp, keyid->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((keyid->scact_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (keyid->scact_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_deact_sharedkey(stcb, keyid->scact_keynumber);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
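+ /*
+ * SCTP_ENABLE_STREAM_RESET (RFC 6525): control which RE-CONFIG
+ * requests (incoming/outgoing stream reset, association reset)
+ * the local end is willing to process.
+ */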
+ case SCTP_ENABLE_STREAM_RESET:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ if (av->assoc_value & (~SCTP_ENABLE_VALUE_MASK)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_FUTURE_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->local_strreset_support = (uint8_t)av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+
+ }
+ break;
+ }
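+ /*
+ * SCTP_RESET_STREAMS (RFC 6525): request a reset of the listed
+ * incoming and/or outgoing streams, or of all of them if the
+ * list is empty. Requires an established association whose peer
+ * supports the RE-CONFIG extension.
+ */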
+ case SCTP_RESET_STREAMS:
+ {
+ struct sctp_reset_streams *strrst;
+ int i, send_out = 0;
+ int send_in = 0;
+
+ SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_reset_streams, optsize);
+ SCTP_FIND_STCB(inp, stcb, strrst->srs_assoc_id);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.reconfig_supported == 0) {
+ /*
+ * Peer does not support the chunk type.
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (sizeof(struct sctp_reset_streams) +
+ strrst->srs_number_streams * sizeof(uint16_t) > optsize) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (strrst->srs_flags & SCTP_STREAM_RESET_INCOMING) {
+ send_in = 1;
+ if (stcb->asoc.stream_reset_outstanding) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ }
+ if (strrst->srs_flags & SCTP_STREAM_RESET_OUTGOING) {
+ send_out = 1;
+ }
+ if ((strrst->srs_number_streams > SCTP_MAX_STREAMS_AT_ONCE_RESET) && send_in) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if ((send_in == 0) && (send_out == 0)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ for (i = 0; i < strrst->srs_number_streams; i++) {
+ if ((send_in) &&
+ (strrst->srs_stream_list[i] >= stcb->asoc.streamincnt)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if ((send_out) &&
+ (strrst->srs_stream_list[i] >= stcb->asoc.streamoutcnt)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ }
+ if (error) {
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (send_out) {
+ int cnt;
+ uint16_t strm;
+ if (strrst->srs_number_streams) {
+ for (i = 0, cnt = 0; i < strrst->srs_number_streams; i++) {
+ strm = strrst->srs_stream_list[i];
+ if (stcb->asoc.strmout[strm].state == SCTP_STREAM_OPEN) {
+ stcb->asoc.strmout[strm].state = SCTP_STREAM_RESET_PENDING;
+ cnt++;
+ }
+ }
+ } else {
+ /* No list given: reset all outgoing streams */
+ for (i = 0, cnt = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN) {
+ stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
+ cnt++;
+ }
+ }
+ }
+ }
+ if (send_in) {
+ error = sctp_send_str_reset_req(stcb, strrst->srs_number_streams,
+ strrst->srs_stream_list,
+ send_in, 0, 0, 0, 0, 0);
+ } else {
+ error = sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_LOCKED);
+ }
+ if (error == 0) {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+ } else {
+ /*
+ * For outgoing streams don't report any problems in
+ * sending the request to the application.
+ * XXX: Double check resetting incoming streams.
+ */
+ error = 0;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
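+ /*
+ * SCTP_ADD_STREAMS (RFC 6525): request additional incoming
+ * and/or outgoing streams, limited to 65535 streams in total
+ * and by what the association allows for inbound streams.
+ */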
+ case SCTP_ADD_STREAMS:
+ {
+ struct sctp_add_streams *stradd;
+ uint8_t addstream = 0;
+ uint16_t add_o_strmcnt = 0;
+ uint16_t add_i_strmcnt = 0;
+
+ SCTP_CHECK_AND_CAST(stradd, optval, struct sctp_add_streams, optsize);
+ SCTP_FIND_STCB(inp, stcb, stradd->sas_assoc_id);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.reconfig_supported == 0) {
+ /*
+ * Peer does not support the chunk type.
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (stcb->asoc.stream_reset_outstanding) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if ((stradd->sas_outstrms == 0) &&
+ (stradd->sas_instrms == 0)) {
+ error = EINVAL;
+ goto skip_stuff;
+ }
+ if (stradd->sas_outstrms) {
+ addstream = 1;
+ /* We allocate here */
+ add_o_strmcnt = stradd->sas_outstrms;
+ if ((((int)add_o_strmcnt) + ((int)stcb->asoc.streamoutcnt)) > 0x0000ffff) {
+ /* You can't have more than 64k */
+ error = EINVAL;
+ goto skip_stuff;
+ }
+ }
+ if (stradd->sas_instrms) {
+ int cnt;
+
+ addstream |= 2;
+ /* We allocate inside sctp_send_str_reset_req() */
+ add_i_strmcnt = stradd->sas_instrms;
+ cnt = add_i_strmcnt;
+ cnt += stcb->asoc.streamincnt;
+ if (cnt > 0x0000ffff) {
+ /* You can't have more than 64k */
+ error = EINVAL;
+ goto skip_stuff;
+ }
+ if (cnt > (int)stcb->asoc.max_inbound_streams) {
+ /* More than you are allowed */
+ error = EINVAL;
+ goto skip_stuff;
+ }
+ }
+ error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, addstream, add_o_strmcnt, add_i_strmcnt, 0);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+ skip_stuff:
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
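+ /*
+ * SCTP_RESET_ASSOC (RFC 6525): request an SSN/TSN reset for the
+ * whole association. Only allowed while the send and sent
+ * queues and all outgoing stream queues are empty.
+ */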
+ case SCTP_RESET_ASSOC:
+ {
+ int i;
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
+ SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) *value);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.reconfig_supported == 0) {
+ /*
+ * Peer does not support the chunk type.
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (stcb->asoc.stream_reset_outstanding) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* Is there any data pending in the send or sent queues? */
+ if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
+ !TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+ busy_out:
+ error = EBUSY;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* Do any streams have data queued? */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ goto busy_out;
+ }
+ }
+ error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 1, 0, 0, 0, 0);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
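+ /*
+ * SCTP_CONNECT_X and variants back sctp_connectx(): set up an
+ * association towards a packed list of remote addresses. The
+ * DELAYED variant defers sending INIT until the application
+ * issues SCTP_CONNECT_X_COMPLETE.
+ */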
+ case SCTP_CONNECT_X:
+ if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
+ break;
+ case SCTP_CONNECT_X_DELAYED:
+ if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
+ break;
+ case SCTP_CONNECT_X_COMPLETE:
+ {
+ struct sockaddr *sa;
+
+ /* FIXME MT: check correct? */
+ SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
+
+ /* find tcb */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ /* We increment here since sctp_findassociation_ep_addr() will
+ * do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL, i.e. not a TCB.
+ */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.delayed_connection == 1) {
+ stcb->asoc.delayed_connection = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
+ stcb->asoc.primary_destination,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ } else {
+ /*
+ * already expired or did not use delayed
+ * connectx
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ case SCTP_MAX_BURST:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ stcb->asoc.max_burst = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_FUTURE_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.max_burst = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+ (av->assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.max_burst = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
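+ /*
+ * SCTP_MAXSEG (cf. RFC 6458): set the maximum size of user data
+ * per segment; the stored fragmentation point includes the
+ * address-family overhead, and zero restores
+ * SCTP_DEFAULT_MAXSEGMENT.
+ */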
+ case SCTP_MAXSEG:
+ {
+ struct sctp_assoc_value *av;
+ int ovh;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+ if (stcb) {
+ if (av->assoc_value) {
+ stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
+ } else {
+ stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ /* FIXME MT: I think this is not in tune with the API ID */
+ if (av->assoc_value) {
+ inp->sctp_frag_point = (av->assoc_value + ovh);
+ } else {
+ inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
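+ /*
+ * SCTP_EVENTS: legacy bulk event subscription using struct
+ * sctp_event_subscribe, deprecated by RFC 6458 in favour of
+ * SCTP_EVENT. Applied to the endpoint and to every existing
+ * association.
+ */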
+ case SCTP_EVENTS:
+ {
+ struct sctp_event_subscribe *events;
+
+ SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
+
+ SCTP_INP_WLOCK(inp);
+ if (events->sctp_data_io_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ }
+
+ if (events->sctp_association_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ }
+
+ if (events->sctp_address_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ }
+
+ if (events->sctp_send_failure_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ }
+
+ if (events->sctp_peer_error_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+ }
+
+ if (events->sctp_shutdown_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ }
+
+ if (events->sctp_partial_delivery_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+ }
+
+ if (events->sctp_adaptation_layer_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ }
+
+ if (events->sctp_authentication_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+ }
+
+ if (events->sctp_sender_dry_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
+ }
+
+ if (events->sctp_stream_reset_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ }
+ SCTP_INP_WUNLOCK(inp);
+
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (events->sctp_association_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ }
+ if (events->sctp_address_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ }
+ if (events->sctp_send_failure_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ }
+ if (events->sctp_peer_error_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR);
+ }
+ if (events->sctp_shutdown_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ }
+ if (events->sctp_partial_delivery_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT);
+ }
+ if (events->sctp_adaptation_layer_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ }
+ if (events->sctp_authentication_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT);
+ }
+ if (events->sctp_sender_dry_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT);
+ }
+ if (events->sctp_stream_reset_event) {
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ /* Send up the sender dry event only for 1-to-1 style sockets. */
+ if (events->sctp_sender_dry_event) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ case SCTP_ADAPTATION_LAYER:
+ {
+ struct sctp_setadaptation *adap_bits;
+
+ SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
+ inp->sctp_ep.adaptation_layer_indicator_provided = 1;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+#ifdef SCTP_DEBUG
+ case SCTP_SET_INITIAL_DBG_SEQ:
+ {
+ uint32_t *vvv;
+
+ SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.initial_sequence_debug = *vvv;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+#endif
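+ /*
+ * SCTP_DEFAULT_SEND_PARAM: legacy default struct sctp_sndrcvinfo
+ * for outgoing messages, deprecated by RFC 6458 in favour of
+ * SCTP_DEFAULT_SNDINFO. The default stream must exist on the
+ * association the setting is applied to.
+ */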
+ case SCTP_DEFAULT_SEND_PARAM:
+ {
+ struct sctp_sndrcvinfo *s_info;
+
+ SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
+ SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
+
+ if (stcb) {
+ if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) {
+ memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((s_info->sinfo_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) {
+ memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
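+ /*
+ * SCTP_PEER_ADDR_PARAMS (cf. RFC 6458): control heartbeats, path
+ * MTU discovery, path failure thresholds, DSCP and IPv6 flow
+ * label per destination, per association or per endpoint; a
+ * wildcard address selects the association-level settings.
+ */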
+ case SCTP_PEER_ADDR_PARAMS:
+ {
+ struct sctp_paddrparams *paddrp;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
+ SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (paddrp->spp_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&paddrp->spp_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&paddrp->spp_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&paddrp->spp_address;
+ }
+#else
+ addr = (struct sockaddr *)&paddrp->spp_address;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+ /* We increment here since sctp_findassociation_ep_addr() will
+ * do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL, i.e. not a TCB.
+ */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)addr;
+ if (sconn->sconn_addr != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+ /* sanity checks */
+ if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+ if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) &&
+ ((paddrp->spp_pathmtu < SCTP_SMALLEST_PMTU) ||
+ (paddrp->spp_pathmtu > SCTP_LARGEST_PMTU))) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+ if (stcb != NULL) {
+ /************************TCB SPECIFIC SET ******************/
+ if (net != NULL) {
+ /************************NET SPECIFIC SET ******************/
+ if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ !(net->dest_state & SCTP_ADDR_NOHB)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
+ }
+ net->dest_state |= SCTP_ADDR_NOHB;
+ }
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ if (paddrp->spp_hbinterval) {
+ net->heart_beat_delay = paddrp->spp_hbinterval;
+ } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+ net->heart_beat_delay = 0;
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ net->dest_state &= ~SCTP_ADDR_NOHB;
+ }
+ if (paddrp->spp_flags & SPP_HB_DEMAND) {
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
+ sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
+ }
+ net->dest_state |= SCTP_ADDR_NO_PMTUD;
+ net->mtu = paddrp->spp_pathmtu;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ net->mtu += SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ net->mtu += SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ net->mtu += sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+ if (net->mtu < stcb->asoc.smallest_mtu) {
+ sctp_pathmtu_adjustment(stcb, net->mtu);
+ }
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+ if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
+ }
+ if (paddrp->spp_pathmaxrxt) {
+ if (net->dest_state & SCTP_ADDR_PF) {
+ if (net->error_count > paddrp->spp_pathmaxrxt) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ }
+ } else {
+ if ((net->error_count <= paddrp->spp_pathmaxrxt) &&
+ (net->error_count > net->pf_threshold)) {
+ net->dest_state |= SCTP_ADDR_PF;
+ sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ if (net->error_count > paddrp->spp_pathmaxrxt) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ } else {
+ if (net->error_count <= paddrp->spp_pathmaxrxt) {
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ }
+ net->failure_threshold = paddrp->spp_pathmaxrxt;
+ }
+ if (paddrp->spp_flags & SPP_DSCP) {
+ net->dscp = paddrp->spp_dscp & 0xfc;
+ net->dscp |= 0x01;
+ }
+#ifdef INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+ net->flowlabel |= 0x80000000;
+ }
+ }
+#endif
+ } else {
+ /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
+ if (paddrp->spp_pathmaxrxt != 0) {
+ stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->dest_state & SCTP_ADDR_PF) {
+ if (net->error_count > paddrp->spp_pathmaxrxt) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ }
+ } else {
+ if ((net->error_count <= paddrp->spp_pathmaxrxt) &&
+ (net->error_count > net->pf_threshold)) {
+ net->dest_state |= SCTP_ADDR_PF;
+ sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_13);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ if (net->error_count > paddrp->spp_pathmaxrxt) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ } else {
+ if (net->error_count <= paddrp->spp_pathmaxrxt) {
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ }
+ net->failure_threshold = paddrp->spp_pathmaxrxt;
+ }
+ }
+
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ if (paddrp->spp_hbinterval != 0) {
+ stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
+ } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+ stcb->asoc.heart_beat_delay = 0;
+ }
+ /* Turn back on the timer */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (paddrp->spp_hbinterval != 0) {
+ net->heart_beat_delay = paddrp->spp_hbinterval;
+ } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+ net->heart_beat_delay = 0;
+ }
+ if (net->dest_state & SCTP_ADDR_NOHB) {
+ net->dest_state &= ~SCTP_ADDR_NOHB;
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_14);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ }
+ if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (!(net->dest_state & SCTP_ADDR_NOHB)) {
+ net->dest_state |= SCTP_ADDR_NOHB;
+ if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_15);
+ }
+ }
+ }
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_16);
+ }
+ net->dest_state |= SCTP_ADDR_NO_PMTUD;
+ net->mtu = paddrp->spp_pathmtu;
+ switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ net->mtu += SCTP_MIN_V4_OVERHEAD;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ net->mtu += SCTP_MIN_OVERHEAD;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ net->mtu += sizeof(struct sctphdr);
+ break;
+#endif
+ default:
+ break;
+ }
+ if (net->mtu < stcb->asoc.smallest_mtu) {
+ sctp_pathmtu_adjustment(stcb, net->mtu);
+ }
+ }
+ stcb->asoc.default_mtu = paddrp->spp_pathmtu;
+ sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
+ }
+ stcb->asoc.default_mtu = 0;
+ sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+ }
+ if (paddrp->spp_flags & SPP_DSCP) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->dscp = paddrp->spp_dscp & 0xfc;
+ net->dscp |= 0x01;
+ }
+ stcb->asoc.default_dscp = paddrp->spp_dscp & 0xfc;
+ stcb->asoc.default_dscp |= 0x01;
+ }
+#ifdef INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+ net->flowlabel |= 0x80000000;
+ }
+ }
+ stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+ stcb->asoc.default_flowlabel |= 0x80000000;
+ }
+#endif
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /************************NO TCB, SET TO default stuff ******************/
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ /*
+ * The TOS and flow label values are set with the
+ * IP-level options on the socket
+ */
+ if (paddrp->spp_pathmaxrxt != 0) {
+ inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
+ }
+
+ if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
+ else if (paddrp->spp_hbinterval != 0) {
+ if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
+ paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(paddrp->spp_hbinterval);
+ }
+
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
+ } else if (paddrp->spp_hbinterval) {
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(paddrp->spp_hbinterval);
+ }
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ } else if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+ inp->sctp_ep.default_mtu = 0;
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+ } else if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
+ inp->sctp_ep.default_mtu = paddrp->spp_pathmtu;
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+ }
+ if (paddrp->spp_flags & SPP_DSCP) {
+ inp->sctp_ep.default_dscp = paddrp->spp_dscp & 0xfc;
+ inp->sctp_ep.default_dscp |= 0x01;
+ }
+#ifdef INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ inp->sctp_ep.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+ inp->sctp_ep.default_flowlabel |= 0x80000000;
+ }
+ }
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
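+ /*
+ * SCTP_RTOINFO (cf. RFC 6458): set the initial, minimum and
+ * maximum retransmission timeout. A zero keeps the current
+ * value, and the result must satisfy min <= initial <= max.
+ */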
+ case SCTP_RTOINFO:
+ {
+ struct sctp_rtoinfo *srto;
+ uint32_t new_init, new_min, new_max;
+
+ SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
+ SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
+
+ if (stcb) {
+ if (srto->srto_initial)
+ new_init = srto->srto_initial;
+ else
+ new_init = stcb->asoc.initial_rto;
+ if (srto->srto_max)
+ new_max = srto->srto_max;
+ else
+ new_max = stcb->asoc.maxrto;
+ if (srto->srto_min)
+ new_min = srto->srto_min;
+ else
+ new_min = stcb->asoc.minrto;
+ if ((new_min <= new_init) && (new_init <= new_max)) {
+ stcb->asoc.initial_rto = new_init;
+ stcb->asoc.maxrto = new_max;
+ stcb->asoc.minrto = new_min;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (srto->srto_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (srto->srto_initial)
+ new_init = srto->srto_initial;
+ else
+ new_init = inp->sctp_ep.initial_rto;
+ if (srto->srto_max)
+ new_max = srto->srto_max;
+ else
+ new_max = inp->sctp_ep.sctp_maxrto;
+ if (srto->srto_min)
+ new_min = srto->srto_min;
+ else
+ new_min = inp->sctp_ep.sctp_minrto;
+ if ((new_min <= new_init) && (new_init <= new_max)) {
+ inp->sctp_ep.initial_rto = new_init;
+ inp->sctp_ep.sctp_maxrto = new_max;
+ inp->sctp_ep.sctp_minrto = new_min;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_ASSOCINFO:
+ {
+ struct sctp_assocparams *sasoc;
+
+ SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
+ SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
+ if (sasoc->sasoc_cookie_life) {
+ /* boundary check the cookie life */
+ if (sasoc->sasoc_cookie_life < 1000)
+ sasoc->sasoc_cookie_life = 1000;
+ if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
+ sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
+ }
+ }
+ if (stcb) {
+ if (sasoc->sasoc_asocmaxrxt)
+ stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
+ if (sasoc->sasoc_cookie_life) {
+ stcb->asoc.cookie_life = sctp_msecs_to_ticks(sasoc->sasoc_cookie_life);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (sasoc->sasoc_asocmaxrxt)
+ inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
+ if (sasoc->sasoc_cookie_life) {
+ inp->sctp_ep.def_cookie_life = sctp_msecs_to_ticks(sasoc->sasoc_cookie_life);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_INITMSG:
+ {
+ struct sctp_initmsg *sinit;
+
+ SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
+ SCTP_INP_WLOCK(inp);
+ if (sinit->sinit_num_ostreams)
+ inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
+
+ if (sinit->sinit_max_instreams)
+ inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
+
+ if (sinit->sinit_max_attempts)
+ inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
+
+ if (sinit->sinit_max_init_timeo)
+ inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
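+ /*
+ * SCTP_PRIMARY_ADDR (cf. RFC 6458): choose which of the peer's
+ * addresses is used as the primary destination; the address
+ * must be a confirmed destination of the association.
+ */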
+ case SCTP_PRIMARY_ADDR:
+ {
+ struct sctp_setprim *spa;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
+ SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (spa->ssp_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&spa->ssp_addr;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&spa->ssp_addr;
+ }
+ } else {
+ addr = (struct sockaddr *)&spa->ssp_addr;
+ }
+#else
+ addr = (struct sockaddr *)&spa->ssp_addr;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+ /* We increment here since sctp_findassociation_ep_addr() will
+ * do a decrement if it finds the stcb, as long as the locked
+ * tcb (last argument) is NULL, i.e. not a TCB.
+ */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if ((stcb != NULL) && (net != NULL)) {
+ if (net != stcb->asoc.primary_destination) {
+ if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ /* Ok we need to set it */
+ if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
+ if ((stcb->asoc.alternate) &&
+ (!(net->dest_state & SCTP_ADDR_PF)) &&
+ (net->dest_state & SCTP_ADDR_REACHABLE)) {
+ sctp_free_remote_addr(stcb->asoc.alternate);
+ stcb->asoc.alternate = NULL;
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ }
+ case SCTP_SET_DYNAMIC_PRIMARY:
+ {
+ union sctp_sockstore *ss;
+#ifdef SCTP_MVRF
+ int i, fnd = 0;
+#endif
+#if !defined(_WIN32) && !defined(__Userspace__)
+#if defined(__APPLE__)
+ struct proc *proc;
+#endif
+#if defined(__FreeBSD__)
+ error = priv_check(curthread,
+ PRIV_NETINET_RESERVEDPORT);
+#elif defined(__APPLE__)
+ proc = (struct proc *)p;
+ if (p) {
+ error = suser(proc->p_ucred, &proc->p_acflag);
+ } else {
+ break;
+ }
+#else
+ error = suser(p, 0);
+#endif
+ if (error)
+ break;
+#endif
+
+ SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
+ /* SUPER USER CHECK? */
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (vrf_id == inp->m_vrf_ids[i]) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+#endif
+ error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
+ break;
+ }
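+ /*
+ * SCTP_SET_PEER_PRIMARY_ADDR (RFC 5061): ask the peer via ASCONF
+ * to use one of our bound addresses as its primary destination;
+ * the address must belong to this endpoint.
+ */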
+ case SCTP_SET_PEER_PRIMARY_ADDR:
+ {
+ struct sctp_setpeerprim *sspp;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
+ SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
+ if (stcb != NULL) {
+ struct sctp_ifa *ifa;
+
+#if defined(INET) && defined(INET6)
+ if (sspp->sspp_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&sspp->sspp_addr;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&sspp->sspp_addr;
+ }
+ } else {
+ addr = (struct sockaddr *)&sspp->sspp_addr;
+ }
+#else
+ addr = (struct sockaddr *)&sspp->sspp_addr;
+#endif
+ ifa = sctp_find_ifa_by_addr(addr, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (ifa == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* Must validate the ifa found is in our ep */
+ struct sctp_laddr *laddr;
+ int found = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+ __func__);
+ continue;
+ }
+ if ((sctp_is_addr_restricted(stcb, laddr->ifa)) &&
+ (!sctp_is_addr_pending(stcb, laddr->ifa))) {
+ continue;
+ }
+ if (laddr->ifa == ifa) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ } else {
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+ break;
+ }
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+#endif
+ }
+ if (sctp_set_primary_ip_address_sa(stcb, addr) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED);
+ out_of_it:
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ break;
+ }
+ case SCTP_BINDX_ADD_ADDR:
+ {
+ struct sockaddr *sa;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct thread *td;
+
+ td = (struct thread *)p;
+#endif
+ SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
+#ifdef INET
+ if (sa->sa_family == AF_INET) {
+ if (optsize < sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (td != NULL &&
+ (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)sa)->sin_addr)))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#endif
+ } else
+#endif
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ if (optsize < sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (td != NULL &&
+ (error = prison_local_ip6(td->td_ucred,
+ &(((struct sockaddr_in6 *)sa)->sin6_addr),
+ (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#endif
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ sctp_bindx_add_address(so, inp, sa, vrf_id, &error, p);
+ break;
+ }
+ case SCTP_BINDX_REM_ADDR:
+ {
+ struct sockaddr *sa;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct thread *td;
+
+ td = (struct thread *)p;
+#endif
+ SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
+#ifdef INET
+ if (sa->sa_family == AF_INET) {
+ if (optsize < sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (td != NULL &&
+ (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)sa)->sin_addr)))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#endif
+ } else
+#endif
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ if (optsize < sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (td != NULL &&
+ (error = prison_local_ip6(td->td_ucred,
+ &(((struct sockaddr_in6 *)sa)->sin6_addr),
+ (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#endif
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ sctp_bindx_delete_address(inp, sa, vrf_id, &error);
+ break;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ case SCTP_LISTEN_FIX:
+ /* only applies to one-to-many sockets */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ /* make sure the ACCEPTCONN flag is OFF */
+ so->so_options &= ~SO_ACCEPTCONN;
+ } else {
+ /* otherwise, not allowed */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ break;
+#endif
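+ /*
+ * SCTP_EVENT (cf. RFC 6458): subscribe to or unsubscribe from a
+ * single notification type, superseding SCTP_EVENTS. Sender-dry
+ * events cannot be requested for SCTP_ALL_ASSOC or
+ * SCTP_CURRENT_ASSOC to avoid a storm of notifications.
+ */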
+ case SCTP_EVENT:
+ {
+ struct sctp_event *event;
+ uint32_t event_type;
+
+ SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, optsize);
+ SCTP_FIND_STCB(inp, stcb, event->se_assoc_id);
+ switch (event->se_type) {
+ case SCTP_ASSOC_CHANGE:
+ event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT;
+ break;
+ case SCTP_PEER_ADDR_CHANGE:
+ event_type = SCTP_PCB_FLAGS_RECVPADDREVNT;
+ break;
+ case SCTP_REMOTE_ERROR:
+ event_type = SCTP_PCB_FLAGS_RECVPEERERR;
+ break;
+ case SCTP_SEND_FAILED:
+ event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
+ break;
+ case SCTP_SHUTDOWN_EVENT:
+ event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
+ break;
+ case SCTP_ADAPTATION_INDICATION:
+ event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT;
+ break;
+ case SCTP_PARTIAL_DELIVERY_EVENT:
+ event_type = SCTP_PCB_FLAGS_PDAPIEVNT;
+ break;
+ case SCTP_AUTHENTICATION_EVENT:
+ event_type = SCTP_PCB_FLAGS_AUTHEVNT;
+ break;
+ case SCTP_STREAM_RESET_EVENT:
+ event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT;
+ break;
+ case SCTP_SENDER_DRY_EVENT:
+ event_type = SCTP_PCB_FLAGS_DRYEVNT;
+ break;
+ case SCTP_NOTIFICATIONS_STOPPED_EVENT:
+ event_type = 0;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP);
+ error = ENOTSUP;
+ break;
+ case SCTP_ASSOC_RESET_EVENT:
+ event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT;
+ break;
+ case SCTP_STREAM_CHANGE_EVENT:
+ event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT;
+ break;
+ case SCTP_SEND_FAILED_EVENT:
+ event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT;
+ break;
+ default:
+ event_type = 0;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (event_type > 0) {
+ if (stcb) {
+ if (event->se_on) {
+ sctp_stcb_feature_on(inp, stcb, event_type);
+ if (event_type == SCTP_PCB_FLAGS_DRYEVNT) {
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
+ }
+ }
+ } else {
+ sctp_stcb_feature_off(inp, stcb, event_type);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /*
+ * We don't want to send up a storm of events,
+ * so return an error for sender dry events
+ */
+ if ((event_type == SCTP_PCB_FLAGS_DRYEVNT) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((event->se_assoc_id == SCTP_ALL_ASSOC) ||
+ (event->se_assoc_id == SCTP_CURRENT_ASSOC))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP);
+ error = ENOTSUP;
+ break;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((event->se_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (event->se_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ if (event->se_on) {
+ sctp_feature_on(inp, event_type);
+ } else {
+ sctp_feature_off(inp, event_type);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((event->se_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (event->se_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (event->se_on) {
+ sctp_stcb_feature_on(inp, stcb, event_type);
+ } else {
+ sctp_stcb_feature_off(inp, stcb, event_type);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ } else {
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ break;
+ }
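+	/*
+	 * Illustrative sketch (not part of the imported usrsctp sources): the
+	 * SCTP_EVENT handling above is driven from the application through
+	 * the RFC 6458 socket option. Assuming a usrsctp socket `sock`
+	 * created with usrsctp_socket(), subscribing to association change
+	 * notifications looks roughly like:
+	 *
+	 *	struct sctp_event event;
+	 *
+	 *	memset(&event, 0, sizeof(event));
+	 *	event.se_assoc_id = SCTP_FUTURE_ASSOC;
+	 *	event.se_type = SCTP_ASSOC_CHANGE;
+	 *	event.se_on = 1;
+	 *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_EVENT,
+	 *	                   &event, sizeof(event));
+	 */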
+ case SCTP_RECVRCVINFO:
+ {
+ int *onoff;
+
+ SCTP_CHECK_AND_CAST(onoff, optval, int, optsize);
+ SCTP_INP_WLOCK(inp);
+ if (*onoff != 0) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ case SCTP_RECVNXTINFO:
+ {
+ int *onoff;
+
+ SCTP_CHECK_AND_CAST(onoff, optval, int, optsize);
+ SCTP_INP_WLOCK(inp);
+ if (*onoff != 0) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ case SCTP_DEFAULT_SNDINFO:
+ {
+ struct sctp_sndinfo *info;
+ uint16_t policy;
+
+ SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, optsize);
+ SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id);
+
+ if (stcb) {
+ if (info->snd_sid < stcb->asoc.streamoutcnt) {
+ stcb->asoc.def_send.sinfo_stream = info->snd_sid;
+ policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
+ stcb->asoc.def_send.sinfo_flags = info->snd_flags;
+ stcb->asoc.def_send.sinfo_flags |= policy;
+ stcb->asoc.def_send.sinfo_ppid = info->snd_ppid;
+ stcb->asoc.def_send.sinfo_context = info->snd_context;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((info->snd_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (info->snd_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->def_send.sinfo_stream = info->snd_sid;
+ policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags);
+ inp->def_send.sinfo_flags = info->snd_flags;
+ inp->def_send.sinfo_flags |= policy;
+ inp->def_send.sinfo_ppid = info->snd_ppid;
+ inp->def_send.sinfo_context = info->snd_context;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((info->snd_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (info->snd_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (info->snd_sid < stcb->asoc.streamoutcnt) {
+ stcb->asoc.def_send.sinfo_stream = info->snd_sid;
+ policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
+ stcb->asoc.def_send.sinfo_flags = info->snd_flags;
+ stcb->asoc.def_send.sinfo_flags |= policy;
+ stcb->asoc.def_send.sinfo_ppid = info->snd_ppid;
+ stcb->asoc.def_send.sinfo_context = info->snd_context;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
+ case SCTP_DEFAULT_PRINFO:
+ {
+ struct sctp_default_prinfo *info;
+
+ SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, optsize);
+ SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id);
+
+ if (info->pr_policy > SCTP_PR_SCTP_MAX) {
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (stcb) {
+ stcb->asoc.def_send.sinfo_flags &= 0xfff0;
+ stcb->asoc.def_send.sinfo_flags |= info->pr_policy;
+ stcb->asoc.def_send.sinfo_timetolive = info->pr_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((info->pr_assoc_id == SCTP_FUTURE_ASSOC) ||
+ (info->pr_assoc_id == SCTP_ALL_ASSOC)))) {
+ SCTP_INP_WLOCK(inp);
+ inp->def_send.sinfo_flags &= 0xfff0;
+ inp->def_send.sinfo_flags |= info->pr_policy;
+ inp->def_send.sinfo_timetolive = info->pr_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ ((info->pr_assoc_id == SCTP_CURRENT_ASSOC) ||
+ (info->pr_assoc_id == SCTP_ALL_ASSOC))) {
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.def_send.sinfo_flags &= 0xfff0;
+ stcb->asoc.def_send.sinfo_flags |= info->pr_policy;
+ stcb->asoc.def_send.sinfo_timetolive = info->pr_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ break;
+ }
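+	/*
+	 * Illustrative sketch (not part of the imported usrsctp sources): the
+	 * default PR-SCTP policy handled above is set with the RFC 6458
+	 * option, e.g. timed reliability with a lifetime of 3000 ms for all
+	 * future associations, assuming a usrsctp socket `sock`:
+	 *
+	 *	struct sctp_default_prinfo info;
+	 *
+	 *	memset(&info, 0, sizeof(info));
+	 *	info.pr_assoc_id = SCTP_FUTURE_ASSOC;
+	 *	info.pr_policy = SCTP_PR_SCTP_TTL;
+	 *	info.pr_value = 3000;
+	 *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_DEFAULT_PRINFO,
+	 *	                   &info, sizeof(info));
+	 */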
+ case SCTP_PEER_ADDR_THLDS:
+ /* Applies to the specific association */
+ {
+ struct sctp_paddrthlds *thlds;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, optsize);
+ SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (thlds->spt_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&thlds->spt_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&thlds->spt_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&thlds->spt_address;
+ }
+#else
+ addr = (struct sockaddr *)&thlds->spt_address;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+		/* We increment here since sctp_findassociation_ep_addr() will
+		 * do a decrement if it finds the stcb as long as the locked
+		 * tcb (last argument) is NULL, i.e. not an actual TCB.
+		 */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)addr;
+ if (sconn->sconn_addr != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+ if (thlds->spt_pathcpthld != 0xffff) {
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ if (stcb != NULL) {
+ if (net != NULL) {
+ net->failure_threshold = thlds->spt_pathmaxrxt;
+ net->pf_threshold = thlds->spt_pathpfthld;
+ if (net->dest_state & SCTP_ADDR_PF) {
+ if ((net->error_count > net->failure_threshold) ||
+ (net->error_count <= net->pf_threshold)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ }
+ } else {
+ if ((net->error_count > net->pf_threshold) &&
+ (net->error_count <= net->failure_threshold)) {
+ net->dest_state |= SCTP_ADDR_PF;
+ sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_17);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ if (net->error_count > net->failure_threshold) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ } else {
+ if (net->error_count <= net->failure_threshold) {
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ }
+ } else {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->failure_threshold = thlds->spt_pathmaxrxt;
+ net->pf_threshold = thlds->spt_pathpfthld;
+ if (net->dest_state & SCTP_ADDR_PF) {
+ if ((net->error_count > net->failure_threshold) ||
+ (net->error_count <= net->pf_threshold)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ }
+ } else {
+ if ((net->error_count > net->pf_threshold) &&
+ (net->error_count <= net->failure_threshold)) {
+ net->dest_state |= SCTP_ADDR_PF;
+ sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_18);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ if (net->error_count > net->failure_threshold) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ } else {
+ if (net->error_count <= net->failure_threshold) {
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+ }
+ }
+ }
+ stcb->asoc.def_net_failure = thlds->spt_pathmaxrxt;
+ stcb->asoc.def_net_pf_threshold = thlds->spt_pathpfthld;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.def_net_failure = thlds->spt_pathmaxrxt;
+ inp->sctp_ep.def_net_pf_threshold = thlds->spt_pathpfthld;
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
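+	/*
+	 * Illustrative sketch (not part of the imported usrsctp sources): the
+	 * RFC 7829 thresholds handled above are set like this; note that this
+	 * implementation rejects any spt_pathcpthld other than 0xffff, i.e.
+	 * the primary-switchover threshold is not supported. `sock` is an
+	 * assumed usrsctp socket.
+	 *
+	 *	struct sctp_paddrthlds thlds;
+	 *
+	 *	memset(&thlds, 0, sizeof(thlds));
+	 *	thlds.spt_assoc_id = SCTP_FUTURE_ASSOC;
+	 *	thlds.spt_pathmaxrxt = 5;
+	 *	thlds.spt_pathpfthld = 3;
+	 *	thlds.spt_pathcpthld = 0xffff;
+	 *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
+	 *	                   &thlds, sizeof(thlds));
+	 */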
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ {
+ struct sctp_udpencaps *encaps;
+ struct sctp_nets *net;
+ struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin_store;
+#endif
+
+ SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, optsize);
+ SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id);
+
+#if defined(INET) && defined(INET6)
+ if (encaps->sue_address.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&encaps->sue_address;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin_store, sin6);
+ addr = (struct sockaddr *)&sin_store;
+ } else {
+ addr = (struct sockaddr *)&encaps->sue_address;
+ }
+ } else {
+ addr = (struct sockaddr *)&encaps->sue_address;
+ }
+#else
+ addr = (struct sockaddr *)&encaps->sue_address;
+#endif
+ if (stcb != NULL) {
+ net = sctp_findnet(stcb, addr);
+ } else {
+		/* We increment here since sctp_findassociation_ep_addr() will
+		 * do a decrement if it finds the stcb as long as the locked
+		 * tcb (last argument) is NULL, i.e. not an actual TCB.
+		 */
+ net = NULL;
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)addr;
+ if (sconn->sconn_addr != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else
+#endif
+ {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+
+ if (stcb != NULL) {
+ if (net != NULL) {
+ net->port = encaps->sue_port;
+ } else {
+ stcb->asoc.port = encaps->sue_port;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.port = encaps->sue_port;
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
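+	/*
+	 * Illustrative sketch (not part of the imported usrsctp sources): UDP
+	 * encapsulation of SCTP (RFC 6951), handled above, is typically
+	 * enabled from the application with the IANA-registered encapsulation
+	 * port 9899, assuming a usrsctp socket `sock`:
+	 *
+	 *	struct sctp_udpencaps encaps;
+	 *
+	 *	memset(&encaps, 0, sizeof(encaps));
+	 *	encaps.sue_assoc_id = SCTP_FUTURE_ASSOC;
+	 *	encaps.sue_address.ss_family = AF_INET;
+	 *	encaps.sue_port = htons(9899);
+	 *	usrsctp_setsockopt(sock, IPPROTO_SCTP,
+	 *	                   SCTP_REMOTE_UDP_ENCAPS_PORT,
+	 *	                   &encaps, sizeof(encaps));
+	 */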
+ case SCTP_ECN_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->ecn_supported = 0;
+ } else {
+ inp->ecn_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
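+	/*
+	 * Illustrative sketch (not part of the imported usrsctp sources):
+	 * SCTP_ECN_SUPPORTED above, like the other *_SUPPORTED options that
+	 * follow (PR, AUTH, ASCONF, RECONFIG, NR-SACK, PKTDROP), can only be
+	 * changed on the endpoint before an association exists:
+	 *
+	 *	struct sctp_assoc_value av;
+	 *
+	 *	memset(&av, 0, sizeof(av));
+	 *	av.assoc_id = SCTP_FUTURE_ASSOC;
+	 *	av.assoc_value = 0;
+	 *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_ECN_SUPPORTED,
+	 *	                   &av, sizeof(av));
+	 */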
+ case SCTP_PR_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->prsctp_supported = 0;
+ } else {
+ inp->prsctp_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_AUTH_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ if ((av->assoc_value == 0) &&
+ (inp->asconf_supported == 1)) {
+ /* AUTH is required for ASCONF */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->auth_supported = 0;
+ } else {
+ inp->auth_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_ASCONF_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ if ((av->assoc_value != 0) &&
+ (inp->auth_supported == 0)) {
+ /* AUTH is required for ASCONF */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->asconf_supported = 0;
+ sctp_auth_delete_chunk(SCTP_ASCONF,
+ inp->sctp_ep.local_auth_chunks);
+ sctp_auth_delete_chunk(SCTP_ASCONF_ACK,
+ inp->sctp_ep.local_auth_chunks);
+ } else {
+ inp->asconf_supported = 1;
+ sctp_auth_add_chunk(SCTP_ASCONF,
+ inp->sctp_ep.local_auth_chunks);
+ sctp_auth_add_chunk(SCTP_ASCONF_ACK,
+ inp->sctp_ep.local_auth_chunks);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_RECONFIG_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->reconfig_supported = 0;
+ } else {
+ inp->reconfig_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_NRSACK_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->nrsack_supported = 0;
+ } else {
+ inp->nrsack_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_PKTDROP_SUPPORTED:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value == 0) {
+ inp->pktdrop_supported = 0;
+ } else {
+ inp->pktdrop_supported = 1;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ case SCTP_MAX_CWND:
+ {
+ struct sctp_assoc_value *av;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ stcb->asoc.max_cwnd = av->assoc_value;
+ if (stcb->asoc.max_cwnd > 0) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((net->cwnd > stcb->asoc.max_cwnd) &&
+ (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
+ net->cwnd = stcb->asoc.max_cwnd;
+ if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+ net->cwnd = net->mtu - sizeof(struct sctphdr);
+ }
+ }
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
+ (av->assoc_id == SCTP_FUTURE_ASSOC))) {
+ SCTP_INP_WLOCK(inp);
+ inp->max_cwnd = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ }
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ break;
+ } /* end switch (opt) */
+ return (error);
+}
+
+#if !defined(__Userspace__)
+int
+sctp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+#if defined(__FreeBSD__)
+ struct epoch_tracker et;
+ struct sctp_inpcb *inp;
+#endif
+ void *optval = NULL;
+ void *p;
+ size_t optsize = 0;
+ int error = 0;
+
+#if defined(__FreeBSD__)
+ if ((sopt->sopt_level == SOL_SOCKET) &&
+ (sopt->sopt_name == SO_SETFIB)) {
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+			SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_INP_WLOCK(inp);
+ inp->fibnum = so->so_fibnum;
+ SCTP_INP_WUNLOCK(inp);
+ return (0);
+ }
+#endif
+ if (sopt->sopt_level != IPPROTO_SCTP) {
+ /* wrong proto level... send back up to IP */
+#ifdef INET6
+ if (INP_CHECK_SOCKAF(so, AF_INET6))
+ error = ip6_ctloutput(so, sopt);
+#endif /* INET6 */
+#if defined(INET) && defined(INET6)
+ else
+#endif
+#ifdef INET
+ error = ip_ctloutput(so, sopt);
+#endif
+ return (error);
+ }
+ optsize = sopt->sopt_valsize;
+ if (optsize > SCTP_SOCKET_OPTION_LIMIT) {
+ SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
+ return (ENOBUFS);
+ }
+ if (optsize) {
+ SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
+ if (optval == NULL) {
+ SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
+ return (ENOBUFS);
+ }
+ error = sooptcopyin(sopt, optval, optsize, optsize);
+ if (error) {
+ SCTP_FREE(optval, SCTP_M_SOCKOPT);
+ goto out;
+ }
+ }
+#if defined(__FreeBSD__) || defined(_WIN32)
+ p = (void *)sopt->sopt_td;
+#else
+ p = (void *)sopt->sopt_p;
+#endif
+ if (sopt->sopt_dir == SOPT_SET) {
+#if defined(__FreeBSD__)
+ NET_EPOCH_ENTER(et);
+#endif
+ error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
+#if defined(__FreeBSD__)
+ NET_EPOCH_EXIT(et);
+#endif
+ } else if (sopt->sopt_dir == SOPT_GET) {
+ error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
+ } else {
+ SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ if ((error == 0) && (optval != NULL)) {
+ error = sooptcopyout(sopt, optval, optsize);
+ SCTP_FREE(optval, SCTP_M_SOCKOPT);
+ } else if (optval != NULL) {
+ SCTP_FREE(optval, SCTP_M_SOCKOPT);
+ }
+out:
+ return (error);
+}
+#endif
+
+#ifdef INET
+#if defined(__Userspace__)
+int
+sctp_connect(struct socket *so, struct sockaddr *addr)
+{
+ void *p = NULL;
+#elif defined(__FreeBSD__)
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+#elif defined(__APPLE__)
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, struct proc *p)
+{
+#elif defined(_WIN32)
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, PKTHREAD p)
+{
+#else
+static int
+sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
+{
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+#ifdef SCTP_MVRF
+ int i, fnd = 0;
+#endif
+ int error = 0;
+ int create_lock_on = 0;
+ uint32_t vrf_id;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+		/* Do the same as TCP does: we are not set up. */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+ }
+
+#if defined(__Userspace__)
+ /* TODO __Userspace__ falls into this code for IPv6 stuff at the moment... */
+#endif
+#if !defined(_WIN32) && !defined(__linux__)
+ switch (addr->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct sockaddr_in6 *sin6;
+
+#endif
+ if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6->sin6_addr)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ return (error);
+ }
+#endif
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct sockaddr_in *sin;
+
+#endif
+#if !defined(_WIN32)
+ if (addr->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sin = (struct sockaddr_in *)addr;
+ if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sin->sin_addr)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ return (error);
+ }
+#endif
+ break;
+ }
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+ return (EAFNOSUPPORT);
+ }
+#endif
+ SCTP_INP_INCR_REF(inp);
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_on = 1;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		/* Should I really unlock? */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+ error = EFAULT;
+ goto out_now;
+ }
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+#endif
+#if defined(__Userspace__)
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) &&
+ (addr->sa_family != AF_CONN)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+ error = sctp_inpcb_bind(so, NULL, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ }
+ /* Now do we connect? */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ error = EADDRINUSE;
+ goto out_now;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+		/* We increment here since sctp_findassociation_ep_addr() will
+		 * do a decrement if it finds the stcb as long as the locked
+		 * tcb (last argument) is NULL, i.e. not an actual TCB.
+		 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ if (stcb != NULL) {
+		/* Already have or are bringing up an association */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ goto out_now;
+ }
+
+ vrf_id = inp->def_vrf_id;
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (vrf_id == inp->m_vrf_ids[i]) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+#endif
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+ inp->sctp_ep.pre_open_stream_count,
+ inp->sctp_ep.port, p,
+ SCTP_INITIALIZE_AUTH_PARAMS);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ goto out_now;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+ out_now:
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ if (create_lock_on) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ }
+ SCTP_INP_DECR_REF(inp);
+ return (error);
+}
+#endif
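+
+/*
+ * Illustrative sketch (not part of the imported usrsctp sources): in
+ * userland the connect path above is reached through the public wrapper
+ * usrsctp_connect(). The address and port below are hypothetical:
+ *
+ *	struct sockaddr_in sin;
+ *
+ *	memset(&sin, 0, sizeof(sin));
+ *	sin.sin_family = AF_INET;
+ *	sin.sin_port = htons(5001);
+ *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
+ *	if (usrsctp_connect(sock, (struct sockaddr *)&sin, sizeof(sin)) < 0)
+ *		perror("usrsctp_connect");
+ */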
+
+#if defined(__Userspace__)
+int
+sctpconn_connect(struct socket *so, struct sockaddr *addr)
+{
+#ifdef SCTP_MVRF
+ int i, fnd = 0;
+#endif
+ void *p = NULL;
+ int error = 0;
+ int create_lock_on = 0;
+ uint32_t vrf_id;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+		/* Do the same as TCP does: we are not set up. */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+ }
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+#endif
+ case AF_CONN:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_conn)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+ return (EAFNOSUPPORT);
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_on = 1;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		/* Should I really unlock? */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+ error = EFAULT;
+ goto out_now;
+ }
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+ error = sctp_inpcb_bind(so, NULL, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ }
+ /* Now do we connect? */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ error = EADDRINUSE;
+ goto out_now;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+		/* We increment here since sctp_findassociation_ep_addr() will
+		 * do a decrement if it finds the stcb as long as the locked
+		 * tcb (last argument) is NULL, i.e. not an actual TCB.
+		 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ if (stcb != NULL) {
+		/* Already have or are bringing up an association */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ goto out_now;
+ }
+
+ vrf_id = inp->def_vrf_id;
+#ifdef SCTP_MVRF
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (vrf_id == inp->m_vrf_ids[i]) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+#endif
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+ inp->sctp_ep.pre_open_stream_count,
+ inp->sctp_ep.port, p,
+ SCTP_INITIALIZE_AUTH_PARAMS);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ goto out_now;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+ out_now:
+ if (create_lock_on) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ }
+
+ SCTP_INP_DECR_REF(inp);
+ return (error);
+}
+#endif
+
+int
+#if defined(__Userspace__)
+sctp_listen(struct socket *so, int backlog, struct proc *p)
+#elif defined(__FreeBSD__)
+sctp_listen(struct socket *so, int backlog, struct thread *p)
+#elif defined(_WIN32)
+sctp_listen(struct socket *so, int backlog, PKTHREAD p)
+#else
+sctp_listen(struct socket *so, struct proc *p)
+#endif
+{
+ /*
+ * Note this module depends on the protocol processing being called
+ * AFTER any socket level flags and backlog are applied to the
+ * socket. The traditional way that the socket flags are applied is
+ * AFTER protocol processing. We have made a change to the
+ * sys/kern/uipc_socket.c module to reverse this but this MUST be in
+ * place if the socket API for SCTP is to work properly.
+ */
+
+ int error = 0;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+		/* Do the same as TCP does: we are not set up. */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+ /* See if we have a listener */
+ struct sctp_inpcb *tinp;
+ union sctp_sockstore store;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* not bound all */
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ memcpy(&store, &laddr->ifa->address, sizeof(store));
+ switch (store.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ store.sin.sin_port = inp->sctp_lport;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ store.sin6.sin6_port = inp->sctp_lport;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ store.sconn.sconn_port = inp->sctp_lport;
+ break;
+#endif
+ default:
+ break;
+ }
+ tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id);
+ if (tinp && (tinp != inp) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (SCTP_IS_LISTENING(tinp))) {
+				/* we already have a listener and it's not this inp. */
+ SCTP_INP_DECR_REF(tinp);
+ return (EADDRINUSE);
+ } else if (tinp) {
+ SCTP_INP_DECR_REF(tinp);
+ }
+ }
+ } else {
+ /* Setup a local addr bound all */
+ memset(&store, 0, sizeof(store));
+#ifdef INET6
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ store.sa.sa_family = AF_INET6;
+#ifdef HAVE_SA_LEN
+ store.sa.sa_len = sizeof(struct sockaddr_in6);
+#endif
+ }
+#endif
+#if defined(__Userspace__)
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+ store.sa.sa_family = AF_CONN;
+#ifdef HAVE_SA_LEN
+ store.sa.sa_len = sizeof(struct sockaddr_conn);
+#endif
+ }
+#endif
+#ifdef INET
+#if defined(__Userspace__)
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) == 0)) {
+#else
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+#endif
+ store.sa.sa_family = AF_INET;
+#ifdef HAVE_SA_LEN
+ store.sa.sa_len = sizeof(struct sockaddr_in);
+#endif
+ }
+#endif
+ switch (store.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ store.sin.sin_port = inp->sctp_lport;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ store.sin6.sin6_port = inp->sctp_lport;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ store.sconn.sconn_port = inp->sctp_lport;
+ break;
+#endif
+ default:
+ break;
+ }
+ tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id);
+ if (tinp && (tinp != inp) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (SCTP_IS_LISTENING(tinp))) {
+			/* we already have a listener and it's not this inp. */
+ SCTP_INP_DECR_REF(tinp);
+ return (EADDRINUSE);
+ } else if (tinp) {
+ SCTP_INP_DECR_REF(tinp);
+ }
+ }
+ }
+ SCTP_INP_RLOCK(inp);
+#ifdef SCTP_LOCK_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
+ sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+ }
+#endif
+#if defined(__FreeBSD__) || defined(__Userspace__)
+ SOCK_LOCK(so);
+ error = solisten_proto_check(so);
+ SOCK_UNLOCK(so);
+ if (error) {
+ SCTP_INP_RUNLOCK(inp);
+ return (error);
+ }
+#endif
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/* The unlucky case:
+		 * - We are in the tcp pool with this guy.
+		 * - Someone else is in the main inp slot.
+		 * - We must move this guy (the listener) to the main slot.
+		 * - We must then move the guy that was the listener to the tcp pool.
+		 */
+ if (sctp_swap_inpcb_for_listen(inp)) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ }
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ /* We must do a bind. */
+ if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
+ /* bind error, probably perm */
+ return (error);
+ }
+ }
+ SCTP_INP_WLOCK(inp);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) {
+ SOCK_LOCK(so);
+ solisten_proto(so, backlog);
+ SOCK_UNLOCK(so);
+ }
+#elif defined(_WIN32) || defined(__Userspace__)
+ SOCK_LOCK(so);
+ solisten_proto(so, backlog);
+#endif
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ /* remove the ACCEPTCONN flag for one-to-many sockets */
+#if defined(__Userspace__)
+ so->so_options &= ~SCTP_SO_ACCEPTCONN;
+#else
+ so->so_options &= ~SO_ACCEPTCONN;
+#endif
+ }
+ SOCK_UNLOCK(so);
+#endif
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+ if (backlog > 0) {
+ inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
+ } else {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING;
+ }
+#else
+ inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ return (error);
+}
+
+static int sctp_defered_wakeup_cnt = 0;
+
+int
+sctp_accept(struct socket *so, struct sockaddr **addr)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_inpcb *inp;
+ union sctp_sockstore store;
+#ifdef INET6
+#if defined(SCTP_KAME) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ int error;
+#endif
+#endif
+ inp = (struct sctp_inpcb *)so->so_pcb;
+
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ return (EOPNOTSUPP);
+ }
+ if (so->so_state & SS_ISDISCONNECTED) {
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
+ return (ECONNABORTED);
+ }
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ SCTP_TCB_LOCK(stcb);
+ store = stcb->asoc.primary_destination->ro._l_addr;
+ SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
+ /* Wake any delayed sleep action */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+ SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
+ if (sowriteable(inp->sctp_socket)) {
+#if defined(__Userspace__)
+				/* __Userspace__: calling sowwakeup_locked() because of the SOCKBUF_LOCK above. */
+#endif
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+ sowwakeup_locked(inp->sctp_socket);
+#else
+#if defined(__APPLE__)
+ /* socket is locked */
+#endif
+ sowwakeup(inp->sctp_socket);
+#endif
+ } else {
+ SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
+ }
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+ SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
+ if (soreadable(inp->sctp_socket)) {
+ sctp_defered_wakeup_cnt++;
+#if defined(__Userspace__)
+				/* __Userspace__: calling sorwakeup_locked() because of the SOCKBUF_LOCK above. */
+#endif
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+ sorwakeup_locked(inp->sctp_socket);
+#else
+#if defined(__APPLE__)
+ /* socket is locked */
+#endif
+ sorwakeup(inp->sctp_socket);
+#endif
+ } else {
+ SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
+ }
+ }
+ }
+ SCTP_INP_WUNLOCK(inp);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_19);
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ switch (store.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ if (sin == NULL)
+ return (ENOMEM);
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(*sin);
+#endif
+ sin->sin_port = store.sin.sin_port;
+ sin->sin_addr = store.sin.sin_addr;
+ *addr = (struct sockaddr *)sin;
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL)
+ return (ENOMEM);
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(*sin6);
+#endif
+ sin6->sin6_port = store.sin6.sin6_port;
+ sin6->sin6_addr = store.sin6.sin6_addr;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+#ifdef SCTP_KAME
+ if ((error = sa6_recoverscope(sin6)) != 0) {
+ SCTP_FREE_SONAME(sin6);
+ return (error);
+ }
+#else
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
+ /*
+ * sin6->sin6_scope_id =
+ * ntohs(sin6->sin6_addr.s6_addr16[1]);
+ */
+ in6_recoverscope(sin6, &sin6->sin6_addr, NULL); /* skip ifp check */
+ else
+ sin6->sin6_scope_id = 0; /* XXX */
+#endif /* SCTP_KAME */
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ *addr = (struct sockaddr *)sin6;
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn;
+
+ SCTP_MALLOC_SONAME(sconn, struct sockaddr_conn *, sizeof(struct sockaddr_conn));
+ if (sconn == NULL) {
+ return (ENOMEM);
+ }
+ sconn->sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ sconn->sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ sconn->sconn_port = store.sconn.sconn_port;
+ sconn->sconn_addr = store.sconn.sconn_addr;
+ *addr = (struct sockaddr *)sconn;
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ return (0);
+}
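+
+/*
+ * Illustrative sketch (not part of the imported usrsctp sources): a
+ * one-to-one style server drives sctp_listen() and sctp_accept() above
+ * through the public wrappers; `sock` is assumed to be bound already:
+ *
+ *	struct socket *conn;
+ *	struct sockaddr_in peer;
+ *	socklen_t len = sizeof(peer);
+ *
+ *	if (usrsctp_listen(sock, 1) < 0)
+ *		perror("usrsctp_listen");
+ *	conn = usrsctp_accept(sock, (struct sockaddr *)&peer, &len);
+ *	if (conn == NULL)
+ *		perror("usrsctp_accept");
+ */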
+
+#ifdef INET
+int
+#if !defined(__Userspace__)
+sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in *sin;
+#else
+sctp_ingetaddr(struct socket *so, struct mbuf *nam)
+{
+ struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
+#endif
+ uint32_t vrf_id;
+ struct sctp_inpcb *inp;
+ struct sctp_ifa *sctp_ifa;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+#if !defined(__Userspace__)
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ if (sin == NULL)
+ return (ENOMEM);
+#else
+ SCTP_BUF_LEN(nam) = sizeof(*sin);
+ memset(sin, 0, sizeof(*sin));
+#endif
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(*sin);
+#endif
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (!inp) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ SCTP_INP_RLOCK(inp);
+ sin->sin_port = inp->sctp_lport;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ struct sctp_tcb *stcb;
+ struct sockaddr_in *sin_a;
+ struct sctp_nets *net;
+ int fnd;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto notConn;
+ }
+ fnd = 0;
+ sin_a = NULL;
+ SCTP_TCB_LOCK(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin_a == NULL)
+ /* this will make coverity happy */
+ continue;
+
+ if (sin_a->sin_family == AF_INET) {
+ fnd = 1;
+ break;
+ }
+ }
+ if ((!fnd) || (sin_a == NULL)) {
+ /* punt */
+ SCTP_TCB_UNLOCK(stcb);
+ goto notConn;
+ }
+
+ vrf_id = inp->def_vrf_id;
+ sctp_ifa = sctp_source_address_selection(inp,
+ stcb,
+ (sctp_route_t *)&net->ro,
+ net, 0, vrf_id);
+ if (sctp_ifa) {
+ sin->sin_addr = sctp_ifa->address.sin.sin_addr;
+ sctp_free_ifa(sctp_ifa);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* For the bound all case you get back 0 */
+ notConn:
+ sin->sin_addr.s_addr = 0;
+ }
+
+ } else {
+ /* Take the first IPv4 address in the list */
+ struct sctp_laddr *laddr;
+ int fnd = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->address.sa.sa_family == AF_INET) {
+ struct sockaddr_in *sin_a;
+
+ sin_a = &laddr->ifa->address.sin;
+ sin->sin_addr = sin_a->sin_addr;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin);
+#endif
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ return (ENOENT);
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+#if !defined(__Userspace__)
+ (*addr) = (struct sockaddr *)sin;
+#endif
+ return (0);
+}
+
+int
+#if !defined(__Userspace__)
+sctp_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in *sin;
+#else
+sctp_peeraddr(struct socket *so, struct mbuf *nam)
+{
+ struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
+
+#endif
+ int fnd;
+ struct sockaddr_in *sin_a;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+
+ /* Do the malloc first in case it blocks. */
+#if !defined(__Userspace__)
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ if (sin == NULL)
+ return (ENOMEM);
+#else
+ SCTP_BUF_LEN(nam) = sizeof(*sin);
+ memset(sin, 0, sizeof(*sin));
+#endif
+ sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(*sin);
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if ((inp == NULL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /* UDP type and listeners will drop out here */
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (stcb == NULL) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ fnd = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin_a->sin_family == AF_INET) {
+ fnd = 1;
+ sin->sin_port = stcb->rport;
+ sin->sin_addr = sin_a->sin_addr;
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (!fnd) {
+ /* No IPv4 address */
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ return (ENOENT);
+ }
+#if !defined(__Userspace__)
+ (*addr) = (struct sockaddr *)sin;
+#endif
+ return (0);
+}
+
+#if !defined(__Userspace__)
+struct pr_usrreqs sctp_usrreqs = {
+#if defined(__FreeBSD__)
+ .pru_abort = sctp_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp_attach,
+ .pru_bind = sctp_bind,
+ .pru_connect = sctp_connect,
+ .pru_control = in_control,
+ .pru_close = sctp_close,
+ .pru_detach = sctp_close,
+ .pru_sopoll = sopoll_generic,
+ .pru_flush = sctp_flush,
+ .pru_disconnect = sctp_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp_peeraddr,
+ .pru_send = sctp_sendm,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp_ingetaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive
+#elif defined(__APPLE__)
+ .pru_abort = sctp_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp_attach,
+ .pru_bind = sctp_bind,
+ .pru_connect = sctp_connect,
+ .pru_connect2 = pru_connect2_notsupp,
+ .pru_control = in_control,
+ .pru_detach = sctp_detach,
+ .pru_disconnect = sctp_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp_peeraddr,
+ .pru_rcvd = NULL,
+ .pru_rcvoob = pru_rcvoob_notsupp,
+ .pru_send = sctp_sendm,
+ .pru_sense = pru_sense_null,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp_ingetaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive,
+ .pru_sopoll = sopoll
+#elif defined(_WIN32) && !defined(__Userspace__)
+ sctp_abort,
+ sctp_accept,
+ sctp_attach,
+ sctp_bind,
+ sctp_connect,
+ pru_connect2_notsupp,
+ NULL,
+ NULL,
+ sctp_disconnect,
+ sctp_listen,
+ sctp_peeraddr,
+ NULL,
+ pru_rcvoob_notsupp,
+ NULL,
+ pru_sense_null,
+ sctp_shutdown,
+ sctp_flush,
+ sctp_ingetaddr,
+ sctp_sosend,
+ sctp_soreceive,
+ sopoll_generic,
+ NULL,
+ sctp_close
+#endif
+};
+#elif !defined(__Userspace__)
+int
+sctp_usrreq(so, req, m, nam, control)
+ struct socket *so;
+ int req;
+ struct mbuf *m, *nam, *control;
+{
+ struct proc *p = curproc;
+ int error;
+ int family;
+ struct sctp_inpcb *inp = (struct sctp_inpcb *)so->so_pcb;
+
+ error = 0;
+ family = so->so_proto->pr_domain->dom_family;
+ if (req == PRU_CONTROL) {
+ switch (family) {
+ case PF_INET:
+ error = in_control(so, (long)m, (caddr_t)nam,
+ (struct ifnet *)control);
+ break;
+#ifdef INET6
+ case PF_INET6:
+ error = in6_control(so, (long)m, (caddr_t)nam,
+ (struct ifnet *)control, p);
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ }
+ return (error);
+ }
+ switch (req) {
+ case PRU_ATTACH:
+ error = sctp_attach(so, family, p);
+ break;
+ case PRU_DETACH:
+ error = sctp_detach(so);
+ break;
+ case PRU_BIND:
+ if (nam == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = sctp_bind(so, nam, p);
+ break;
+ case PRU_LISTEN:
+ error = sctp_listen(so, p);
+ break;
+ case PRU_CONNECT:
+ if (nam == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = sctp_connect(so, nam, p);
+ break;
+ case PRU_DISCONNECT:
+ error = sctp_disconnect(so);
+ break;
+ case PRU_ACCEPT:
+ if (nam == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = sctp_accept(so, nam);
+ break;
+ case PRU_SHUTDOWN:
+ error = sctp_shutdown(so);
+ break;
+
+ case PRU_RCVD:
+ /*
+		/*
+		 * For OpenBSD and NetBSD, this is really ugly. The mbuf *nam
+		 * that is passed (by soreceive()) is the int flags cast as
+		 * a (mbuf *). Yuck!
+		 */
+ break;
+
+ case PRU_SEND:
+ /* Flags are ignored */
+ {
+ struct sockaddr *addr;
+
+ if (nam == NULL)
+ addr = NULL;
+ else
+ addr = mtod(nam, struct sockaddr *);
+
+ error = sctp_sendm(so, 0, m, addr, control, p);
+ }
+ break;
+ case PRU_ABORT:
+ error = sctp_abort(so);
+ break;
+
+ case PRU_SENSE:
+ error = 0;
+ break;
+ case PRU_RCVOOB:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ break;
+ case PRU_SENDOOB:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ break;
+ case PRU_PEERADDR:
+ error = sctp_peeraddr(so, nam);
+ break;
+ case PRU_SOCKADDR:
+ error = sctp_ingetaddr(so, nam);
+ break;
+ case PRU_SLOWTIMO:
+ error = 0;
+ break;
+ default:
+ break;
+ }
+ return (error);
+}
+
+#endif
+#endif
+
+#if defined(__Userspace__)
+int
+register_recv_cb(struct socket *so,
+ int (*receive_cb)(struct socket *sock, union sctp_sockstore addr, void *data,
+ size_t datalen, struct sctp_rcvinfo, int flags, void *ulp_info))
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *) so->so_pcb;
+ if (inp == NULL) {
+ return (0);
+ }
+ SCTP_INP_WLOCK(inp);
+ inp->recv_callback = receive_cb;
+ SCTP_INP_WUNLOCK(inp);
+ return (1);
+}
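+
+/*
+ * Illustrative sketch (not part of the imported usrsctp sources): a
+ * receive callback matching the signature registered above. It is
+ * normally installed via the receive_cb argument of usrsctp_socket();
+ * when data is non-NULL the buffer is owned by the callback and must be
+ * released with free():
+ *
+ *	static int
+ *	handle_data(struct socket *sock, union sctp_sockstore addr,
+ *	            void *data, size_t datalen, struct sctp_rcvinfo rcv,
+ *	            int flags, void *ulp_info)
+ *	{
+ *		if (data != NULL) {
+ *			printf("%zu bytes on stream %u\n",
+ *			       datalen, rcv.rcv_sid);
+ *			free(data);
+ *		}
+ *		return (1);
+ *	}
+ */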
+
+int
+register_send_cb(struct socket *so, uint32_t sb_threshold, int (*send_cb)(struct socket *sock, uint32_t sb_free))
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *) so->so_pcb;
+ if (inp == NULL) {
+ return (0);
+ }
+ SCTP_INP_WLOCK(inp);
+ inp->send_callback = send_cb;
+ inp->send_sb_threshold = sb_threshold;
+ SCTP_INP_WUNLOCK(inp);
+	/* FIXME: change to the current amount free. This will be the full
+	 * buffer the first time this is registered, but it could be only a
+	 * portion of the send buffer if this is called a second time, e.g.
+	 * if the threshold changes.
+	 */
+ return (1);
+}
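+
+/*
+ * Illustrative sketch (not part of the imported usrsctp sources): a send
+ * callback matching the signature registered above; it fires once at
+ * least sb_threshold bytes of send buffer space are free again:
+ *
+ *	static int
+ *	handle_send_space(struct socket *sock, uint32_t sb_free)
+ *	{
+ *		printf("send space available: %u bytes\n", sb_free);
+ *		return (1);
+ *	}
+ */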
+
+int
+register_ulp_info (struct socket *so, void *ulp_info)
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *) so->so_pcb;
+ if (inp == NULL) {
+ return (0);
+ }
+ SCTP_INP_WLOCK(inp);
+ inp->ulp_info = ulp_info;
+ SCTP_INP_WUNLOCK(inp);
+ return (1);
+}
+
+int
+retrieve_ulp_info (struct socket *so, void **pulp_info)
+{
+ struct sctp_inpcb *inp;
+
+ if (pulp_info == NULL) {
+ return (0);
+ }
+
+ inp = (struct sctp_inpcb *) so->so_pcb;
+ if (inp == NULL) {
+ return (0);
+ }
+ SCTP_INP_RLOCK(inp);
+ *pulp_info = inp->ulp_info;
+ SCTP_INP_RUNLOCK(inp);
+ return (1);
+}
+#endif
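
These four __Userspace__ helpers are the userspace hook points for attaching
receive/send upcalls and per-socket user data to an endpoint's PCB; each
returns 1 on success and 0 when the socket has no PCB attached. A minimal
usage sketch against the exact signatures above; my_conn_state, my_recv_cb
and wire_callbacks are hypothetical application-side pieces, not part of the
library:

/* Sketch only: hypothetical application code wiring the helpers above. */
struct my_conn_state {
	int messages_seen;	/* arbitrary per-connection bookkeeping */
};

static int
my_recv_cb(struct socket *sock, union sctp_sockstore addr, void *data,
           size_t datalen, struct sctp_rcvinfo rcv, int flags, void *ulp_info)
{
	struct my_conn_state *state = ulp_info;	/* registered below */

	state->messages_seen++;
	/* consume data/datalen here; rcv describes the received message */
	return (1);
}

static void
wire_callbacks(struct socket *so, struct my_conn_state *state)
{
	if (register_ulp_info(so, state) == 0 ||
	    register_recv_cb(so, my_recv_cb) == 0) {
		/* 0 means the socket had no attached sctp_inpcb */
	}
}
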
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctp_var.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_var.h
new file mode 100644
index 000000000..d2433d607
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctp_var.h
@@ -0,0 +1,448 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_var.h 363323 2020-07-19 12:34:19Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_VAR_H_
+#define _NETINET_SCTP_VAR_H_
+
+#include <netinet/sctp_uio.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#if !defined(__Userspace__)
+extern struct pr_usrreqs sctp_usrreqs;
+#endif
+
+
+#define sctp_feature_on(inp, feature) (inp->sctp_features |= feature)
+#define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature)
+#define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature)
+#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
+
+#define sctp_stcb_feature_on(inp, stcb, feature) {\
+ if (stcb) { \
+ stcb->asoc.sctp_features |= feature; \
+ } else if (inp) { \
+ inp->sctp_features |= feature; \
+ } \
+}
+#define sctp_stcb_feature_off(inp, stcb, feature) {\
+ if (stcb) { \
+ stcb->asoc.sctp_features &= ~feature; \
+ } else if (inp) { \
+ inp->sctp_features &= ~feature; \
+ } \
+}
+#define sctp_stcb_is_feature_on(inp, stcb, feature) \
+ (((stcb != NULL) && \
+ ((stcb->asoc.sctp_features & feature) == feature)) || \
+ ((stcb == NULL) && (inp != NULL) && \
+ ((inp->sctp_features & feature) == feature)))
+#define sctp_stcb_is_feature_off(inp, stcb, feature) \
+ (((stcb != NULL) && \
+ ((stcb->asoc.sctp_features & feature) == 0)) || \
+ ((stcb == NULL) && (inp != NULL) && \
+ ((inp->sctp_features & feature) == 0)) || \
+ ((stcb == NULL) && (inp == NULL)))
+
+/* managing mobility_feature in inpcb (by micchie) */
+#define sctp_mobility_feature_on(inp, feature) (inp->sctp_mobility_features |= feature)
+#define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature)
+#define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature)
+#define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0)
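
The stcb-level macros above encode a precedence rule: when an association
(stcb) exists its feature bits win; otherwise the endpoint (inp) bits are
consulted, and the "off" predicate additionally treats the both-NULL case as
off. A small sketch of that resolution order (the function name is
hypothetical):

/* Sketch only: illustrates the precedence encoded by the macros above. */
static void
example_feature_precedence(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t feature)
{
	sctp_stcb_feature_on(inp, stcb, feature);
	/*
	 * If stcb != NULL, only stcb->asoc.sctp_features was updated and
	 * sctp_stcb_is_feature_on() reports the bit regardless of inp.
	 * If stcb == NULL but inp != NULL, inp->sctp_features carries it
	 * instead. With both NULL nothing is set, and only
	 * sctp_stcb_is_feature_off() returns true.
	 */
}
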
+
+#define sctp_maxspace(sb) (max((sb)->sb_hiwat,SCTP_MINIMAL_RWND))
+
+#define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0))
+
+#define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_cc) ? (sctp_maxspace(sb) - (sb)->sb_cc) : 0))
+
+#define sctp_sbspace_sub(a,b) (((a) > (b)) ? ((a) - (b)) : 0)
+
+/*
+ * I tried to cache the readq entries at one point. But the reality
+ * is that it did not add any performance, since it meant we had to
+ * lock the STCB on read. And at that point, once you have to do an
+ * extra lock, it really does not matter if the lock is in the ZONE
+ * stuff or in our code. Note that this same problem would occur with
+ * an mbuf cache as well, so it is not really worth doing, at least
+ * right now :-D
+ */
+#ifdef INVARIANTS
+#define sctp_free_a_readq(_stcb, _readq) { \
+ if ((_readq)->on_strm_q) \
+ panic("On strm q stcb:%p readq:%p", (_stcb), (_readq)); \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
+ SCTP_DECR_READQ_COUNT(); \
+}
+#else
+#define sctp_free_a_readq(_stcb, _readq) { \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
+ SCTP_DECR_READQ_COUNT(); \
+}
+#endif
+
+#define sctp_alloc_a_readq(_stcb, _readq) { \
+ (_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \
+ if ((_readq)) { \
+ SCTP_INCR_READQ_COUNT(); \
+ } \
+}
+
+#define sctp_free_a_strmoq(_stcb, _strmoq, _so_locked) { \
+ if ((_strmoq)->holds_key_ref) { \
+ sctp_auth_key_release(stcb, sp->auth_keyid, _so_locked); \
+ (_strmoq)->holds_key_ref = 0; \
+ } \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
+ SCTP_DECR_STRMOQ_COUNT(); \
+}
+
+#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
+ (_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
+ if ((_strmoq)) { \
+ memset(_strmoq, 0, sizeof(struct sctp_stream_queue_pending)); \
+ SCTP_INCR_STRMOQ_COUNT(); \
+ (_strmoq)->holds_key_ref = 0; \
+ } \
+}
+
+#define sctp_free_a_chunk(_stcb, _chk, _so_locked) { \
+ if ((_chk)->holds_key_ref) {\
+ sctp_auth_key_release((_stcb), (_chk)->auth_keyid, _so_locked); \
+ (_chk)->holds_key_ref = 0; \
+ } \
+ if (_stcb) { \
+ SCTP_TCB_LOCK_ASSERT((_stcb)); \
+ if ((_chk)->whoTo) { \
+ sctp_free_remote_addr((_chk)->whoTo); \
+ (_chk)->whoTo = NULL; \
+ } \
+ if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \
+ (SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
+ SCTP_DECR_CHK_COUNT(); \
+ } else { \
+ TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+ (_stcb)->asoc.free_chunk_cnt++; \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+ } \
+ } else { \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
+ SCTP_DECR_CHK_COUNT(); \
+ } \
+}
+
+#define sctp_alloc_a_chunk(_stcb, _chk) { \
+ if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
+ (_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \
+ if ((_chk)) { \
+ SCTP_INCR_CHK_COUNT(); \
+ (_chk)->whoTo = NULL; \
+ (_chk)->holds_key_ref = 0; \
+ } \
+ } else { \
+ (_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
+ TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+ (_chk)->holds_key_ref = 0; \
+ SCTP_STAT_INCR(sctps_cached_chk); \
+ (_stcb)->asoc.free_chunk_cnt--; \
+ } \
+}
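
sctp_free_a_chunk() and sctp_alloc_a_chunk() above implement a bounded
per-association free list: freed chunks are parked for reuse until the
per-association or system-wide resource limits are exceeded, after which
they go back to the zone allocator, and allocation prefers the cache over a
fresh zone allocation. The same pattern, stripped of the SCTP structures,
looks roughly like this (standalone sketch; the names and the limit are
illustrative):

#include <stdlib.h>

struct node { struct node *next; };

static struct node *free_list;	/* cached, ready-to-reuse nodes */
static unsigned int free_cnt;	/* current cache depth */
#define FREE_LIMIT 128		/* cf. sctp_asoc_free_resc_limit */

static void
node_free(struct node *n)
{
	if (free_cnt >= FREE_LIMIT) {	/* cache full: really release */
		free(n);
	} else {			/* otherwise park it for reuse */
		n->next = free_list;
		free_list = n;
		free_cnt++;
	}
}

static struct node *
node_alloc(void)
{
	struct node *n = free_list;

	if (n != NULL) {		/* reuse a cached node */
		free_list = n->next;
		free_cnt--;
		return (n);
	}
	return (malloc(sizeof(*n)));	/* cache empty: allocate fresh */
}
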
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+
+#define sctp_free_remote_addr(__net) { \
+ if ((__net)) { \
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
+ RO_NHFREE(&(__net)->ro); \
+ if ((__net)->src_addr_selected) { \
+ sctp_free_ifa((__net)->ro._s_addr); \
+ (__net)->ro._s_addr = NULL; \
+ } \
+ (__net)->src_addr_selected = 0; \
+ (__net)->dest_state &= ~SCTP_ADDR_REACHABLE; \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
+ SCTP_DECR_RADDR_COUNT(); \
+ } \
+ } \
+}
+
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
+ if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+ } \
+ if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
+ SCTP_BUF_TYPE(m) != MT_OOBDATA) \
+ atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
+}
+
+#define sctp_sballoc(stcb, sb, m) { \
+ atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
+ atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
+ if (stcb) { \
+ atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+ atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+ } \
+ if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
+ SCTP_BUF_TYPE(m) != MT_OOBDATA) \
+ atomic_add_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
+}
+
+#else /* non-FreeBSD kernel or userspace */
+
+#define sctp_free_remote_addr(__net) { \
+ if ((__net)) { \
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
+ if ((__net)->ro.ro_rt) { \
+ RTFREE((__net)->ro.ro_rt); \
+ (__net)->ro.ro_rt = NULL; \
+ } \
+ if ((__net)->src_addr_selected) { \
+ sctp_free_ifa((__net)->ro._s_addr); \
+ (__net)->ro._s_addr = NULL; \
+ } \
+ (__net)->src_addr_selected = 0; \
+ (__net)->dest_state &=~SCTP_ADDR_REACHABLE; \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
+ SCTP_DECR_RADDR_COUNT(); \
+ } \
+ } \
+}
+
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
+ if (((ctl)->do_not_ref_stcb == 0) && stcb) { \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+ } \
+}
+
+#define sctp_sballoc(stcb, sb, m) { \
+ atomic_add_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+ atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
+ if (stcb) { \
+ atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+ atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+ } \
+}
+#endif
+
+#define sctp_ucount_incr(val) { \
+ val++; \
+}
+
+#define sctp_ucount_decr(val) { \
+ if (val > 0) { \
+ val--; \
+ } else { \
+ val = 0; \
+ } \
+}
+
+#define sctp_mbuf_crush(data) do { \
+ struct mbuf *_m; \
+ _m = (data); \
+ while (_m && (SCTP_BUF_LEN(_m) == 0)) { \
+ (data) = SCTP_BUF_NEXT(_m); \
+ SCTP_BUF_NEXT(_m) = NULL; \
+ sctp_m_free(_m); \
+ _m = (data); \
+ } \
+} while (0)
+
+#define sctp_flight_size_decrease(tp1) do { \
+ if (tp1->whoTo->flight_size >= tp1->book_size) \
+ tp1->whoTo->flight_size -= tp1->book_size; \
+ else \
+ tp1->whoTo->flight_size = 0; \
+} while (0)
+
+#define sctp_flight_size_increase(tp1) do { \
+ (tp1)->whoTo->flight_size += (tp1)->book_size; \
+} while (0)
+
+#ifdef SCTP_FS_SPEC_LOG
+#define sctp_total_flight_decrease(stcb, tp1) do { \
+ if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
+ stcb->asoc.fs_index = 0;\
+ stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \
+ stcb->asoc.fs_index++; \
+ tp1->window_probe = 0; \
+ if (stcb->asoc.total_flight >= tp1->book_size) { \
+ stcb->asoc.total_flight -= tp1->book_size; \
+ if (stcb->asoc.total_flight_count > 0) \
+ stcb->asoc.total_flight_count--; \
+ } else { \
+ stcb->asoc.total_flight = 0; \
+ stcb->asoc.total_flight_count = 0; \
+ } \
+} while (0)
+
+#define sctp_total_flight_increase(stcb, tp1) do { \
+ if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
+ stcb->asoc.fs_index = 0;\
+ stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].decr = 0; \
+ stcb->asoc.fs_index++; \
+ (stcb)->asoc.total_flight_count++; \
+ (stcb)->asoc.total_flight += (tp1)->book_size; \
+} while (0)
+
+#else
+
+#define sctp_total_flight_decrease(stcb, tp1) do { \
+ tp1->window_probe = 0; \
+ if (stcb->asoc.total_flight >= tp1->book_size) { \
+ stcb->asoc.total_flight -= tp1->book_size; \
+ if (stcb->asoc.total_flight_count > 0) \
+ stcb->asoc.total_flight_count--; \
+ } else { \
+ stcb->asoc.total_flight = 0; \
+ stcb->asoc.total_flight_count = 0; \
+ } \
+} while (0)
+
+#define sctp_total_flight_increase(stcb, tp1) do { \
+ (stcb)->asoc.total_flight_count++; \
+ (stcb)->asoc.total_flight += (tp1)->book_size; \
+} while (0)
+
+#endif
+
+#define SCTP_PF_ENABLED(_net) (_net->pf_threshold < _net->failure_threshold)
+#define SCTP_NET_IS_PF(_net) (_net->pf_threshold < _net->error_count)
+
+struct sctp_nets;
+struct sctp_inpcb;
+struct sctp_tcb;
+struct sctphdr;
+
+
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+void sctp_close(struct socket *so);
+#else
+int sctp_detach(struct socket *so);
+#endif
+int sctp_disconnect(struct socket *so);
+#if !defined(__Userspace__)
+#if defined(__APPLE__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN)
+void sctp_ctlinput(int, struct sockaddr *, void *, struct ifnet * SCTP_UNUSED);
+#else
+void sctp_ctlinput(int, struct sockaddr *, void *);
+#endif
+int sctp_ctloutput(struct socket *, struct sockopt *);
+#ifdef INET
+void sctp_input_with_port(struct mbuf *, int, uint16_t);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+int sctp_input(struct mbuf **, int *, int);
+#else
+void sctp_input(struct mbuf *, int);
+#endif
+#endif
+void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t);
+#else
+#if defined(__Userspace__)
+void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t);
+#else
+void sctp_input(struct mbuf *,...);
+#endif
+void *sctp_ctlinput(int, struct sockaddr *, void *);
+int sctp_ctloutput(int, struct socket *, int, int, struct mbuf **);
+#endif
+void sctp_drain(void);
+#if defined(__Userspace__)
+void sctp_init(uint16_t,
+ int (*)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+ void (*)(const char *, ...), int start_threads);
+#elif defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) &&!defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+void sctp_init(struct protosw *pp, struct domain *dp);
+#else
+void sctp_init(void);
+void sctp_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *,
+ uint8_t, uint8_t, uint16_t, uint32_t);
+#endif
+#if !defined(__FreeBSD__) && !defined(__Userspace__)
+void sctp_finish(void);
+#endif
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+int sctp_flush(struct socket *, int);
+#endif
+int sctp_shutdown(struct socket *);
+int sctp_bindx(struct socket *, int, struct sockaddr_storage *,
+ int, int, struct proc *);
+/* can't use sctp_assoc_t here */
+int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *);
+#if !defined(__Userspace__)
+int sctp_ingetaddr(struct socket *, struct sockaddr **);
+#else
+int sctp_ingetaddr(struct socket *, struct mbuf *);
+#endif
+#if !defined(__Userspace__)
+int sctp_peeraddr(struct socket *, struct sockaddr **);
+#else
+int sctp_peeraddr(struct socket *, struct mbuf *);
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+int sctp_listen(struct socket *, int, struct thread *);
+#elif defined(_WIN32) && !defined(__Userspace__)
+int sctp_listen(struct socket *, int, PKTHREAD);
+#elif defined(__Userspace__)
+int sctp_listen(struct socket *, int, struct proc *);
+#else
+int sctp_listen(struct socket *, struct proc *);
+#endif
+int sctp_accept(struct socket *, struct sockaddr **);
+
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_SCTP_VAR_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctputil.c b/ext/sctp/usrsctp/usrsctplib/netinet/sctputil.c
new file mode 100644
index 000000000..19c17b333
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctputil.c
@@ -0,0 +1,8695 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 363323 2020-07-19 12:34:19Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#ifdef INET6
+#if defined(__Userspace__) || defined(__FreeBSD__)
+#include <netinet6/sctp6_var.h>
+#endif
+#endif
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_bsd_addr.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_constants.h>
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <netinet/sctp_kdtrace.h>
+#if defined(INET6) || defined(INET)
+#include <netinet/tcp_var.h>
+#endif
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+#include <sys/proc.h>
+#ifdef INET6
+#include <netinet/icmp6.h>
+#endif
+#endif
+
+#if defined(_WIN32) && !defined(__Userspace__)
+#if !defined(SCTP_LOCAL_TRACE_BUF)
+#include "eventrace_netinet.h"
+#include "sctputil.tmh" /* this is the file that will be auto generated */
+#endif
+#else
+#ifndef KTR_SCTP
+#define KTR_SCTP KTR_SUBSYS
+#endif
+#endif
+
+extern const struct sctp_cc_functions sctp_cc_functions[];
+extern const struct sctp_ss_functions sctp_ss_functions[];
+
+void
+sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.sb.stcb = stcb;
+ sctp_clog.x.sb.so_sbcc = sb->sb_cc;
+ if (stcb)
+ sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
+ else
+ sctp_clog.x.sb.stcb_sbcc = 0;
+ sctp_clog.x.sb.incr = incr;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_SB,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.close.inp = (void *)inp;
+ sctp_clog.x.close.sctp_flags = inp->sctp_flags;
+ if (stcb) {
+ sctp_clog.x.close.stcb = (void *)stcb;
+ sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
+ } else {
+ sctp_clog.x.close.stcb = 0;
+ sctp_clog.x.close.state = 0;
+ }
+ sctp_clog.x.close.loc = loc;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_CLOSE,
+ 0,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+rto_logging(struct sctp_nets *net, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.rto.net = (void *) net;
+ sctp_clog.x.rto.rtt = net->rtt / 1000;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_RTT,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.strlog.stcb = stcb;
+ sctp_clog.x.strlog.n_tsn = tsn;
+ sctp_clog.x.strlog.n_sseq = sseq;
+ sctp_clog.x.strlog.e_tsn = 0;
+ sctp_clog.x.strlog.e_sseq = 0;
+ sctp_clog.x.strlog.strm = stream;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_STRM,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.nagle.stcb = (void *)stcb;
+ sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
+ sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
+ sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
+ sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_NAGLE,
+ action,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.sack.cumack = cumack;
+ sctp_clog.x.sack.oldcumack = old_cumack;
+ sctp_clog.x.sack.tsn = tsn;
+ sctp_clog.x.sack.numGaps = gaps;
+ sctp_clog.x.sack.numDups = dups;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_SACK,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.map.base = map;
+ sctp_clog.x.map.cum = cum;
+ sctp_clog.x.map.high = high;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MAP,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.fr.largest_tsn = biggest_tsn;
+ sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
+ sctp_clog.x.fr.tsn = tsn;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_FR,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+#ifdef SCTP_MBUF_LOGGING
+void
+sctp_log_mb(struct mbuf *m, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.mb.mp = m;
+ sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
+ sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
+ sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
+ if (SCTP_BUF_IS_EXTENDED(m)) {
+ sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ /* APPLE does not use a ref_cnt, but a forward/backward ref queue */
+#else
+ sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
+#endif
+ } else {
+ sctp_clog.x.mb.ext = 0;
+ sctp_clog.x.mb.refcnt = 0;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MBUF,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_mbc(struct mbuf *m, int from)
+{
+ struct mbuf *mat;
+
+ for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
+ sctp_log_mb(mat, from);
+ }
+}
+#endif
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ if (control == NULL) {
+ SCTP_PRINTF("Gak log of NULL?\n");
+ return;
+ }
+ sctp_clog.x.strlog.stcb = control->stcb;
+ sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
+ sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
+ sctp_clog.x.strlog.strm = control->sinfo_stream;
+ if (poschk != NULL) {
+ sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
+ sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
+ } else {
+ sctp_clog.x.strlog.e_tsn = 0;
+ sctp_clog.x.strlog.e_sseq = 0;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_STRM,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.cwnd.net = net;
+ if (stcb->asoc.send_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_send = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+ if (stcb->asoc.stream_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_str = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+
+ if (net) {
+ sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
+ sctp_clog.x.cwnd.inflight = net->flight_size;
+ sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
+ sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
+ sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
+ }
+ if (SCTP_CWNDLOG_PRESEND == from) {
+ sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
+ }
+ sctp_clog.x.cwnd.cwnd_augment = augment;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_CWND,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+#if !defined(__APPLE__) && !defined(__Userspace__)
+void
+sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ if (inp) {
+ sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
+
+ } else {
+ sctp_clog.x.lock.sock = (void *) NULL;
+ }
+ sctp_clog.x.lock.inp = (void *) inp;
+#if defined(__FreeBSD__)
+ if (stcb) {
+ sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
+ } else {
+ sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
+ }
+ if (inp) {
+ sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
+ sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
+ } else {
+ sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
+ }
+ sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
+ if (inp && (inp->sctp_socket)) {
+ sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+ sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+ sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
+ } else {
+ sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
+ }
+#endif
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_LOCK_EVENT,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+#endif
+
+void
+sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.cwnd.net = net;
+ sctp_clog.x.cwnd.cwnd_new_value = error;
+ sctp_clog.x.cwnd.inflight = net->flight_size;
+ sctp_clog.x.cwnd.cwnd_augment = burst;
+ if (stcb->asoc.send_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_send = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+ if (stcb->asoc.stream_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_str = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MAXBURST,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.rwnd.rwnd = peers_rwnd;
+ sctp_clog.x.rwnd.send_size = snd_size;
+ sctp_clog.x.rwnd.overhead = overhead;
+ sctp_clog.x.rwnd.new_rwnd = 0;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_RWND,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.rwnd.rwnd = peers_rwnd;
+ sctp_clog.x.rwnd.send_size = flight_size;
+ sctp_clog.x.rwnd.overhead = overhead;
+ sctp_clog.x.rwnd.new_rwnd = a_rwndval;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_RWND,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+#ifdef SCTP_MBCNT_LOGGING
+static void
+sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.mbcnt.total_queue_size = total_oq;
+ sctp_clog.x.mbcnt.size_change = book;
+ sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
+ sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MBCNT,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+#endif
+
+void
+sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_MISC_EVENT,
+ from,
+ a, b, c, d);
+#endif
+}
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.wake.stcb = (void *)stcb;
+ sctp_clog.x.wake.wake_cnt = wake_cnt;
+ sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
+ sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
+ sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
+
+ if (stcb->asoc.stream_queue_cnt < 0xff)
+ sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
+ else
+ sctp_clog.x.wake.stream_qcnt = 0xff;
+
+ if (stcb->asoc.chunks_on_out_queue < 0xff)
+ sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
+ else
+ sctp_clog.x.wake.chunks_on_oque = 0xff;
+
+ sctp_clog.x.wake.sctpflags = 0;
+	/* set in the deferred mode stuff */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
+ sctp_clog.x.wake.sctpflags |= 1;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
+ sctp_clog.x.wake.sctpflags |= 2;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
+ sctp_clog.x.wake.sctpflags |= 4;
+ /* what about the sb */
+ if (stcb->sctp_socket) {
+ struct socket *so = stcb->sctp_socket;
+
+ sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
+ } else {
+ sctp_clog.x.wake.sbflags = 0xff;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_WAKE,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
+ sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
+ sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
+ sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
+ sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
+ sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
+ sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_BLOCK,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+#endif
+}
+
+int
+sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
+{
+ /* May need to fix this if ktrdump does not work */
+ return (0);
+}
+
+#ifdef SCTP_AUDITING_ENABLED
+uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
+static int sctp_audit_indx = 0;
+
+static
+void
+sctp_print_audit_report(void)
+{
+ int i;
+ int cnt;
+
+ cnt = 0;
+ for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
+ if ((sctp_audit_data[i][0] == 0xe0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if (sctp_audit_data[i][0] == 0xf0) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if ((sctp_audit_data[i][0] == 0xc0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ SCTP_PRINTF("\n");
+ cnt = 0;
+ }
+ SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+ (uint32_t) sctp_audit_data[i][1]);
+ cnt++;
+ if ((cnt % 14) == 0)
+ SCTP_PRINTF("\n");
+ }
+ for (i = 0; i < sctp_audit_indx; i++) {
+ if ((sctp_audit_data[i][0] == 0xe0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if (sctp_audit_data[i][0] == 0xf0) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if ((sctp_audit_data[i][0] == 0xc0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ SCTP_PRINTF("\n");
+ cnt = 0;
+ }
+ SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+ (uint32_t) sctp_audit_data[i][1]);
+ cnt++;
+ if ((cnt % 14) == 0)
+ SCTP_PRINTF("\n");
+ }
+ SCTP_PRINTF("\n");
+}
+
+void
+sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int resend_cnt, tot_out, rep, tot_book_cnt;
+ struct sctp_nets *lnet;
+ struct sctp_tmit_chunk *chk;
+
+ sctp_audit_data[sctp_audit_indx][0] = 0xAA;
+ sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ if (inp == NULL) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0x01;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ return;
+ }
+ if (stcb == NULL) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0x02;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ return;
+ }
+ sctp_audit_data[sctp_audit_indx][0] = 0xA1;
+ sctp_audit_data[sctp_audit_indx][1] =
+ (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 0;
+ tot_book_cnt = 0;
+ resend_cnt = tot_out = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ resend_cnt++;
+ } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ tot_out += chk->book_size;
+ tot_book_cnt++;
+ }
+ }
+ if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA1;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
+ resend_cnt, stcb->asoc.sent_queue_retran_cnt);
+ rep = 1;
+ stcb->asoc.sent_queue_retran_cnt = resend_cnt;
+ sctp_audit_data[sctp_audit_indx][0] = 0xA2;
+ sctp_audit_data[sctp_audit_indx][1] =
+ (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ }
+ if (tot_out != stcb->asoc.total_flight) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA2;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
+ (int)stcb->asoc.total_flight);
+ stcb->asoc.total_flight = tot_out;
+ }
+ if (tot_book_cnt != stcb->asoc.total_flight_count) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA5;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
+
+ stcb->asoc.total_flight_count = tot_book_cnt;
+ }
+ tot_out = 0;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ tot_out += lnet->flight_size;
+ }
+ if (tot_out != stcb->asoc.total_flight) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA3;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ SCTP_PRINTF("real flight:%d net total was %d\n",
+ stcb->asoc.total_flight, tot_out);
+ /* now corrective action */
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+
+ tot_out = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((chk->whoTo == lnet) &&
+ (chk->sent < SCTP_DATAGRAM_RESEND)) {
+ tot_out += chk->book_size;
+ }
+ }
+ if (lnet->flight_size != tot_out) {
+ SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
+ (void *)lnet, lnet->flight_size,
+ tot_out);
+ lnet->flight_size = tot_out;
+ }
+ }
+ }
+ if (rep) {
+ sctp_print_audit_report();
+ }
+}
+
+void
+sctp_audit_log(uint8_t ev, uint8_t fd)
+{
+
+ sctp_audit_data[sctp_audit_indx][0] = ev;
+ sctp_audit_data[sctp_audit_indx][1] = fd;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+}
+
+#endif
+
+/*
+ * The conversion from time to ticks and vice versa is done by rounding
+ * upwards. This way we can test in the code that the time is positive
+ * and know that this corresponds to a positive number of ticks.
+ */
+
+uint32_t
+sctp_msecs_to_ticks(uint32_t msecs)
+{
+ uint64_t temp;
+ uint32_t ticks;
+
+ if (hz == 1000) {
+ ticks = msecs;
+ } else {
+ temp = (((uint64_t)msecs * hz) + 999) / 1000;
+ if (temp > UINT32_MAX) {
+ ticks = UINT32_MAX;
+ } else {
+ ticks = (uint32_t)temp;
+ }
+ }
+ return (ticks);
+}
+
+uint32_t
+sctp_ticks_to_msecs(uint32_t ticks)
+{
+ uint64_t temp;
+ uint32_t msecs;
+
+ if (hz == 1000) {
+ msecs = ticks;
+ } else {
+ temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
+ if (temp > UINT32_MAX) {
+ msecs = UINT32_MAX;
+ } else {
+ msecs = (uint32_t)temp;
+ }
+ }
+ return (msecs);
+}
+
+uint32_t
+sctp_secs_to_ticks(uint32_t secs)
+{
+ uint64_t temp;
+ uint32_t ticks;
+
+ temp = (uint64_t)secs * hz;
+ if (temp > UINT32_MAX) {
+ ticks = UINT32_MAX;
+ } else {
+ ticks = (uint32_t)temp;
+ }
+ return (ticks);
+}
+
+uint32_t
+sctp_ticks_to_secs(uint32_t ticks)
+{
+ uint64_t temp;
+ uint32_t secs;
+
+ temp = ((uint64_t)ticks + (hz - 1)) / hz;
+ if (temp > UINT32_MAX) {
+ secs = UINT32_MAX;
+ } else {
+ secs = (uint32_t)temp;
+ }
+ return (secs);
+}
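
A quick standalone check of the rounding direction, assuming hz == 250 (one
possible tick rate; the real value comes from the kernel or the userspace
stack):

#include <stdint.h>
#include <stdio.h>

#define HZ 250	/* assumed tick rate for this check */

int
main(void)
{
	uint32_t msecs = 10;
	/* same round-up formula as sctp_msecs_to_ticks() */
	uint32_t ticks = (uint32_t)((((uint64_t)msecs * HZ) + 999) / 1000);

	/* 10 ms at 250 Hz is 2.5 ticks, rounded up to 3 */
	printf("%u ms -> %u ticks\n", msecs, ticks);

	/* and back, as in sctp_ticks_to_msecs(): 3 ticks is exactly 12 ms */
	uint32_t back = (uint32_t)((((uint64_t)ticks * 1000) + (HZ - 1)) / HZ);
	printf("%u ticks -> %u ms\n", ticks, back);
	return (0);
}
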
+
+/*
+ * sctp_stop_timers_for_shutdown() should be called
+ * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
+ * state to make sure that all timers are stopped.
+ */
+void
+sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+
+ inp = stcb->sctp_ep;
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
+ sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
+ sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
+ }
+}
+
+void
+sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+
+ inp = stcb->sctp_ep;
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
+ sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
+ if (stop_assoc_kill_timer) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
+ sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
+ /* Mobility adaptation */
+ sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
+ sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
+ }
+}
+
+/*
+ * A list of sizes based on typical MTUs, used only if the next hop
+ * size is not returned. These values MUST be multiples of 4 and
+ * MUST be ordered.
+ */
+static uint32_t sctp_mtu_sizes[] = {
+ 68,
+ 296,
+ 508,
+ 512,
+ 544,
+ 576,
+ 1004,
+ 1492,
+ 1500,
+ 1536,
+ 2000,
+ 2048,
+ 4352,
+ 4464,
+ 8168,
+ 17912,
+ 32000,
+ 65532
+};
+
+/*
+ * Return the largest MTU in sctp_mtu_sizes smaller than val.
+ * If val is smaller than the minimum, just return the largest
+ * multiple of 4 smaller than or equal to val.
+ * Ensure that the result is a multiple of 4.
+ */
+uint32_t
+sctp_get_prev_mtu(uint32_t val)
+{
+ uint32_t i;
+
+ val &= 0xfffffffc;
+ if (val <= sctp_mtu_sizes[0]) {
+ return (val);
+ }
+ for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
+ if (val <= sctp_mtu_sizes[i]) {
+ break;
+ }
+ }
+ KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
+ ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
+ return (sctp_mtu_sizes[i - 1]);
+}
+
+/*
+ * Return the smallest MTU in sctp_mtu_sizes larger than val.
+ * If val is larger than the maximum, just return the largest
+ * multiple of 4 smaller than or equal to val.
+ * Ensure that the result is a multiple of 4.
+ */
+uint32_t
+sctp_get_next_mtu(uint32_t val)
+{
+ /* select another MTU that is just bigger than this one */
+ uint32_t i;
+
+ val &= 0xfffffffc;
+ for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
+ if (val < sctp_mtu_sizes[i]) {
+ KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
+ ("sctp_mtu_sizes[%u] not a multiple of 4", i));
+ return (sctp_mtu_sizes[i]);
+ }
+ }
+ return (val);
+}
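
Hand-checking the table walk: for val = 1400 the largest entry below it is
1004 and the smallest above it is 1492, while values outside the table come
back rounded down to a multiple of 4. A small assert-based sketch (linking
against the two functions above):

#include <assert.h>
#include <stdint.h>

extern uint32_t sctp_get_prev_mtu(uint32_t);
extern uint32_t sctp_get_next_mtu(uint32_t);

int
main(void)
{
	assert(sctp_get_prev_mtu(1400) == 1004);   /* largest entry < 1400 */
	assert(sctp_get_next_mtu(1400) == 1492);   /* smallest entry > 1400 */
	assert(sctp_get_prev_mtu(50) == 48);       /* below minimum: 50 & ~3 */
	assert(sctp_get_next_mtu(70000) == 70000); /* above maximum: val itself */
	return (0);
}
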
+
+void
+sctp_fill_random_store(struct sctp_pcb *m)
+{
+	/*
+	 * Here we use the MD5/SHA-1 to hash with our good random numbers
+	 * and our counter. The result becomes our good random numbers and
+	 * we then set up to give these out. Note that we do no locking to
+	 * protect this. This is ok, since if competing folks call this we
+	 * will get more gobbledygook in the random store, which is what we
+	 * want. There is a danger that two callers will use the same random
+	 * numbers, but that's ok too since that is random as well :->
+	 */
+ m->store_at = 0;
+#if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION)
+ for (int i = 0; i < (int) (sizeof(m->random_store) / sizeof(m->random_store[0])); i++) {
+ m->random_store[i] = (uint8_t) rand();
+ }
+#else
+ (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
+ sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
+ sizeof(m->random_counter), (uint8_t *)m->random_store);
+#endif
+ m->random_counter++;
+}
+
+uint32_t
+sctp_select_initial_TSN(struct sctp_pcb *inp)
+{
+	/*
+	 * A true implementation should use a random selection process to
+	 * get the initial stream sequence number, using RFC 1750 as a
+	 * good guideline.
+	 */
+ uint32_t x, *xp;
+ uint8_t *p;
+ int store_at, new_store;
+
+ if (inp->initial_sequence_debug != 0) {
+ uint32_t ret;
+
+ ret = inp->initial_sequence_debug;
+ inp->initial_sequence_debug++;
+ return (ret);
+ }
+ retry:
+ store_at = inp->store_at;
+ new_store = store_at + sizeof(uint32_t);
+ if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
+ new_store = 0;
+ }
+ if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
+ goto retry;
+ }
+ if (new_store == 0) {
+ /* Refill the random store */
+ sctp_fill_random_store(inp);
+ }
+ p = &inp->random_store[store_at];
+ xp = (uint32_t *)p;
+ x = *xp;
+ return (x);
+}
+
+uint32_t
+sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
+{
+ uint32_t x;
+ struct timeval now;
+
+ if (check) {
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ }
+ for (;;) {
+ x = sctp_select_initial_TSN(&inp->sctp_ep);
+ if (x == 0) {
+ /* we never use 0 */
+ continue;
+ }
+ if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
+ break;
+ }
+ }
+ return (x);
+}
+
+int32_t
+sctp_map_assoc_state(int kernel_state)
+{
+ int32_t user_state;
+
+ if (kernel_state & SCTP_STATE_WAS_ABORTED) {
+ user_state = SCTP_CLOSED;
+ } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
+ user_state = SCTP_SHUTDOWN_PENDING;
+ } else {
+ switch (kernel_state & SCTP_STATE_MASK) {
+ case SCTP_STATE_EMPTY:
+ user_state = SCTP_CLOSED;
+ break;
+ case SCTP_STATE_INUSE:
+ user_state = SCTP_CLOSED;
+ break;
+ case SCTP_STATE_COOKIE_WAIT:
+ user_state = SCTP_COOKIE_WAIT;
+ break;
+ case SCTP_STATE_COOKIE_ECHOED:
+ user_state = SCTP_COOKIE_ECHOED;
+ break;
+ case SCTP_STATE_OPEN:
+ user_state = SCTP_ESTABLISHED;
+ break;
+ case SCTP_STATE_SHUTDOWN_SENT:
+ user_state = SCTP_SHUTDOWN_SENT;
+ break;
+ case SCTP_STATE_SHUTDOWN_RECEIVED:
+ user_state = SCTP_SHUTDOWN_RECEIVED;
+ break;
+ case SCTP_STATE_SHUTDOWN_ACK_SENT:
+ user_state = SCTP_SHUTDOWN_ACK_SENT;
+ break;
+ default:
+ user_state = SCTP_CLOSED;
+ break;
+ }
+ }
+ return (user_state);
+}
+
+int
+sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
+{
+ struct sctp_association *asoc;
+ /*
+ * Anything set to zero is taken care of by the allocation routine's
+ * bzero
+ */
+
+	/*
+	 * Up front, select what scoping to apply on addresses I tell my
+	 * peer. Not sure what to do with these right now; we will need to
+	 * come up with a way to set them. We may need to pass them through
+	 * from the caller in the sctp_aloc_assoc() function.
+	 */
+ int i;
+#if defined(SCTP_DETAILED_STR_STATS)
+ int j;
+#endif
+
+ asoc = &stcb->asoc;
+ /* init all variables to a known value. */
+ SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
+ asoc->max_burst = inp->sctp_ep.max_burst;
+ asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
+ asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+ asoc->cookie_life = inp->sctp_ep.def_cookie_life;
+ asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+ asoc->ecn_supported = inp->ecn_supported;
+ asoc->prsctp_supported = inp->prsctp_supported;
+ asoc->auth_supported = inp->auth_supported;
+ asoc->asconf_supported = inp->asconf_supported;
+ asoc->reconfig_supported = inp->reconfig_supported;
+ asoc->nrsack_supported = inp->nrsack_supported;
+ asoc->pktdrop_supported = inp->pktdrop_supported;
+ asoc->idata_supported = inp->idata_supported;
+ asoc->sctp_cmt_pf = (uint8_t)0;
+ asoc->sctp_frag_point = inp->sctp_frag_point;
+ asoc->sctp_features = inp->sctp_features;
+ asoc->default_dscp = inp->sctp_ep.default_dscp;
+ asoc->max_cwnd = inp->max_cwnd;
+#ifdef INET6
+ if (inp->sctp_ep.default_flowlabel) {
+ asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
+ } else {
+ if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
+ asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
+ asoc->default_flowlabel &= 0x000fffff;
+ asoc->default_flowlabel |= 0x80000000;
+ } else {
+ asoc->default_flowlabel = 0;
+ }
+ }
+#endif
+ asoc->sb_send_resv = 0;
+ if (override_tag) {
+ asoc->my_vtag = override_tag;
+ } else {
+ asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ }
+ /* Get the nonce tags */
+ asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+ asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+ asoc->vrf_id = vrf_id;
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ asoc->tsn_in_at = 0;
+ asoc->tsn_out_at = 0;
+ asoc->tsn_in_wrapped = 0;
+ asoc->tsn_out_wrapped = 0;
+ asoc->cumack_log_at = 0;
+ asoc->cumack_log_atsnt = 0;
+#endif
+#ifdef SCTP_FS_SPEC_LOG
+ asoc->fs_index = 0;
+#endif
+ asoc->refcnt = 0;
+ asoc->assoc_up_sent = 0;
+ asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
+ sctp_select_initial_TSN(&inp->sctp_ep);
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+	/* we are optimistic here */
+ asoc->peer_supports_nat = 0;
+ asoc->sent_queue_retran_cnt = 0;
+
+ /* for CMT */
+ asoc->last_net_cmt_send_started = NULL;
+
+ /* This will need to be adjusted */
+ asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ asoc->asconf_seq_in = asoc->last_acked_seq;
+
+ /* here we are different, we hold the next one we expect */
+ asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
+
+ asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
+ asoc->initial_rto = inp->sctp_ep.initial_rto;
+
+ asoc->default_mtu = inp->sctp_ep.default_mtu;
+ asoc->max_init_times = inp->sctp_ep.max_init_times;
+ asoc->max_send_times = inp->sctp_ep.max_send_times;
+ asoc->def_net_failure = inp->sctp_ep.def_net_failure;
+ asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
+ asoc->free_chunk_cnt = 0;
+
+ asoc->iam_blocking = 0;
+ asoc->context = inp->sctp_context;
+ asoc->local_strreset_support = inp->local_strreset_support;
+ asoc->def_send = inp->def_send;
+ asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
+ asoc->pr_sctp_cnt = 0;
+ asoc->total_output_queue_size = 0;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ asoc->scope.ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(inp) == 0) {
+ asoc->scope.ipv4_addr_legal = 1;
+ } else {
+ asoc->scope.ipv4_addr_legal = 0;
+ }
+#if defined(__Userspace__)
+ asoc->scope.conn_addr_legal = 0;
+#endif
+ } else {
+ asoc->scope.ipv6_addr_legal = 0;
+#if defined(__Userspace__)
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+ asoc->scope.conn_addr_legal = 1;
+ asoc->scope.ipv4_addr_legal = 0;
+ } else {
+ asoc->scope.conn_addr_legal = 0;
+ asoc->scope.ipv4_addr_legal = 1;
+ }
+#else
+ asoc->scope.ipv4_addr_legal = 1;
+#endif
+ }
+
+ asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
+ asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
+
+ asoc->smallest_mtu = inp->sctp_frag_point;
+ asoc->minrto = inp->sctp_ep.sctp_minrto;
+ asoc->maxrto = inp->sctp_ep.sctp_maxrto;
+
+ asoc->stream_locked_on = 0;
+ asoc->ecn_echo_cnt_onq = 0;
+ asoc->stream_locked = 0;
+
+ asoc->send_sack = 1;
+
+ LIST_INIT(&asoc->sctp_restricted_addrs);
+
+ TAILQ_INIT(&asoc->nets);
+ TAILQ_INIT(&asoc->pending_reply_queue);
+ TAILQ_INIT(&asoc->asconf_ack_sent);
+ /* Setup to fill the hb random cache at first HB */
+ asoc->hb_random_idx = 4;
+
+ asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
+
+ stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
+ stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
+
+ stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
+ stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
+
+	/*
+	 * Now the stream parameters; here we allocate space for all
+	 * streams that we request by default.
+	 */
+ asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
+ o_strms;
+ SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
+ asoc->streamoutcnt * sizeof(struct sctp_stream_out),
+ SCTP_M_STRMO);
+ if (asoc->strmout == NULL) {
+ /* big trouble no memory */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+		/*
+		 * The inbound side must be set to 0xffff. Also NOTE: when we
+		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
+		 * the count (streamoutcnt), but first check if we sent to any
+		 * of the upper streams that were dropped (if some were). Those
+		 * that were dropped must be notified to the upper layer as
+		 * failed to send.
+		 */
+ asoc->strmout[i].next_mid_ordered = 0;
+ asoc->strmout[i].next_mid_unordered = 0;
+ TAILQ_INIT(&asoc->strmout[i].outqueue);
+ asoc->strmout[i].chunks_on_queues = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+ for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+ asoc->strmout[i].abandoned_sent[j] = 0;
+ asoc->strmout[i].abandoned_unsent[j] = 0;
+ }
+#else
+ asoc->strmout[i].abandoned_sent[0] = 0;
+ asoc->strmout[i].abandoned_unsent[0] = 0;
+#endif
+ asoc->strmout[i].sid = i;
+ asoc->strmout[i].last_msg_incomplete = 0;
+ asoc->strmout[i].state = SCTP_STREAM_OPENING;
+ asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
+ }
+ asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
+
+ /* Now the mapping array */
+ asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
+ SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
+ SCTP_M_MAP);
+ if (asoc->mapping_array == NULL) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+ SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
+ SCTP_M_MAP);
+ if (asoc->nr_mapping_array == NULL) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
+
+ /* Now the init of the other outqueues */
+ TAILQ_INIT(&asoc->free_chunks);
+ TAILQ_INIT(&asoc->control_send_queue);
+ TAILQ_INIT(&asoc->asconf_send_queue);
+ TAILQ_INIT(&asoc->send_queue);
+ TAILQ_INIT(&asoc->sent_queue);
+ TAILQ_INIT(&asoc->resetHead);
+ asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
+ TAILQ_INIT(&asoc->asconf_queue);
+ /* authentication fields */
+ asoc->authinfo.random = NULL;
+ asoc->authinfo.active_keyid = 0;
+ asoc->authinfo.assoc_key = NULL;
+ asoc->authinfo.assoc_keyid = 0;
+ asoc->authinfo.recv_key = NULL;
+ asoc->authinfo.recv_keyid = 0;
+ LIST_INIT(&asoc->shared_keys);
+ asoc->marked_retrans = 0;
+ asoc->port = inp->sctp_ep.port;
+ asoc->timoinit = 0;
+ asoc->timodata = 0;
+ asoc->timosack = 0;
+ asoc->timoshutdown = 0;
+ asoc->timoheartbeat = 0;
+ asoc->timocookie = 0;
+ asoc->timoshutdownack = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
+ asoc->discontinuity_time = asoc->start_time;
+ for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
+ asoc->abandoned_unsent[i] = 0;
+ asoc->abandoned_sent[i] = 0;
+ }
+ /* sa_ignore MEMLEAK (memory is put in the assoc mapping array and
+ * freed later when the association is freed).
+ */
+ return (0);
+}
+
+void
+sctp_print_mapping_array(struct sctp_association *asoc)
+{
+ unsigned int i, limit;
+
+ SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
+ asoc->mapping_array_size,
+ asoc->mapping_array_base_tsn,
+ asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map,
+ asoc->highest_tsn_inside_nr_map);
+ for (limit = asoc->mapping_array_size; limit > 1; limit--) {
+ if (asoc->mapping_array[limit - 1] != 0) {
+ break;
+ }
+ }
+ SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
+ for (i = 0; i < limit; i++) {
+ SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
+ }
+ if (limit % 16)
+ SCTP_PRINTF("\n");
+ for (limit = asoc->mapping_array_size; limit > 1; limit--) {
+ if (asoc->nr_mapping_array[limit - 1]) {
+ break;
+ }
+ }
+ SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
+ for (i = 0; i < limit; i++) {
+ SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
+ }
+ if (limit % 16)
+ SCTP_PRINTF("\n");
+}
+
+int
+sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
+{
+ /* mapping array needs to grow */
+ uint8_t *new_array1, *new_array2;
+ uint32_t new_size;
+
+ new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
+ SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
+ SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
+ if ((new_array1 == NULL) || (new_array2 == NULL)) {
+ /* can't get more, forget it */
+ SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
+ if (new_array1) {
+ SCTP_FREE(new_array1, SCTP_M_MAP);
+ }
+ if (new_array2) {
+ SCTP_FREE(new_array2, SCTP_M_MAP);
+ }
+ return (-1);
+ }
+ memset(new_array1, 0, new_size);
+ memset(new_array2, 0, new_size);
+ memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
+ memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->mapping_array = new_array1;
+ asoc->nr_mapping_array = new_array2;
+ asoc->mapping_array_size = new_size;
+ return (0);
+}
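+
+/*
+ * Worked example (editorial illustration, not part of the imported
+ * source; assumes SCTP_MAPPING_ARRAY_INCR is 32): with a current
+ * mapping_array_size of 16 bytes and needed == 20 bits,
+ *
+ *   new_size = 16 + ((20 + 7) / 8 + 32) = 16 + (3 + 32) = 51 bytes,
+ *
+ * i.e. both arrays grow by enough whole bytes to cover the requested
+ * TSN bits plus SCTP_MAPPING_ARRAY_INCR bytes of slack, so repeated
+ * small expansions are avoided.
+ */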
+
+
+static void
+sctp_iterator_work(struct sctp_iterator *it)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct sctp_inpcb *tinp;
+ int iteration_count = 0;
+ int inp_skip = 0;
+ int first_in = 1;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ SCTP_INP_INFO_RLOCK();
+ SCTP_ITERATOR_LOCK();
+ sctp_it_ctl.cur_it = it;
+ if (it->inp) {
+ SCTP_INP_RLOCK(it->inp);
+ SCTP_INP_DECR_REF(it->inp);
+ }
+ if (it->inp == NULL) {
+ /* iterator is complete */
+done_with_iterator:
+ sctp_it_ctl.cur_it = NULL;
+ SCTP_ITERATOR_UNLOCK();
+ SCTP_INP_INFO_RUNLOCK();
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it, SCTP_M_ITER);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return;
+ }
+select_a_new_ep:
+ if (first_in) {
+ first_in = 0;
+ } else {
+ SCTP_INP_RLOCK(it->inp);
+ }
+ while (((it->pcb_flags) &&
+ ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
+ ((it->pcb_features) &&
+ ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
+ /* endpoint flags or features don't match, so keep looking */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ SCTP_INP_RUNLOCK(it->inp);
+ goto done_with_iterator;
+ }
+ tinp = it->inp;
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ it->stcb = NULL;
+ SCTP_INP_RUNLOCK(tinp);
+ if (it->inp == NULL) {
+ goto done_with_iterator;
+ }
+ SCTP_INP_RLOCK(it->inp);
+ }
+ /* now go through each assoc which is in the desired state */
+ if (it->done_current_ep == 0) {
+ if (it->function_inp != NULL)
+ inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
+ it->done_current_ep = 1;
+ }
+ if (it->stcb == NULL) {
+ /* run the per instance function */
+ it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
+ }
+ if ((inp_skip) || it->stcb == NULL) {
+ if (it->function_inp_end != NULL) {
+ inp_skip = (*it->function_inp_end)(it->inp,
+ it->pointer,
+ it->val);
+ }
+ SCTP_INP_RUNLOCK(it->inp);
+ goto no_stcb;
+ }
+ while (it->stcb) {
+ SCTP_TCB_LOCK(it->stcb);
+ if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
+ /* not in the right state... keep looking */
+ SCTP_TCB_UNLOCK(it->stcb);
+ goto next_assoc;
+ }
+ /* see if we have limited out the iterator loop */
+ iteration_count++;
+ if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
+ /* Pause to let others grab the lock */
+ atomic_add_int(&it->stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(it->stcb);
+ SCTP_INP_INCR_REF(it->inp);
+ SCTP_INP_RUNLOCK(it->inp);
+ SCTP_ITERATOR_UNLOCK();
+ SCTP_INP_INFO_RUNLOCK();
+ SCTP_INP_INFO_RLOCK();
+ SCTP_ITERATOR_LOCK();
+ if (sctp_it_ctl.iterator_flags) {
+ /* We won't be staying here */
+ SCTP_INP_DECR_REF(it->inp);
+ atomic_add_int(&it->stcb->asoc.refcnt, -1);
+#if !(defined(__FreeBSD__) && !defined(__Userspace__))
+ if (sctp_it_ctl.iterator_flags &
+ SCTP_ITERATOR_MUST_EXIT) {
+ goto done_with_iterator;
+ }
+#endif
+ if (sctp_it_ctl.iterator_flags &
+ SCTP_ITERATOR_STOP_CUR_IT) {
+ sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
+ goto done_with_iterator;
+ }
+ if (sctp_it_ctl.iterator_flags &
+ SCTP_ITERATOR_STOP_CUR_INP) {
+ sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
+ goto no_stcb;
+ }
+ /* If we reach here huh? */
+ SCTP_PRINTF("Unknown it ctl flag %x\n",
+ sctp_it_ctl.iterator_flags);
+ sctp_it_ctl.iterator_flags = 0;
+ }
+ SCTP_INP_RLOCK(it->inp);
+ SCTP_INP_DECR_REF(it->inp);
+ SCTP_TCB_LOCK(it->stcb);
+ atomic_add_int(&it->stcb->asoc.refcnt, -1);
+ iteration_count = 0;
+ }
+ KASSERT(it->inp == it->stcb->sctp_ep,
+ ("%s: stcb %p does not belong to inp %p, but inp %p",
+ __func__, it->stcb, it->inp, it->stcb->sctp_ep));
+
+ /* run function on this one */
+ (*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
+
+ /*
+ * we lie here: it really needs to have its own type, but first I
+ * must verify that this won't affect things :-0
+ */
+ if (it->no_chunk_output == 0)
+ sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+
+ SCTP_TCB_UNLOCK(it->stcb);
+ next_assoc:
+ it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
+ if (it->stcb == NULL) {
+ /* Run last function */
+ if (it->function_inp_end != NULL) {
+ inp_skip = (*it->function_inp_end)(it->inp,
+ it->pointer,
+ it->val);
+ }
+ }
+ }
+ SCTP_INP_RUNLOCK(it->inp);
+ no_stcb:
+ /* done with all assocs on this endpoint, move on to next endpoint */
+ it->done_current_ep = 0;
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ it->inp = NULL;
+ } else {
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ }
+ it->stcb = NULL;
+ if (it->inp == NULL) {
+ goto done_with_iterator;
+ }
+ goto select_a_new_ep;
+}
+
+void
+sctp_iterator_worker(void)
+{
+ struct sctp_iterator *it;
+
+ /* This function is called with the WQ lock in place */
+ sctp_it_ctl.iterator_running = 1;
+ while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
+ /* now let's work on this one */
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_SET(it->vn);
+#endif
+ sctp_iterator_work(it);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_RESTORE();
+#endif
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+#if !defined(__FreeBSD__) && !defined(__Userspace__)
+ if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+ break;
+ }
+#endif
+ /*sa_ignore FREED_MEMORY*/
+ }
+ sctp_it_ctl.iterator_running = 0;
+ return;
+}
+
+
+static void
+sctp_handle_addr_wq(void)
+{
+ /* deal with the ADDR wq from the rtsock calls */
+ struct sctp_laddr *wi, *nwi;
+ struct sctp_asconf_iterator *asc;
+
+ SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
+ sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
+ if (asc == NULL) {
+ /* Try later, no memory */
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ return;
+ }
+ LIST_INIT(&asc->list_of_work);
+ asc->cnt = 0;
+
+ LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
+ LIST_REMOVE(wi, sctp_nxt_addr);
+ LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
+ asc->cnt++;
+ }
+
+ if (asc->cnt == 0) {
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+ } else {
+ int ret;
+
+ ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
+ sctp_asconf_iterator_stcb,
+ NULL, /* No ep end for boundall */
+ SCTP_PCB_FLAGS_BOUNDALL,
+ SCTP_PCB_ANY_FEATURES,
+ SCTP_ASOC_ANY_STATE,
+ (void *)asc, 0,
+ sctp_asconf_iterator_end, NULL, 0);
+ if (ret) {
+ SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
+ /* Free if we are stopping, or put the items back on the addr_wq. */
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+ sctp_asconf_iterator_end(asc, 0);
+ } else {
+ LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ }
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+ }
+ }
+ }
+}
+
+/*-
+ * The following table shows which pointers for the inp, stcb, or net are
+ * stored for each timer after it was started.
+ *
+ *|Name |Timer |inp |stcb|net |
+ *|-----------------------------|-----------------------------|----|----|----|
+ *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No |
+ *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No |
+ *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes |
+ *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No |
+ *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No |
+ *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No |
+ *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No |
+ *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No |
+ *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No |
+ *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No |
+ */
+
+void
+sctp_timeout_handler(void *t)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct timeval tv;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct sctp_timer *tmr;
+ struct mbuf *op_err;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+#if defined(__Userspace__)
+ struct socket *upcall_socket = NULL;
+#endif
+ int type;
+ int i, secret;
+ bool did_output, released_asoc_reference;
+
+ /*
+ * If inp, stcb or net are not NULL, then references to these were
+ * added when the timer was started, and must be released before this
+ * function returns.
+ */
+ tmr = (struct sctp_timer *)t;
+ inp = (struct sctp_inpcb *)tmr->ep;
+ stcb = (struct sctp_tcb *)tmr->tcb;
+ net = (struct sctp_nets *)tmr->net;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_SET((struct vnet *)tmr->vnet);
+#endif
+ did_output = true;
+ released_asoc_reference = false;
+
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xF0, (uint8_t) tmr->type);
+ sctp_auditing(3, inp, stcb, net);
+#endif
+
+ /* sanity checks... */
+ KASSERT(tmr->self == NULL || tmr->self == tmr,
+ ("sctp_timeout_handler: tmr->self corrupted"));
+ KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
+ ("sctp_timeout_handler: invalid timer type %d", tmr->type));
+ type = tmr->type;
+ KASSERT(stcb == NULL || stcb->sctp_ep == inp,
+ ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
+ type, stcb, stcb->sctp_ep));
+ tmr->stopped_from = 0xa001;
+ if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d handler exiting due to CLOSED association.\n",
+ type);
+ goto out_decr;
+ }
+ tmr->stopped_from = 0xa002;
+ SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
+ if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d handler exiting due to not being active.\n",
+ type);
+ goto out_decr;
+ }
+
+ tmr->stopped_from = 0xa003;
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ /*
+ * Release reference so that association can be freed if
+ * necessary below.
+ * This is safe now that we have acquired the lock.
+ */
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ released_asoc_reference = true;
+ if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
+ ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d handler exiting due to CLOSED association.\n",
+ type);
+ goto out;
+ }
+ } else if (inp != NULL) {
+ SCTP_INP_WLOCK(inp);
+ } else {
+ SCTP_WQ_ADDR_LOCK();
+ }
+
+ /* Record in stopped_from which timeout occurred. */
+ tmr->stopped_from = type;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ /* mark as being serviced now */
+ if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
+ /*
+ * Callout has been rescheduled.
+ */
+ goto out;
+ }
+ if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
+ /*
+ * Not active, so no action.
+ */
+ goto out;
+ }
+ SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
+
+#if defined(__Userspace__)
+ if ((stcb != NULL) &&
+ !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb->sctp_socket != NULL)) {
+ upcall_socket = stcb->sctp_socket;
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ }
+#endif
+ /* call the handler for the appropriate timer type */
+ switch (type) {
+ case SCTP_TIMER_TYPE_SEND:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timodata);
+ stcb->asoc.timodata++;
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (sctp_t3rxt_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ if ((stcb->asoc.num_send_timers_up == 0) &&
+ (stcb->asoc.sent_queue_cnt > 0)) {
+ struct sctp_tmit_chunk *chk;
+
+ /*
+ * Safeguard. If there are chunks on the sent queue
+ * somewhere but no timers are running, something is
+ * wrong... so we start a timer on the first chunk
+ * on the sent queue on whatever net it is sent to.
+ */
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->whoTo != NULL) {
+ break;
+ }
+ }
+ if (chk != NULL) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+ }
+ }
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoinit);
+ stcb->asoc.timoinit++;
+ if (sctp_t1init_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ did_output = false;
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ KASSERT(inp != NULL && stcb != NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timosack);
+ stcb->asoc.timosack++;
+ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, NULL);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoshutdown);
+ stcb->asoc.timoshutdown++;
+ if (sctp_shutdown_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoheartbeat);
+ stcb->asoc.timoheartbeat++;
+ if (sctp_heartbeat_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ if (!(net->dest_state & SCTP_ADDR_NOHB)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ } else {
+ did_output = false;
+ }
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timocookie);
+ stcb->asoc.timocookie++;
+ if (sctp_cookie_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ /*
+ * We treat the T3 and Cookie timers pretty much the same
+ * with respect to the "from" argument of chunk_output.
+ */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ KASSERT(inp != NULL && stcb == NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timosecret);
+ (void)SCTP_GETTIME_TIMEVAL(&tv);
+ inp->sctp_ep.time_of_secret_change = tv.tv_sec;
+ inp->sctp_ep.last_secret_number =
+ inp->sctp_ep.current_secret_number;
+ inp->sctp_ep.current_secret_number++;
+ if (inp->sctp_ep.current_secret_number >=
+ SCTP_HOW_MANY_SECRETS) {
+ inp->sctp_ep.current_secret_number = 0;
+ }
+ secret = (int)inp->sctp_ep.current_secret_number;
+ for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+ inp->sctp_ep.secret_key[secret][i] =
+ sctp_select_initial_TSN(&inp->sctp_ep);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
+ did_output = false;
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timopathmtu);
+ sctp_pathmtu_timer(inp, stcb, net);
+ did_output = false;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ if (sctp_shutdownack_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoshutdownack);
+ stcb->asoc.timoshutdownack++;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ KASSERT(inp != NULL && stcb != NULL && net != NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoasconf);
+ if (sctp_asconf_timer(inp, stcb, net)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ KASSERT(inp != NULL && stcb != NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoshutdownguard);
+ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+ "Shutdown guard timer expired");
+ sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ KASSERT(inp != NULL && stcb != NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoautoclose);
+ sctp_autoclose_timer(inp, stcb);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ KASSERT(inp != NULL && stcb != NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timostrmrst);
+ if (sctp_strreset_timer(inp, stcb)) {
+ /* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_INPKILL:
+ KASSERT(inp != NULL && stcb == NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoinpkill);
+ /*
+ * special case, take away our increment since WE are the
+ * killer
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
+#endif
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_FROM_INPKILL_TIMER);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
+#endif
+ inp = NULL;
+ goto out_no_decr;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ KASSERT(inp != NULL && stcb != NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timoassockill);
+ /* Can we free it yet? */
+ SCTP_INP_DECR_REF(inp);
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ /*
+ * free asoc: it always unlocks (or destroys), so prevent a
+ * duplicate unlock or an unlock of a freed mutex :-0
+ */
+ stcb = NULL;
+ goto out_no_decr;
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ KASSERT(inp == NULL && stcb == NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ sctp_handle_addr_wq();
+ did_output = true;
+ break;
+ case SCTP_TIMER_TYPE_PRIM_DELETED:
+ KASSERT(inp != NULL && stcb != NULL && net == NULL,
+ ("timeout of type %d: inp = %p, stcb = %p, net = %p",
+ type, inp, stcb, net));
+ SCTP_STAT_INCR(sctps_timodelprim);
+ sctp_delete_prim_timer(inp, stcb);
+ did_output = false;
+ break;
+ default:
+#ifdef INVARIANTS
+ panic("Unknown timer type %d", type);
+#else
+ did_output = false;
+ goto out;
+#endif
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xF1, (uint8_t) type);
+ if (inp != NULL)
+ sctp_auditing(5, inp, stcb, net);
+#endif
+ if (did_output && (stcb != NULL)) {
+ /*
+ * Now we need to clean up the control chunk chain if an
+ * ECNE is on it. It must be marked as UNSENT again so the
+ * next call will continue to send it until we receive a
+ * CWR, which removes it. It is, however, unlikely that we
+ * will find an ECN echo on the chain.
+ */
+ sctp_fix_ecn_echo(&stcb->asoc);
+ }
+out:
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ } else if (inp != NULL) {
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ SCTP_WQ_ADDR_UNLOCK();
+ }
+
+out_decr:
+#if defined(__Userspace__)
+ if (upcall_socket != NULL) {
+ if ((upcall_socket->so_upcall != NULL) &&
+ (upcall_socket->so_error != 0)) {
+ (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(upcall_socket);
+ sorele(upcall_socket);
+ }
+#endif
+ /* These reference counts were incremented in sctp_timer_start(). */
+ if (inp != NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ if ((stcb != NULL) && !released_asoc_reference) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+ if (net != NULL) {
+ sctp_free_remote_addr(net);
+ }
+out_no_decr:
+ SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ CURVNET_RESTORE();
+ NET_EPOCH_EXIT(et);
+#endif
+}
+
+/*-
+ * The following table shows which parameters must be provided
+ * when calling sctp_timer_start(). For parameters not being
+ * provided, NULL must be used.
+ *
+ * |Name |inp |stcb|net |
+ * |-----------------------------|----|----|----|
+ * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
+ * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
+ * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
+ * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
+ *
+ */
+
+void
+sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_timer *tmr;
+ uint32_t to_ticks;
+ uint32_t rndval, jitter;
+
+ KASSERT(stcb == NULL || stcb->sctp_ep == inp,
+ ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
+ t_type, stcb, stcb->sctp_ep));
+ tmr = NULL;
+ to_ticks = 0;
+ if (stcb != NULL) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ } else if (inp != NULL) {
+ SCTP_INP_WLOCK_ASSERT(inp);
+ } else {
+ SCTP_WQ_ADDR_LOCK_ASSERT();
+ }
+ if (stcb != NULL) {
+ /* Don't restart timer on association that's about to be killed. */
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+ (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
+ t_type, inp, stcb, net);
+ return;
+ }
+ /* Don't restart timer on net that's been removed. */
+ if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
+ t_type, inp, stcb, net);
+ return;
+ }
+ }
+ switch (t_type) {
+ case SCTP_TIMER_TYPE_SEND:
+ /* Here we use the RTO timer. */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ /*
+ * Here we use the INIT timer default, usually about 1
+ * second.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ /*
+ * Here we use the Delayed-Ack timer value from the inp,
+ * usually about 200 ms.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.dack_timer;
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ /* Here we use the RTO of the destination. */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ /*
+ * The net is used here so that we can add in the RTO, even
+ * though we use a different timer. We also add the HB delay
+ * PLUS a random jitter.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ if ((net->dest_state & SCTP_ADDR_NOHB) &&
+ !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
+ t_type, inp, stcb, net);
+ return;
+ }
+ tmr = &net->hb_timer;
+ if (net->RTO == 0) {
+ to_ticks = stcb->asoc.initial_rto;
+ } else {
+ to_ticks = net->RTO;
+ }
+ rndval = sctp_select_initial_TSN(&inp->sctp_ep);
+ jitter = rndval % to_ticks;
+ if (jitter >= (to_ticks >> 1)) {
+ to_ticks = to_ticks + (jitter - (to_ticks >> 1));
+ } else {
+ to_ticks = to_ticks - jitter;
+ }
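+ /*
+ * Numeric illustration (editorial comment, not in the
+ * imported source): with an RTO of 1000 ms, jitter is
+ * rndval % 1000. If jitter is 700 (>= 500), to_ticks
+ * becomes 1000 + (700 - 500) = 1200 ms; if jitter is 300
+ * (< 500), it becomes 1000 - 300 = 700 ms. The timeout is
+ * thus spread roughly uniformly over [RTO/2, 3*RTO/2).
+ */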
+ if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ !(net->dest_state & SCTP_ADDR_PF)) {
+ to_ticks += net->heart_beat_delay;
+ }
+ /*
+ * Now we must convert to_ticks, which is currently in ms,
+ * into ticks.
+ */
+ to_ticks = sctp_msecs_to_ticks(to_ticks);
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ /*
+ * Here we can use the RTO timer from the network since one
+ * RTT has completed. If a retransmission happened, then we
+ * will be using the RTO initial value.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ /*
+ * Nothing needed but the endpoint here; usually about 60
+ * minutes.
+ */
+ if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &inp->sctp_ep.signature_change;
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ /*
+ * Here we use the value found in the EP for PMTUD,
+ * usually about 10 minutes.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
+ t_type, inp, stcb, net);
+ return;
+ }
+ tmr = &net->pmtu_timer;
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ /* Here we use the RTO of the destination. */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ /*
+ * Here the timer comes from the stcb but its value is from
+ * the net's RTO.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.asconf_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ /*
+ * Here we use the endpoint's shutdown guard timer, usually
+ * about 3 minutes.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.shut_guard_timer;
+ if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
+ if (stcb->asoc.maxrto < UINT32_MAX / 5) {
+ to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
+ }
+ } else {
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
+ }
+ break;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.autoclose_timer;
+ to_ticks = stcb->asoc.sctp_autoclose_ticks;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ /*
+ * Here the timer comes from the stcb but its value is from
+ * the net's RTO.
+ */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ if (net->RTO == 0) {
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = sctp_msecs_to_ticks(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_INPKILL:
+ /*
+ * The inp is set up to die. We re-use the signature_change
+ * timer since that has stopped and we are in the GONE
+ * state.
+ */
+ if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &inp->sctp_ep.signature_change;
+ to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
+ break;
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ /* Only 1 tick away :-) */
+ tmr = &SCTP_BASE_INFO(addr_wq_timer);
+ to_ticks = SCTP_ADDRESS_TICK_DELAY;
+ break;
+ case SCTP_TIMER_TYPE_PRIM_DELETED:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.delete_prim_timer;
+ to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
+ break;
+ default:
+#ifdef INVARIANTS
+ panic("Unknown timer type %d", t_type);
+#else
+ return;
+#endif
+ }
+ KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
+ KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
+ if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
+ /*
+ * We do NOT allow you to have it already running. If it is,
+ * we leave the current one up unchanged.
+ */
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
+ t_type, inp, stcb, net);
+ return;
+ }
+ /* At this point we can proceed. */
+ if (t_type == SCTP_TIMER_TYPE_SEND) {
+ stcb->asoc.num_send_timers_up++;
+ }
+ tmr->stopped_from = 0;
+ tmr->type = t_type;
+ tmr->ep = (void *)inp;
+ tmr->tcb = (void *)stcb;
+ if (t_type == SCTP_TIMER_TYPE_STRRESET) {
+ tmr->net = NULL;
+ } else {
+ tmr->net = (void *)net;
+ }
+ tmr->self = (void *)tmr;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ tmr->vnet = (void *)curvnet;
+#endif
+ tmr->ticks = sctp_get_tick_count();
+ if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
+ t_type, to_ticks, inp, stcb, net);
+ /*
+ * If this is a newly scheduled callout, as opposed to a
+ * rescheduled one, increment relevant reference counts.
+ */
+ if (tmr->ep != NULL) {
+ SCTP_INP_INCR_REF(inp);
+ }
+ if (tmr->tcb != NULL) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ }
+ if (tmr->net != NULL) {
+ atomic_add_int(&net->ref_count, 1);
+ }
+ } else {
+ /*
+ * This should not happen, since we checked for pending
+ * above.
+ */
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
+ t_type, to_ticks, inp, stcb, net);
+ }
+ return;
+}
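+
+/*
+ * Usage sketch (editorial illustration): arming the retransmission timer
+ * for a destination, as the timeout handler above does for the first
+ * chunk left on the sent queue:
+ *
+ *	sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+ *
+ * Per the table above, SCTP_TIMER_TYPE_SEND needs inp, stcb and net; the
+ * references taken on success are released again in sctp_timeout_handler()
+ * or sctp_timer_stop().
+ */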
+
+/*-
+ * The following table shows which parameters must be provided
+ * when calling sctp_timer_stop(). For parameters not being
+ * provided, NULL must be used.
+ *
+ * |Name |inp |stcb|net |
+ * |-----------------------------|----|----|----|
+ * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
+ * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
+ * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
+ * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
+ * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
+ * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
+ *
+ */
+
+void
+sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint32_t from)
+{
+ struct sctp_timer *tmr;
+
+ KASSERT(stcb == NULL || stcb->sctp_ep == inp,
+ ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
+ t_type, stcb, stcb->sctp_ep));
+ if (stcb != NULL) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ } else if (inp != NULL) {
+ SCTP_INP_WLOCK_ASSERT(inp);
+ } else {
+ SCTP_WQ_ADDR_LOCK_ASSERT();
+ }
+ tmr = NULL;
+ switch (t_type) {
+ case SCTP_TIMER_TYPE_SEND:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.dack_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->hb_timer;
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &inp->sctp_ep.signature_change;
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->pmtu_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.asconf_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.shut_guard_timer;
+ break;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.autoclose_timer;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+ case SCTP_TIMER_TYPE_INPKILL:
+ /*
+ * The inp is set up to die. We re-use the signature_change
+ * timer since that has stopped and we are in the GONE
+ * state.
+ */
+ if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &inp->sctp_ep.signature_change;
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &SCTP_BASE_INFO(addr_wq_timer);
+ break;
+ case SCTP_TIMER_TYPE_PRIM_DELETED:
+ if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
+#ifdef INVARIANTS
+ panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
+ t_type, inp, stcb, net);
+#else
+ return;
+#endif
+ }
+ tmr = &stcb->asoc.delete_prim_timer;
+ break;
+ default:
+#ifdef INVARIANTS
+ panic("Unknown timer type %d", t_type);
+#else
+ return;
+#endif
+ }
+ KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
+ if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
+ (tmr->type != t_type)) {
+ /*
+ * OK, we have a timer that is under joint use, e.g. the
+ * Cookie timer sharing its slot with the SEND timer. We
+ * are therefore NOT running the timer that the caller
+ * wants stopped, so just return.
+ */
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
+ t_type, inp, stcb, net);
+ return;
+ }
+ if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ }
+ tmr->self = NULL;
+ tmr->stopped_from = from;
+ if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
+ KASSERT(tmr->ep == inp,
+ ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
+ t_type, inp, tmr->ep));
+ KASSERT(tmr->tcb == stcb,
+ ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
+ t_type, stcb, tmr->tcb));
+ KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
+ ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
+ ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
+ t_type, net, tmr->net));
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
+ t_type, inp, stcb, net);
+ /*
+ * If the timer was actually stopped, decrement reference counts
+ * that were incremented in sctp_timer_start().
+ */
+ if (tmr->ep != NULL) {
+ SCTP_INP_DECR_REF(inp);
+ tmr->ep = NULL;
+ }
+ if (tmr->tcb != NULL) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ tmr->tcb = NULL;
+ }
+ if (tmr->net != NULL) {
+ /*
+ * Can't use net, since it doesn't work for
+ * SCTP_TIMER_TYPE_ASCONF.
+ */
+ sctp_free_remote_addr((struct sctp_nets *)tmr->net);
+ tmr->net = NULL;
+ }
+ } else {
+ SCTPDBG(SCTP_DEBUG_TIMER2,
+ "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
+ t_type, inp, stcb, net);
+ }
+ return;
+}
+
+uint32_t
+sctp_calculate_len(struct mbuf *m)
+{
+ uint32_t tlen = 0;
+ struct mbuf *at;
+
+ at = m;
+ while (at) {
+ tlen += SCTP_BUF_LEN(at);
+ at = SCTP_BUF_NEXT(at);
+ }
+ return (tlen);
+}
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *inp,
+ struct sctp_association *asoc, uint32_t mtu)
+{
+ /*
+ * Reset the P-MTU size on this association: this involves changing
+ * the asoc MTU and going through ANY chunk+overhead larger than mtu
+ * to allow the DF flag to be cleared.
+ */
+ struct sctp_tmit_chunk *chk;
+ unsigned int eff_mtu, ovh;
+
+ asoc->smallest_mtu = mtu;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ eff_mtu = mtu - ovh;
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->send_size > eff_mtu) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->send_size > eff_mtu) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+}
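+
+/*
+ * Worked example (editorial illustration, assuming SCTP_MIN_V4_OVERHEAD
+ * is 32 bytes, i.e. an IPv4 header plus the SCTP common header): for
+ * mtu == 1500 on a v4-only endpoint, eff_mtu == 1468, so every queued
+ * chunk whose send_size exceeds 1468 bytes is marked
+ * CHUNK_FLAGS_FRAGMENT_OK so that it can be sent with the DF flag
+ * cleared.
+ */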
+
+
+/*
+ * Given an association and the starting time of the current RTT period,
+ * update RTO in msecs. net should point to the current network.
+ * Return 1 if an RTO update was performed, or 0 if no update was
+ * performed due to an invalid starting point.
+ */
+
+int
+sctp_calculate_rto(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_nets *net,
+ struct timeval *old,
+ int rtt_from_sack)
+{
+ struct timeval now;
+ uint64_t rtt_us; /* RTT in us */
+ int32_t rtt; /* RTT in ms */
+ uint32_t new_rto;
+ int first_measure = 0;
+
+ /************************/
+ /* 1. calculate new RTT */
+ /************************/
+ /* get the current time */
+ if (stcb->asoc.use_precise_time) {
+ (void)SCTP_GETPTIME_TIMEVAL(&now);
+ } else {
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ }
+ if ((old->tv_sec > now.tv_sec) ||
+ ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
+ /* The starting point is in the future. */
+ return (0);
+ }
+ timevalsub(&now, old);
+ rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
+ if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
+ /* The RTT is larger than a sane value. */
+ return (0);
+ }
+ /* store the current RTT in us */
+ net->rtt = rtt_us;
+ /* compute rtt in ms */
+ rtt = (int32_t)(net->rtt / 1000);
+ if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
+ /* Tell the CC module that a new update has just occurred from a sack */
+ (*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
+ }
+ /* Do we need to determine the LAN type? We do this only
+ * on SACKs, i.e. when the RTT is determined from data and
+ * not from non-data (HB/INIT->INIT-ACK).
+ */
+ if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
+ (net->lan_type == SCTP_LAN_UNKNOWN)) {
+ if (net->rtt > SCTP_LOCAL_LAN_RTT) {
+ net->lan_type = SCTP_LAN_INTERNET;
+ } else {
+ net->lan_type = SCTP_LAN_LOCAL;
+ }
+ }
+
+ /***************************/
+ /* 2. update RTTVAR & SRTT */
+ /***************************/
+ /*-
+ * Compute the scaled average lastsa and the
+ * scaled variance lastsv as described in Van Jacobson's
+ * paper "Congestion Avoidance and Control", Annex A.
+ *
+ * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
+ * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
+ */
+ if (net->RTO_measured) {
+ rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
+ net->lastsa += rtt;
+ if (rtt < 0) {
+ rtt = -rtt;
+ }
+ rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
+ net->lastsv += rtt;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
+ rto_logging(net, SCTP_LOG_RTTVAR);
+ }
+ } else {
+ /* First RTO measurement */
+ net->RTO_measured = 1;
+ first_measure = 1;
+ net->lastsa = rtt << SCTP_RTT_SHIFT;
+ net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
+ rto_logging(net, SCTP_LOG_INITIAL_RTT);
+ }
+ }
+ if (net->lastsv == 0) {
+ net->lastsv = SCTP_CLOCK_GRANULARITY;
+ }
+ new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+ if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
+ (stcb->asoc.sat_network_lockout == 0)) {
+ stcb->asoc.sat_network = 1;
+ } else if ((!first_measure) && stcb->asoc.sat_network) {
+ stcb->asoc.sat_network = 0;
+ stcb->asoc.sat_network_lockout = 1;
+ }
+ /* bound it, per C6/C7 in Section 5.3.1 */
+ if (new_rto < stcb->asoc.minrto) {
+ new_rto = stcb->asoc.minrto;
+ }
+ if (new_rto > stcb->asoc.maxrto) {
+ new_rto = stcb->asoc.maxrto;
+ }
+ net->RTO = new_rto;
+ return (1);
+}
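+
+/*
+ * Worked example (editorial illustration, assuming SCTP_RTT_SHIFT == 3
+ * and SCTP_RTT_VAR_SHIFT == 2): a first measurement of rtt = 100 ms sets
+ * lastsa = 100 << 3 = 800 and lastsv = 50 << 2 = 200, giving
+ * RTO = (800 >> 3) + 200 = 300 ms. A second measurement of 120 ms gives
+ * delta = 120 - (800 >> 3) = 20, so lastsa = 820; then
+ * |delta| - (200 >> 2) = 20 - 50 = -30, so lastsv = 170, and
+ * RTO = (820 >> 3) + 170 = 102 + 170 = 272 ms, before the minrto/maxrto
+ * clamp is applied.
+ */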
+
+/*
+ * Return a pointer to a contiguous piece of data from the given mbuf chain
+ * starting at 'off' for 'len' bytes. If the desired piece spans more than
+ * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
+ * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the
+ * chain.
+ */
+caddr_t
+sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
+{
+ uint32_t count;
+ uint8_t *ptr;
+
+ ptr = in_ptr;
+ if ((off < 0) || (len <= 0))
+ return (NULL);
+
+ /* find the desired start location */
+ while ((m != NULL) && (off > 0)) {
+ if (off < SCTP_BUF_LEN(m))
+ break;
+ off -= SCTP_BUF_LEN(m);
+ m = SCTP_BUF_NEXT(m);
+ }
+ if (m == NULL)
+ return (NULL);
+
+ /* is the current mbuf large enough (i.e., contiguous)? */
+ if ((SCTP_BUF_LEN(m) - off) >= len) {
+ return (mtod(m, caddr_t) + off);
+ } else {
+ /* else, it spans more than one mbuf, so save a temp copy... */
+ while ((m != NULL) && (len > 0)) {
+ count = min(SCTP_BUF_LEN(m) - off, len);
+ memcpy(ptr, mtod(m, caddr_t) + off, count);
+ len -= count;
+ ptr += count;
+ off = 0;
+ m = SCTP_BUF_NEXT(m);
+ }
+ if ((m == NULL) && (len > 0))
+ return (NULL);
+ else
+ return ((caddr_t)in_ptr);
+ }
+}
+
+
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *m,
+ int offset,
+ struct sctp_paramhdr *pull,
+ int pull_limit)
+{
+ /* This just provides a typed signature to Peter's Pull routine */
+ return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
+ (uint8_t *) pull));
+}
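+
+/*
+ * Usage sketch (editorial illustration; the helper name and bounds
+ * handling are hypothetical, but the pattern matches how callers in this
+ * library walk chunk parameters): the stack buffer makes each parameter
+ * header contiguous even when it straddles mbufs, and SCTP_SIZE32()
+ * rounds the parameter length up to its 4-byte boundary.
+ */
+#if 0
+static void
+example_walk_params(struct mbuf *m, int offset, int limit)
+{
+ struct sctp_paramhdr param_buf, *phdr;
+ uint16_t plen;
+
+ phdr = sctp_get_next_param(m, offset, &param_buf, sizeof(param_buf));
+ while ((phdr != NULL) && (offset < limit)) {
+ plen = ntohs(phdr->param_length);
+ if (plen < sizeof(struct sctp_paramhdr)) {
+ break; /* malformed parameter */
+ }
+ /* ... dispatch on ntohs(phdr->param_type) here ... */
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(m, offset, &param_buf,
+ sizeof(param_buf));
+ }
+}
+#endif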
+
+
+struct mbuf *
+sctp_add_pad_tombuf(struct mbuf *m, int padlen)
+{
+ struct mbuf *m_last;
+ caddr_t dp;
+
+ if (padlen > 3) {
+ return (NULL);
+ }
+ if (padlen <= M_TRAILINGSPACE(m)) {
+ /*
+ * The easy way. We hope the majority of the time we hit
+ * here :)
+ */
+ m_last = m;
+ } else {
+ /* Hard way: we must grow the mbuf chain */
+ m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_last == NULL) {
+ return (NULL);
+ }
+ SCTP_BUF_LEN(m_last) = 0;
+ SCTP_BUF_NEXT(m_last) = NULL;
+ SCTP_BUF_NEXT(m) = m_last;
+ }
+ dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
+ SCTP_BUF_LEN(m_last) += padlen;
+ memset(dp, 0, padlen);
+ return (m_last);
+}
+
+struct mbuf *
+sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
+{
+ /* find the last mbuf in chain and pad it */
+ struct mbuf *m_at;
+
+ if (last_mbuf != NULL) {
+ return (sctp_add_pad_tombuf(last_mbuf, padval));
+ } else {
+ for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ return (sctp_add_pad_tombuf(m_at, padval));
+ }
+ }
+ }
+ return (NULL);
+}
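+
+/*
+ * Worked example (editorial illustration): SCTP chunks are padded to a
+ * 4-byte boundary, so a chunk of length 13 needs padlen = 3. If the last
+ * mbuf has at least 3 bytes of trailing space, sctp_add_pad_tombuf()
+ * zeroes them in place; otherwise it links a fresh mbuf holding only the
+ * padding onto the end of the chain.
+ */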
+
+static void
+sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
+ uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_assoc_change *sac;
+ struct sctp_queued_to_read *control;
+ unsigned int notif_len;
+ uint16_t abort_len;
+ unsigned int i;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ if (stcb == NULL) {
+ return;
+ }
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
+ notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
+ if (abort != NULL) {
+ abort_len = ntohs(abort->ch.chunk_length);
+ /*
+ * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
+ * contiguous.
+ */
+ if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
+ abort_len = SCTP_CHUNK_BUFFER_SIZE;
+ }
+ } else {
+ abort_len = 0;
+ }
+ if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
+ notif_len += SCTP_ASSOC_SUPPORTS_MAX;
+ } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
+ notif_len += abort_len;
+ }
+ m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ /* Retry with smaller value. */
+ notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
+ m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ goto set_error;
+ }
+ }
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ sac = mtod(m_notify, struct sctp_assoc_change *);
+ memset(sac, 0, notif_len);
+ sac->sac_type = SCTP_ASSOC_CHANGE;
+ sac->sac_flags = 0;
+ sac->sac_length = sizeof(struct sctp_assoc_change);
+ sac->sac_state = state;
+ sac->sac_error = error;
+ /* XXX verify these stream counts */
+ sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
+ sac->sac_inbound_streams = stcb->asoc.streamincnt;
+ sac->sac_assoc_id = sctp_get_associd(stcb);
+ if (notif_len > sizeof(struct sctp_assoc_change)) {
+ if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
+ i = 0;
+ if (stcb->asoc.prsctp_supported == 1) {
+ sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
+ }
+ if (stcb->asoc.auth_supported == 1) {
+ sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
+ }
+ if (stcb->asoc.asconf_supported == 1) {
+ sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
+ }
+ if (stcb->asoc.idata_supported == 1) {
+ sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
+ }
+ sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
+ if (stcb->asoc.reconfig_supported == 1) {
+ sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
+ }
+ sac->sac_length += i;
+ } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
+ memcpy(sac->sac_info, abort, abort_len);
+ sac->sac_length += abort_len;
+ }
+ }
+ SCTP_BUF_LEN(m_notify) = sac->sac_length;
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control != NULL) {
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
+ so_locked);
+ } else {
+ sctp_m_freem(m_notify);
+ }
+ }
+ /*
+ * For 1-to-1 style sockets, we send up an error when an ABORT
+ * comes in.
+ */
+set_error:
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
+ SOCK_LOCK(stcb->sctp_socket);
+ if (from_peer) {
+ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
+ stcb->sctp_socket->so_error = ECONNREFUSED;
+ } else {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+ stcb->sctp_socket->so_error = ECONNRESET;
+ }
+ } else {
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
+ stcb->sctp_socket->so_error = ETIMEDOUT;
+ } else {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
+ stcb->sctp_socket->so_error = ECONNABORTED;
+ }
+ }
+ SOCK_UNLOCK(stcb->sctp_socket);
+ }
+ /* Wake ANY sleepers */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
+ socantrcvmore(stcb->sctp_socket);
+ }
+ sorwakeup(stcb->sctp_socket);
+ sowwakeup(stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+}
+
+static void
+sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
+ struct sockaddr *sa, uint32_t error, int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_paddr_change *spc;
+ struct sctp_queued_to_read *control;
+
+ if ((stcb == NULL) ||
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ spc = mtod(m_notify, struct sctp_paddr_change *);
+ memset(spc, 0, sizeof(struct sctp_paddr_change));
+ spc->spc_type = SCTP_PEER_ADDR_CHANGE;
+ spc->spc_flags = 0;
+ spc->spc_length = sizeof(struct sctp_paddr_change);
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
+ (struct sockaddr_in6 *)&spc->spc_aaddr);
+ } else {
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
+ }
+#else
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ struct sockaddr_in6 *sin6;
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
+
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+ sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+ if (sin6->sin6_scope_id == 0) {
+ /* recover scope_id for user */
+#ifdef SCTP_KAME
+ (void)sa6_recoverscope(sin6);
+#else
+ (void)in6_recoverscope(sin6, &sin6->sin6_addr,
+ NULL);
+#endif
+ } else {
+ /* clear embedded scope_id for user */
+ in6_clearscope(&sin6->sin6_addr);
+ }
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ spc->spc_state = state;
+ spc->spc_error = error;
+ spc->spc_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD,
+ so_locked);
+}
+
+
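+/*
+ * Queue an SCTP_SEND_FAILED (or SCTP_SEND_FAILED_EVENT) notification for
+ * a chunk that had already been built for transmission: the chunk header
+ * and padding are trimmed off chk->data and the remaining payload mbufs
+ * are stolen and chained onto the notification.
+ */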
+static void
+sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
+ struct sctp_tmit_chunk *chk, int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_send_failed *ssf;
+ struct sctp_send_failed_event *ssfe;
+ struct sctp_queued_to_read *control;
+ struct sctp_chunkhdr *chkhdr;
+ int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
+
+ if ((stcb == NULL) ||
+ (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
+ /* event not enabled */
+ return;
+ }
+
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+ notifhdr_len = sizeof(struct sctp_send_failed_event);
+ } else {
+ notifhdr_len = sizeof(struct sctp_send_failed);
+ }
+ m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = notifhdr_len;
+ if (stcb->asoc.idata_supported) {
+ chkhdr_len = sizeof(struct sctp_idata_chunk);
+ } else {
+ chkhdr_len = sizeof(struct sctp_data_chunk);
+ }
+ /* Use some defaults in case we can't access the chunk header */
+ if (chk->send_size >= chkhdr_len) {
+ payload_len = chk->send_size - chkhdr_len;
+ } else {
+ payload_len = 0;
+ }
+ padding_len = 0;
+ if (chk->data != NULL) {
+ chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
+ if (chkhdr != NULL) {
+ chk_len = ntohs(chkhdr->chunk_length);
+ if ((chk_len >= chkhdr_len) &&
+ (chk->send_size >= chk_len) &&
+ (chk->send_size - chk_len < 4)) {
+ padding_len = chk->send_size - chk_len;
+ payload_len = chk->send_size - chkhdr_len - padding_len;
+ }
+ }
+ }
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+ ssfe = mtod(m_notify, struct sctp_send_failed_event *);
+ memset(ssfe, 0, notifhdr_len);
+ ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
+ if (sent) {
+ ssfe->ssfe_flags = SCTP_DATA_SENT;
+ } else {
+ ssfe->ssfe_flags = SCTP_DATA_UNSENT;
+ }
+ ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
+ ssfe->ssfe_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
+ ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
+ ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
+ ssfe->ssfe_info.snd_context = chk->rec.data.context;
+ ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
+ ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
+ } else {
+ ssf = mtod(m_notify, struct sctp_send_failed *);
+ memset(ssf, 0, notifhdr_len);
+ ssf->ssf_type = SCTP_SEND_FAILED;
+ if (sent) {
+ ssf->ssf_flags = SCTP_DATA_SENT;
+ } else {
+ ssf->ssf_flags = SCTP_DATA_UNSENT;
+ }
+ ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
+ ssf->ssf_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
+ ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
+ ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
+ ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
+ ssf->ssf_info.sinfo_context = chk->rec.data.context;
+ ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+ ssf->ssf_assoc_id = sctp_get_associd(stcb);
+ }
+ if (chk->data != NULL) {
+ /* Trim off the sctp chunk header (it should be there) */
+ if (chk->send_size == chkhdr_len + payload_len + padding_len) {
+ m_adj(chk->data, chkhdr_len);
+ m_adj(chk->data, -padding_len);
+ sctp_mbuf_crush(chk->data);
+ chk->send_size -= (chkhdr_len + padding_len);
+ }
+ }
+ SCTP_BUF_NEXT(m_notify) = chk->data;
+ /* Steal off the mbuf */
+ chk->data = NULL;
+ /*
+	 * For this case, we check the actual socket buffer: since the assoc
+	 * is going away, we don't want to overfill the socket buffer for a
+	 * non-reader.
+ */
+ if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD,
+ so_locked);
+}
+
+
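+/*
+ * Like sctp_notify_send_failed(), but for a message still pending on the
+ * stream queue that was never built into a chunk; sp->data is stolen and
+ * appended to the notification unmodified.
+ */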
+static void
+sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
+ struct sctp_stream_queue_pending *sp, int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_send_failed *ssf;
+ struct sctp_send_failed_event *ssfe;
+ struct sctp_queued_to_read *control;
+ int notifhdr_len;
+
+ if ((stcb == NULL) ||
+ (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
+ /* event not enabled */
+ return;
+ }
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+ notifhdr_len = sizeof(struct sctp_send_failed_event);
+ } else {
+ notifhdr_len = sizeof(struct sctp_send_failed);
+ }
+ m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ /* no space left */
+ return;
+ }
+ SCTP_BUF_LEN(m_notify) = notifhdr_len;
+ if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+ ssfe = mtod(m_notify, struct sctp_send_failed_event *);
+ memset(ssfe, 0, notifhdr_len);
+ ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
+ ssfe->ssfe_flags = SCTP_DATA_UNSENT;
+ ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
+ ssfe->ssfe_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ ssfe->ssfe_info.snd_sid = sp->sid;
+ if (sp->some_taken) {
+ ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
+ } else {
+ ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
+ }
+ ssfe->ssfe_info.snd_ppid = sp->ppid;
+ ssfe->ssfe_info.snd_context = sp->context;
+ ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
+ ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
+ } else {
+ ssf = mtod(m_notify, struct sctp_send_failed *);
+ memset(ssf, 0, notifhdr_len);
+ ssf->ssf_type = SCTP_SEND_FAILED;
+ ssf->ssf_flags = SCTP_DATA_UNSENT;
+ ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
+ ssf->ssf_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ ssf->ssf_info.sinfo_stream = sp->sid;
+ ssf->ssf_info.sinfo_ssn = 0;
+ if (sp->some_taken) {
+ ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
+ } else {
+ ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
+ }
+ ssf->ssf_info.sinfo_ppid = sp->ppid;
+ ssf->ssf_info.sinfo_context = sp->context;
+ ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+ ssf->ssf_assoc_id = sctp_get_associd(stcb);
+ }
+ SCTP_BUF_NEXT(m_notify) = sp->data;
+
+ /* Steal off the mbuf */
+ sp->data = NULL;
+ /*
+	 * For this case, we check the actual socket buffer: since the assoc
+	 * is going away, we don't want to overfill the socket buffer for a
+	 * non-reader.
+ */
+ if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+
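+/*
+ * Deliver an SCTP_ADAPTATION_INDICATION event carrying the adaptation
+ * layer indication received from the peer.
+ */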
+static void
+sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
+{
+ struct mbuf *m_notify;
+ struct sctp_adaptation_event *sai;
+ struct sctp_queued_to_read *control;
+
+ if ((stcb == NULL) ||
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
+ /* event not enabled */
+ return;
+ }
+
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ sai = mtod(m_notify, struct sctp_adaptation_event *);
+ memset(sai, 0, sizeof(struct sctp_adaptation_event));
+ sai->sai_type = SCTP_ADAPTATION_INDICATION;
+ sai->sai_flags = 0;
+ sai->sai_length = sizeof(struct sctp_adaptation_event);
+ sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
+ sai->sai_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+/* This must always be called with the read queue of the INP locked. */
+static void
+sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
+ uint32_t val, int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_pdapi_event *pdapi;
+ struct sctp_queued_to_read *control;
+ struct sockbuf *sb;
+
+ if ((stcb == NULL) ||
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
+ return;
+ }
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ pdapi = mtod(m_notify, struct sctp_pdapi_event *);
+ memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
+ pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
+ pdapi->pdapi_flags = 0;
+ pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
+ pdapi->pdapi_indication = error;
+ pdapi->pdapi_stream = (val >> 16);
+ pdapi->pdapi_seq = (val & 0x0000ffff);
+ pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sb = &stcb->sctp_socket->so_rcv;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
+ }
+ sctp_sballoc(stcb, sb, m_notify);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ control->end_added = 1;
+ if (stcb->asoc.control_pdapi)
+ TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
+ else {
+ /* we really should not see this case */
+ TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
+ }
+ if (stcb->sctp_ep && stcb->sctp_socket) {
+ /* This should always be the case */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+}
+
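+/*
+ * Notify the user that the peer has initiated a shutdown. For 1-to-1
+ * style sockets the socket is first marked as unable to send more data;
+ * an SCTP_SHUTDOWN_EVENT is then queued if the user asked for it.
+ */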
+static void
+sctp_notify_shutdown_event(struct sctp_tcb *stcb)
+{
+ struct mbuf *m_notify;
+ struct sctp_shutdown_event *sse;
+ struct sctp_queued_to_read *control;
+
+ /*
+ * For TCP model AND UDP connected sockets we will send an error up
+	 * when a SHUTDOWN completes.
+ */
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* mark socket closed for read/write and wakeup! */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ socantsendmore(stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
+ /* event not enabled */
+ return;
+ }
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ sse = mtod(m_notify, struct sctp_shutdown_event *);
+ memset(sse, 0, sizeof(struct sctp_shutdown_event));
+ sse->sse_type = SCTP_SHUTDOWN_EVENT;
+ sse->sse_flags = 0;
+ sse->sse_length = sizeof(struct sctp_shutdown_event);
+ sse->sse_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
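+/*
+ * Queue an SCTP_SENDER_DRY_EVENT, signalling that the sender has no
+ * more user data queued or in flight on this association.
+ */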
+static void
+sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
+ int so_locked)
+{
+ struct mbuf *m_notify;
+ struct sctp_sender_dry_event *event;
+ struct sctp_queued_to_read *control;
+
+ if ((stcb == NULL) ||
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
+ /* event not enabled */
+ return;
+ }
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ /* no space left */
+ return;
+ }
+ SCTP_BUF_LEN(m_notify) = 0;
+ event = mtod(m_notify, struct sctp_sender_dry_event *);
+ memset(event, 0, sizeof(struct sctp_sender_dry_event));
+ event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+ event->sender_dry_flags = 0;
+ event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+ event->sender_dry_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
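+/*
+ * Deliver an SCTP_STREAM_CHANGE_EVENT with the new inbound and outbound
+ * stream counts. Suppressed when the peer itself requested the change.
+ */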
+void
+sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
+{
+ struct mbuf *m_notify;
+ struct sctp_queued_to_read *control;
+ struct sctp_stream_change_event *stradd;
+
+ if ((stcb == NULL) ||
+ (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
+ /* event not enabled */
+ return;
+ }
+ if ((stcb->asoc.peer_req_out) && flag) {
+ /* Peer made the request, don't tell the local user */
+ stcb->asoc.peer_req_out = 0;
+ return;
+ }
+ stcb->asoc.peer_req_out = 0;
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ stradd = mtod(m_notify, struct sctp_stream_change_event *);
+ memset(stradd, 0, sizeof(struct sctp_stream_change_event));
+ stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
+ stradd->strchange_flags = flag;
+ stradd->strchange_length = sizeof(struct sctp_stream_change_event);
+ stradd->strchange_assoc_id = sctp_get_associd(stcb);
+ stradd->strchange_instrms = numberin;
+ stradd->strchange_outstrms = numberout;
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ /* no space */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
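+/*
+ * Deliver an SCTP_ASSOC_RESET_EVENT carrying the local and remote TSNs
+ * in effect after an association-level TSN reset.
+ */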
+void
+sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
+{
+ struct mbuf *m_notify;
+ struct sctp_queued_to_read *control;
+ struct sctp_assoc_reset_event *strasoc;
+
+ if ((stcb == NULL) ||
+ (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
+ memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
+ strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
+ strasoc->assocreset_flags = flag;
+ strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
+ strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
+ strasoc->assocreset_local_tsn = sending_tsn;
+ strasoc->assocreset_remote_tsn = recv_tsn;
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ /* no space */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+
+
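+/*
+ * Deliver an SCTP_STREAM_RESET_EVENT listing the streams affected by a
+ * stream reset request, if any.
+ */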
+static void
+sctp_notify_stream_reset(struct sctp_tcb *stcb,
+ int number_entries, uint16_t * list, int flag)
+{
+ struct mbuf *m_notify;
+ struct sctp_queued_to_read *control;
+ struct sctp_stream_reset_event *strreset;
+ int len;
+
+ if ((stcb == NULL) ||
+ (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
+ /* event not enabled */
+ return;
+ }
+
+ m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
+ if (len > M_TRAILINGSPACE(m_notify)) {
+ /* never enough room */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ strreset = mtod(m_notify, struct sctp_stream_reset_event *);
+ memset(strreset, 0, len);
+ strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
+ strreset->strreset_flags = flag;
+ strreset->strreset_length = len;
+ strreset->strreset_assoc_id = sctp_get_associd(stcb);
+ if (number_entries) {
+ int i;
+
+ for (i = 0; i < number_entries; i++) {
+ strreset->strreset_stream_list[i] = ntohs(list[i]);
+ }
+ }
+ SCTP_BUF_LEN(m_notify) = len;
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ /* no space */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+
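+/*
+ * Deliver an SCTP_REMOTE_ERROR notification, copying in as much of the
+ * received ERROR chunk as is guaranteed to be contiguous.
+ */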
+static void
+sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
+{
+ struct mbuf *m_notify;
+ struct sctp_remote_error *sre;
+ struct sctp_queued_to_read *control;
+ unsigned int notif_len;
+ uint16_t chunk_len;
+
+ if ((stcb == NULL) ||
+ sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
+ return;
+ }
+ if (chunk != NULL) {
+ chunk_len = ntohs(chunk->ch.chunk_length);
+ /*
+		 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed
+		 * to be contiguous.
+ */
+ if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
+ chunk_len = SCTP_CHUNK_BUFFER_SIZE;
+ }
+ } else {
+ chunk_len = 0;
+ }
+ notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
+ m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ /* Retry with smaller value. */
+ notif_len = (unsigned int)sizeof(struct sctp_remote_error);
+ m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ return;
+ }
+ }
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ sre = mtod(m_notify, struct sctp_remote_error *);
+ memset(sre, 0, notif_len);
+ sre->sre_type = SCTP_REMOTE_ERROR;
+ sre->sre_flags = 0;
+ sre->sre_length = sizeof(struct sctp_remote_error);
+ sre->sre_error = error;
+ sre->sre_assoc_id = sctp_get_associd(stcb);
+ if (notif_len > sizeof(struct sctp_remote_error)) {
+ memcpy(sre->sre_data, chunk, chunk_len);
+ sre->sre_length += chunk_len;
+ }
+ SCTP_BUF_LEN(m_notify) = sre->sre_length;
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, stcb->asoc.context, 0, 0, 0,
+ m_notify);
+ if (control != NULL) {
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ } else {
+ sctp_m_freem(m_notify);
+ }
+}
+
+
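+/*
+ * Central dispatcher for upper-layer notifications: maps an
+ * SCTP_NOTIFY_* code onto the specific notification builder above,
+ * e.g. sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL,
+ * so_locked). Does nothing once the socket is gone or can no longer
+ * receive.
+ */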
+void
+sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
+ uint32_t error, void *data, int so_locked)
+{
+ if ((stcb == NULL) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /* If the socket is gone we are out of here */
+ return;
+ }
+#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
+ if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
+#else
+ if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
+#endif
+ return;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ }
+#endif
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
+ (notification == SCTP_NOTIFY_INTERFACE_UP) ||
+ (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
+ /* Don't report these in front states */
+ return;
+ }
+ }
+ switch (notification) {
+ case SCTP_NOTIFY_ASSOC_UP:
+ if (stcb->asoc.assoc_up_sent == 0) {
+ sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
+ stcb->asoc.assoc_up_sent = 1;
+ }
+ if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
+ sctp_notify_adaptation_layer(stcb);
+ }
+ if (stcb->asoc.auth_supported == 0) {
+ sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+ NULL, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_ASSOC_DOWN:
+ sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
+#if defined(__Userspace__)
+ if (stcb->sctp_ep->recv_callback) {
+ if (stcb->sctp_socket) {
+ union sctp_sockstore addr;
+ struct sctp_rcvinfo rcv;
+
+ memset(&addr, 0, sizeof(union sctp_sockstore));
+ memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+ }
+#endif
+ break;
+ case SCTP_NOTIFY_INTERFACE_DOWN:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
+ (struct sockaddr *)&net->ro._l_addr, error, so_locked);
+ break;
+ }
+ case SCTP_NOTIFY_INTERFACE_UP:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
+ (struct sockaddr *)&net->ro._l_addr, error, so_locked);
+ break;
+ }
+ case SCTP_NOTIFY_INTERFACE_CONFIRMED:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
+ (struct sockaddr *)&net->ro._l_addr, error, so_locked);
+ break;
+ }
+ case SCTP_NOTIFY_SPECIAL_SP_FAIL:
+ sctp_notify_send_failed2(stcb, error,
+ (struct sctp_stream_queue_pending *)data, so_locked);
+ break;
+ case SCTP_NOTIFY_SENT_DG_FAIL:
+ sctp_notify_send_failed(stcb, 1, error,
+ (struct sctp_tmit_chunk *)data, so_locked);
+ break;
+ case SCTP_NOTIFY_UNSENT_DG_FAIL:
+ sctp_notify_send_failed(stcb, 0, error,
+ (struct sctp_tmit_chunk *)data, so_locked);
+ break;
+ case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
+ {
+ uint32_t val;
+ val = *((uint32_t *)data);
+
+ sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
+ break;
+ }
+ case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
+ } else {
+ sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_ASSOC_REM_ABORTED:
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
+ sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
+ } else {
+ sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_ASSOC_RESTART:
+ sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
+ if (stcb->asoc.auth_supported == 0) {
+ sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+ NULL, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_STR_RESET_SEND:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
+ break;
+ case SCTP_NOTIFY_STR_RESET_RECV:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
+ break;
+ case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+ (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
+ break;
+ case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+ (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
+ break;
+ case SCTP_NOTIFY_STR_RESET_FAILED_IN:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+ (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
+ break;
+ case SCTP_NOTIFY_STR_RESET_DENIED_IN:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+ (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
+ break;
+ case SCTP_NOTIFY_ASCONF_ADD_IP:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
+ error, so_locked);
+ break;
+ case SCTP_NOTIFY_ASCONF_DELETE_IP:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
+ error, so_locked);
+ break;
+ case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
+ error, so_locked);
+ break;
+ case SCTP_NOTIFY_PEER_SHUTDOWN:
+ sctp_notify_shutdown_event(stcb);
+ break;
+ case SCTP_NOTIFY_AUTH_NEW_KEY:
+ sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
+ (uint16_t)(uintptr_t)data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_AUTH_FREE_KEY:
+ sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
+ (uint16_t)(uintptr_t)data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_NO_PEER_AUTH:
+ sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
+ (uint16_t)(uintptr_t)data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_SENDER_DRY:
+ sctp_notify_sender_dry_event(stcb, so_locked);
+ break;
+ case SCTP_NOTIFY_REMOTE_ERROR:
+ sctp_notify_remote_error(stcb, error, data);
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
+ __func__, notification, notification);
+ break;
+ } /* end switch */
+}
+
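+/*
+ * Flush the sent queue, the send queue and every per-stream output
+ * queue, reporting each pending message to the user as a send failure
+ * before freeing it.
+ */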
+void
+sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *outs;
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctp_stream_queue_pending *sp, *nsp;
+ int i;
+
+ if (stcb == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* already being freed */
+ return;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ }
+#endif
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
+ return;
+ }
+	/* now go through all the gunk, freeing chunks */
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* sent queue SHOULD be empty */
+ TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ asoc->sent_queue_cnt--;
+ if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+ }
+ }
+ if (chk->data != NULL) {
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
+ error, chk, so_locked);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ /*sa_ignore FREED_MEMORY*/
+ }
+ /* pending send queue SHOULD be empty */
+ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+ }
+ if (chk->data != NULL) {
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
+ error, chk, so_locked);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ sctp_free_a_chunk(stcb, chk, so_locked);
+ /*sa_ignore FREED_MEMORY*/
+ }
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ /* For each stream */
+ outs = &asoc->strmout[i];
+ /* clean up any sends there */
+ TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
+ atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
+ sctp_free_spbufspace(stcb, asoc, sp);
+ if (sp->data) {
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+ error, (void *)sp, so_locked);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ sp->length = 0;
+ }
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ /* Free the chunk */
+ sctp_free_a_strmoq(stcb, sp, so_locked);
+ /*sa_ignore FREED_MEMORY*/
+ }
+ }
+
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+}
+
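+/*
+ * Notify the user that the association was aborted: all outbound data
+ * is reported as failed first, then an assoc-change notification is
+ * raised; from_peer selects between the remote- and local-abort codes.
+ */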
+void
+sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
+ struct sctp_abort_chunk *abort, int so_locked)
+{
+ if (stcb == NULL) {
+ return;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ }
+#endif
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
+ }
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ return;
+ }
+ /* Tell them we lost the asoc */
+ sctp_report_all_outbound(stcb, error, 0, so_locked);
+ if (from_peer) {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
+ }
+}
+
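+/*
+ * Send an ABORT in response to a packet, using the peer's vtag when a
+ * TCB is known. If a TCB exists, the user is notified and the
+ * association is freed as well.
+ */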
+void
+sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct mbuf *m, int iphlen,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct mbuf *op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ uint32_t vtag;
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+ vtag = 0;
+ if (stcb != NULL) {
+ vtag = stcb->asoc.peer_vtag;
+ vrf_id = stcb->asoc.vrf_id;
+ }
+ sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, inp->fibnum,
+#endif
+ vrf_id, port);
+ if (stcb != NULL) {
+ /* We have a TCB to abort, send notification too */
+ sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
+		/* Ok, now let's free it */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+}
+#ifdef SCTP_ASOCLOG_OF_TSNS
+void
+sctp_print_out_track_log(struct sctp_tcb *stcb)
+{
+#ifdef NOSIY_PRINTS
+ int i;
+ SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
+ SCTP_PRINTF("IN bound TSN log-aaa\n");
+ if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
+ SCTP_PRINTF("None rcvd\n");
+ goto none_in;
+ }
+ if (stcb->asoc.tsn_in_wrapped) {
+ for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.in_tsnlog[i].tsn,
+ stcb->asoc.in_tsnlog[i].strm,
+ stcb->asoc.in_tsnlog[i].seq,
+ stcb->asoc.in_tsnlog[i].flgs,
+ stcb->asoc.in_tsnlog[i].sz);
+ }
+ }
+ if (stcb->asoc.tsn_in_at) {
+ for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.in_tsnlog[i].tsn,
+ stcb->asoc.in_tsnlog[i].strm,
+ stcb->asoc.in_tsnlog[i].seq,
+ stcb->asoc.in_tsnlog[i].flgs,
+ stcb->asoc.in_tsnlog[i].sz);
+ }
+ }
+ none_in:
+ SCTP_PRINTF("OUT bound TSN log-aaa\n");
+ if ((stcb->asoc.tsn_out_at == 0) &&
+ (stcb->asoc.tsn_out_wrapped == 0)) {
+ SCTP_PRINTF("None sent\n");
+ }
+ if (stcb->asoc.tsn_out_wrapped) {
+ for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.out_tsnlog[i].tsn,
+ stcb->asoc.out_tsnlog[i].strm,
+ stcb->asoc.out_tsnlog[i].seq,
+ stcb->asoc.out_tsnlog[i].flgs,
+ stcb->asoc.out_tsnlog[i].sz);
+ }
+ }
+ if (stcb->asoc.tsn_out_at) {
+ for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.out_tsnlog[i].tsn,
+ stcb->asoc.out_tsnlog[i].strm,
+ stcb->asoc.out_tsnlog[i].seq,
+ stcb->asoc.out_tsnlog[i].flgs,
+ stcb->asoc.out_tsnlog[i].sz);
+ }
+ }
+#endif
+}
+#endif
+
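+/*
+ * Abort an existing association: send an ABORT to the peer, notify the
+ * ULP unless the socket is already gone, and free the association.
+ * Without a TCB, only a pending-delete endpoint is torn down.
+ */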
+void
+sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct mbuf *op_err,
+ int so_locked)
+{
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so = SCTP_INP_SO(inp);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+ if (stcb == NULL) {
+ /* Got to have a TCB */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_LOCK(so, 1);
+ }
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_DIRECTLY_NOCMPSET);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+ }
+ return;
+ } else {
+ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
+ }
+ /* notify the peer */
+ sctp_send_abort_tcb(stcb, op_err, so_locked);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ /* notify the ulp */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
+ }
+ /* now free the asoc */
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ sctp_print_out_track_log(stcb);
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+}
+
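+/*
+ * Handle an out-of-the-blue packet (one matching no association). ABORT,
+ * SHUTDOWN-COMPLETE and packet-drop chunks are silently ignored, a
+ * SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and anything else
+ * provokes an ABORT, subject to the sctp_blackhole sysctl.
+ */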
+void
+sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
+ struct sockaddr *src, struct sockaddr *dst,
+ struct sctphdr *sh, struct sctp_inpcb *inp,
+ struct mbuf *cause,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_chunkhdr *ch, chunk_buf;
+ unsigned int chk_length;
+ int contains_init_chunk;
+
+ SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
+ /* Generate a TO address for future reference */
+ if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_DIRECTLY_NOCMPSET);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
+#endif
+ }
+ }
+ contains_init_chunk = 0;
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* break to abort land */
+ break;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_INIT:
+ contains_init_chunk = 1;
+ break;
+ case SCTP_PACKET_DROPPED:
+ /* we don't respond to pkt-dropped */
+ return;
+ case SCTP_ABORT_ASSOCIATION:
+ /* we don't respond with an ABORT to an ABORT */
+ return;
+ case SCTP_SHUTDOWN_COMPLETE:
+ /*
+ * we ignore it since we are not waiting for it and
+ * peer is gone
+ */
+ return;
+ case SCTP_SHUTDOWN_ACK:
+ sctp_send_shutdown_complete2(src, dst, sh,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ return;
+ default:
+ break;
+ }
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+ if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
+ ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
+ (contains_init_chunk == 0))) {
+ sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ }
+}
+
+/*
+ * Check the inbound datagram to make sure there is not an ABORT chunk
+ * inside it; if there is, return 1, else return 0.
+ */
+int
+sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
+{
+ struct sctp_chunkhdr *ch;
+ struct sctp_init_chunk *init_chk, chunk_buf;
+ int offset;
+ unsigned int chk_length;
+
+ offset = iphlen + sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
+ (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* packet is probably corrupt */
+ break;
+ }
+ /* we seem to be ok, is it an abort? */
+ if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
+ /* yep, tell them */
+ return (1);
+ }
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /* need to update the Vtag */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
+ if (init_chk != NULL) {
+ *vtagfill = ntohl(init_chk->init.initiate_tag);
+ }
+ }
+ /* Nope, move to the next chunk */
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+ return (0);
+}
+
+/*
+ * Currently (2/02), ifa_addr embeds the scope_id and doesn't have
+ * sin6_scope_id set (i.e. it's 0), so create this function to compare
+ * link-local scopes.
+ */
+#ifdef INET6
+uint32_t
+sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
+{
+#if defined(__Userspace__)
+ /*__Userspace__ Returning 1 here always */
+#endif
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+ struct sockaddr_in6 a, b;
+
+ /* save copies */
+ a = *addr1;
+ b = *addr2;
+
+ if (a.sin6_scope_id == 0)
+#ifdef SCTP_KAME
+ if (sa6_recoverscope(&a)) {
+#else
+ if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
+#endif /* SCTP_KAME */
+ /* can't get scope, so can't match */
+ return (0);
+ }
+ if (b.sin6_scope_id == 0)
+#ifdef SCTP_KAME
+ if (sa6_recoverscope(&b)) {
+#else
+ if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
+#endif /* SCTP_KAME */
+ /* can't get scope, so can't match */
+ return (0);
+ }
+ if (a.sin6_scope_id != b.sin6_scope_id)
+ return (0);
+#else
+ if (addr1->sin6_scope_id != addr2->sin6_scope_id)
+ return (0);
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+
+ return (1);
+}
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+/*
+ * returns a sockaddr_in6 with embedded scope recovered and removed
+ */
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
+{
+ /* check and strip embedded scope junk */
+ if (addr->sin6_family == AF_INET6) {
+ if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
+ if (addr->sin6_scope_id == 0) {
+ *store = *addr;
+#ifdef SCTP_KAME
+ if (!sa6_recoverscope(store)) {
+#else
+ if (!in6_recoverscope(store, &store->sin6_addr,
+ NULL)) {
+#endif /* SCTP_KAME */
+ /* use the recovered scope */
+ addr = store;
+ }
+ } else {
+ /* else, return the original "to" addr */
+ in6_clearscope(&addr->sin6_addr);
+ }
+ }
+ }
+ return (addr);
+}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif
+
+/*
+ * Are the two addresses the same? Currently a "scopeless" check.
+ * Returns 1 if same, 0 if not.
+ */
+int
+sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
+{
+
+ /* must be valid */
+ if (sa1 == NULL || sa2 == NULL)
+ return (0);
+
+ /* must be the same family */
+ if (sa1->sa_family != sa2->sa_family)
+ return (0);
+
+ switch (sa1->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* IPv6 addresses */
+ struct sockaddr_in6 *sin6_1, *sin6_2;
+
+ sin6_1 = (struct sockaddr_in6 *)sa1;
+ sin6_2 = (struct sockaddr_in6 *)sa2;
+ return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
+ sin6_2));
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ /* IPv4 addresses */
+ struct sockaddr_in *sin_1, *sin_2;
+
+ sin_1 = (struct sockaddr_in *)sa1;
+ sin_2 = (struct sockaddr_in *)sa2;
+ return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn_1, *sconn_2;
+
+ sconn_1 = (struct sockaddr_conn *)sa1;
+ sconn_2 = (struct sockaddr_conn *)sa2;
+ return (sconn_1->sconn_addr == sconn_2->sconn_addr);
+ }
+#endif
+ default:
+ /* we don't do these... */
+ return (0);
+ }
+}
+
+void
+sctp_print_address(struct sockaddr *sa)
+{
+#ifdef INET6
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ char ip6buf[INET6_ADDRSTRLEN];
+#endif
+#endif
+
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+#if defined(__Userspace__)
+ SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
+ ntohs(sin6->sin6_addr.s6_addr16[0]),
+ ntohs(sin6->sin6_addr.s6_addr16[1]),
+ ntohs(sin6->sin6_addr.s6_addr16[2]),
+ ntohs(sin6->sin6_addr.s6_addr16[3]),
+ ntohs(sin6->sin6_addr.s6_addr16[4]),
+ ntohs(sin6->sin6_addr.s6_addr16[5]),
+ ntohs(sin6->sin6_addr.s6_addr16[6]),
+ ntohs(sin6->sin6_addr.s6_addr16[7]),
+ ntohs(sin6->sin6_port),
+ sin6->sin6_scope_id);
+#else
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
+ ip6_sprintf(ip6buf, &sin6->sin6_addr),
+ ntohs(sin6->sin6_port),
+ sin6->sin6_scope_id);
+#else
+ SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
+ ip6_sprintf(&sin6->sin6_addr),
+ ntohs(sin6->sin6_port),
+ sin6->sin6_scope_id);
+#endif
+#endif
+ break;
+ }
+#endif
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+ unsigned char *p;
+
+ sin = (struct sockaddr_in *)sa;
+ p = (unsigned char *)&sin->sin_addr;
+ SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
+ p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
+ break;
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn;
+
+ sconn = (struct sockaddr_conn *)sa;
+ SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
+ break;
+ }
+#endif
+ default:
+ SCTP_PRINTF("?\n");
+ break;
+ }
+}
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+ struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb,
+ int waitflags)
+{
+ /*
+ * go through our old INP and pull off any control structures that
+	 * belong to stcb and move them to the new inp.
+ */
+ struct socket *old_so, *new_so;
+ struct sctp_queued_to_read *control, *nctl;
+ struct sctp_readhead tmp_queue;
+ struct mbuf *m;
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ int error = 0;
+#endif
+
+ old_so = old_inp->sctp_socket;
+ new_so = new_inp->sctp_socket;
+ TAILQ_INIT(&tmp_queue);
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+ error = sblock(&old_so->so_rcv, waitflags);
+ if (error) {
+		/* Gak, we can't get the sblock, so we have a problem.
+		 * The data will be left stranded, and we don't dare
+		 * look at it since the other thread may be reading
+		 * something. Oh well, it's a screwed up app that does
+		 * a peeloff OR an accept while reading from the main
+		 * socket... actually it's only the peeloff() case,
+		 * since I think read will fail on a listening socket.
+		 */
+ return;
+ }
+#endif
+ /* lock the socket buffers */
+ SCTP_INP_READ_LOCK(old_inp);
+ TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
+		/* Pull off all for our target stcb */
+ if (control->stcb == stcb) {
+ /* remove it we want it */
+ TAILQ_REMOVE(&old_inp->read_queue, control, next);
+ TAILQ_INSERT_TAIL(&tmp_queue, control, next);
+ m = control->data;
+ while (m) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
+ }
+ sctp_sbfree(control, stcb, &old_so->so_rcv, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ }
+ }
+ SCTP_INP_READ_UNLOCK(old_inp);
+ /* Remove the sb-lock on the old socket */
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sbunlock(&old_so->so_rcv, 1);
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sbunlock(&old_so->so_rcv);
+#endif
+ /* Now we move them over to the new socket buffer */
+ SCTP_INP_READ_LOCK(new_inp);
+ TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
+ TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
+ m = control->data;
+ while (m) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
+ }
+ sctp_sballoc(stcb, &new_so->so_rcv, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ }
+ SCTP_INP_READ_UNLOCK(new_inp);
+}
+
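+/*
+ * Wake up any reader sleeping on the socket's receive buffer, taking
+ * the socket lock first on platforms (Apple) that require it.
+ */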
+void
+sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ int so_locked
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ SCTP_UNUSED
+#endif
+)
+{
+ if ((inp != NULL) && (inp->sctp_socket != NULL)) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(inp);
+ if (!so_locked) {
+ if (stcb) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_SOCKET_LOCK(so, 1);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ sctp_sorwakeup(inp, inp->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+}
+#if defined(__Userspace__)
+
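+/*
+ * Userspace only: when a message is complete (or has passed the partial
+ * delivery point), flatten it into a contiguous buffer and hand it to
+ * the application's recv_callback instead of leaving it on the socket
+ * read queue.
+ */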
+void
+sctp_invoke_recv_callback(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ int inp_read_lock_held)
+{
+ uint32_t pd_point, length;
+
+ if ((inp->recv_callback == NULL) ||
+ (stcb == NULL) ||
+ (stcb->sctp_socket == NULL)) {
+ return;
+ }
+
+ length = control->length;
+ if (stcb != NULL && stcb->sctp_socket != NULL) {
+ pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
+ stcb->sctp_ep->partial_delivery_point);
+ } else {
+ pd_point = inp->partial_delivery_point;
+ }
+ if ((control->end_added == 1) || (length >= pd_point)) {
+ struct socket *so;
+ struct mbuf *m;
+ char *buffer;
+ struct sctp_rcvinfo rcv;
+ union sctp_sockstore addr;
+ int flags;
+
+ if ((buffer = malloc(length)) == NULL) {
+ return;
+ }
+ if (inp_read_lock_held == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ }
+ so = stcb->sctp_socket;
+ for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
+ sctp_sbfree(control, control->stcb, &so->so_rcv, m);
+ }
+ m_copydata(control->data, 0, length, buffer);
+ memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
+ rcv.rcv_sid = control->sinfo_stream;
+ rcv.rcv_ssn = (uint16_t)control->mid;
+ rcv.rcv_flags = control->sinfo_flags;
+ rcv.rcv_ppid = control->sinfo_ppid;
+ rcv.rcv_tsn = control->sinfo_tsn;
+ rcv.rcv_cumtsn = control->sinfo_cumtsn;
+ rcv.rcv_context = control->sinfo_context;
+ rcv.rcv_assoc_id = control->sinfo_assoc_id;
+ memset(&addr, 0, sizeof(union sctp_sockstore));
+ switch (control->whoFrom->ro._l_addr.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ addr.sin = control->whoFrom->ro._l_addr.sin;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ addr.sin6 = control->whoFrom->ro._l_addr.sin6;
+ break;
+#endif
+ case AF_CONN:
+ addr.sconn = control->whoFrom->ro._l_addr.sconn;
+ break;
+ default:
+ addr.sa = control->whoFrom->ro._l_addr.sa;
+ break;
+ }
+ flags = 0;
+ if (control->end_added == 1) {
+ flags |= MSG_EOR;
+ }
+ if (control->spec_flags & M_NOTIFICATION) {
+ flags |= MSG_NOTIFICATION;
+ }
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ control->tail_mbuf = NULL;
+ control->length = 0;
+ if (control->end_added) {
+ TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
+ control->on_read_q = 0;
+ sctp_free_remote_addr(control->whoFrom);
+ control->whoFrom = NULL;
+ sctp_free_a_readq(stcb, control);
+ }
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ if (inp_read_lock_held == 0) {
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+}
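+
+/*
+ * Editor's sketch (hypothetical, not part of the imported sources): the
+ * shape of an application-side recv_callback matching the invocation
+ * above, inp->recv_callback(so, addr, buffer, length, rcv, flags,
+ * inp->ulp_info). The buffer is malloc()ed by the stack, so the
+ * application owns it once the callback is invoked.
+ */
+#if 0
+static int
+example_recv_cb(struct socket *so, union sctp_sockstore addr, void *data,
+ size_t datalen, struct sctp_rcvinfo rcv, int flags, void *ulp_info)
+{
+ /* consume data (datalen bytes), then release the stack-allocated buffer */
+ free(data);
+ return (1);
+}
+#endif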
+#endif
+
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sockbuf *sb,
+ int end,
+ int inp_read_lock_held,
+ int so_locked)
+{
+ /*
+ * Here we must place the control on the end of the socket read
+ * queue AND increment sb_cc so that select will work properly on
+ * read.
+ */
+ struct mbuf *m, *prev = NULL;
+
+ if (inp == NULL) {
+ /* Gak, TSNH!! */
+#ifdef INVARIANTS
+ panic("Gak, inp NULL on add_to_readq");
+#endif
+ return;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(inp));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(inp));
+ }
+#endif
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_LOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
+ if (!control->on_strm_q) {
+ sctp_free_remote_addr(control->whoFrom);
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+ }
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_UNLOCK(inp);
+ return;
+ }
+ if (!(control->spec_flags & M_NOTIFICATION)) {
+ atomic_add_int(&inp->total_recvs, 1);
+ if (!control->do_not_ref_stcb) {
+ atomic_add_int(&stcb->total_recvs, 1);
+ }
+ }
+ m = control->data;
+ control->held_length = 0;
+ control->length = 0;
+ while (m) {
+ if (SCTP_BUF_LEN(m) == 0) {
+ /* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ control->data = sctp_m_free(m);
+ m = control->data;
+ } else {
+ SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+ m = SCTP_BUF_NEXT(prev);
+ }
+ if (m == NULL) {
+ control->tail_mbuf = prev;
+ }
+ continue;
+ }
+ prev = m;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
+ }
+ sctp_sballoc(stcb, sb, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+ m = SCTP_BUF_NEXT(m);
+ }
+ if (prev != NULL) {
+ control->tail_mbuf = prev;
+ } else {
+ /* Everything got collapsed out?? */
+ if (!control->on_strm_q) {
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ }
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_UNLOCK(inp);
+ return;
+ }
+ if (end) {
+ control->end_added = 1;
+ }
+ TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
+ control->on_read_q = 1;
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_UNLOCK(inp);
+#if defined(__Userspace__)
+ sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
+#endif
+ if (inp && inp->sctp_socket) {
+ sctp_wakeup_the_read_socket(inp, stcb, so_locked);
+ }
+}
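+
+/*
+ * Note (added for clarity, not from the upstream sources): the loop above
+ * frees zero-length mbufs out of the chain before accounting, so a control
+ * whose chain collapses entirely is released rather than queued. Otherwise
+ * the control is appended to inp->read_queue and, on __Userspace__ builds,
+ * the recv callback gets a chance to fire before the socket is woken up.
+ */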
+
+/*************HOLD THIS COMMENT FOR PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
+/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
+struct mbuf *
+sctp_generate_cause(uint16_t code, char *info)
+{
+ struct mbuf *m;
+ struct sctp_gen_error_cause *cause;
+ size_t info_len;
+ uint16_t len;
+
+ if ((code == 0) || (info == NULL)) {
+ return (NULL);
+ }
+ info_len = strlen(info);
+ if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
+ return (NULL);
+ }
+ len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
+ m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m != NULL) {
+ SCTP_BUF_LEN(m) = len;
+ cause = mtod(m, struct sctp_gen_error_cause *);
+ cause->code = htons(code);
+ cause->length = htons(len);
+ memcpy(cause->info, info, info_len);
+ }
+ return (m);
+}
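+
+/*
+ * Editor's sketch (illustrative only, compiled out): building an error
+ * cause with sctp_generate_cause(). The resulting mbuf carries a
+ * struct sctp_gen_error_cause, i.e. a paramhdr (code and length in
+ * network byte order) followed by the info bytes; the length is not
+ * padded to a multiple of 4 here, so any padding is left to the caller.
+ */
+#if 0
+static struct mbuf *
+example_proto_violation_cause(void)
+{
+ /* SCTP_CAUSE_PROTOCOL_VIOLATION comes from netinet/sctp.h */
+ return (sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
+ "example: unexpected condition"));
+}
+#endif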
+
+struct mbuf *
+sctp_generate_no_user_data_cause(uint32_t tsn)
+{
+ struct mbuf *m;
+ struct sctp_error_no_user_data *no_user_data_cause;
+ uint16_t len;
+
+ len = (uint16_t)sizeof(struct sctp_error_no_user_data);
+ m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+ if (m != NULL) {
+ SCTP_BUF_LEN(m) = len;
+ no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
+ no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
+ no_user_data_cause->cause.length = htons(len);
+ no_user_data_cause->tsn = htonl(tsn);
+ }
+ return (m);
+}
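+
+/*
+ * Wire-layout note (added for clarity): for any tsn the cause built above
+ * occupies 8 bytes: the 2-byte cause code SCTP_CAUSE_NO_USER_DATA, the
+ * 2-byte cause length (8), then the 4-byte TSN, all converted to network
+ * byte order via htons()/htonl().
+ */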
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_tmit_chunk *tp1, int chk_cnt)
+{
+ if (tp1->data == NULL) {
+ return;
+ }
+ asoc->chunks_on_out_queue -= chk_cnt;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
+ sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
+ asoc->total_output_queue_size,
+ tp1->book_size,
+ 0,
+ tp1->mbcnt);
+ }
+ if (asoc->total_output_queue_size >= tp1->book_size) {
+ atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
+ } else {
+ asoc->total_output_queue_size = 0;
+ }
+
+ if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
+ if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
+ stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
+ } else {
+ stcb->sctp_socket->so_snd.sb_cc = 0;
+
+ }
+ }
+}
+
+#endif
+
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
+ uint8_t sent, int so_locked)
+{
+ struct sctp_stream_out *strq;
+ struct sctp_tmit_chunk *chk = NULL, *tp2;
+ struct sctp_stream_queue_pending *sp;
+ uint32_t mid;
+ uint16_t sid;
+ uint8_t foundeom = 0;
+ int ret_sz = 0;
+ int notdone;
+ int do_wakeup_routine = 0;
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (so_locked) {
+ sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ } else {
+ sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+ }
+#endif
+ sid = tp1->rec.data.sid;
+ mid = tp1->rec.data.mid;
+ if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
+ stcb->asoc.abandoned_sent[0]++;
+ stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
+ stcb->asoc.strmout[sid].abandoned_sent[0]++;
+#if defined(SCTP_DETAILED_STR_STATS)
+ stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
+#endif
+ } else {
+ stcb->asoc.abandoned_unsent[0]++;
+ stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
+ stcb->asoc.strmout[sid].abandoned_unsent[0]++;
+#if defined(SCTP_DETAILED_STR_STATS)
+ stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
+#endif
+ }
+ do {
+ ret_sz += tp1->book_size;
+ if (tp1->data != NULL) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ stcb->asoc.peers_rwnd += tp1->send_size;
+ stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
+ if (sent) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
+ }
+ if (tp1->data) {
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+ do_wakeup_routine = 1;
+ if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ stcb->asoc.sent_queue_cnt_removeable--;
+ }
+ }
+ tp1->sent = SCTP_FORWARD_TSN_SKIP;
+ if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
+ SCTP_DATA_NOT_FRAG) {
+ /* not frag'ed, we are done */
+ notdone = 0;
+ foundeom = 1;
+ } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ /* end of frag, we are done */
+ notdone = 0;
+ foundeom = 1;
+ } else {
+ /*
+ * Its a begin or middle piece, we must mark all of
+ * it
+ */
+ notdone = 1;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ } while (tp1 && notdone);
+ if (foundeom == 0) {
+ /*
+ * The multi-part message was scattered across the send and
+ * sent queue.
+ */
+ TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
+ if ((tp1->rec.data.sid != sid) ||
+ (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
+ break;
+ }
+ /* Save to chk in case we have some on the stream out
+ * queue. If so, and we have an un-transmitted one, we
+ * don't have to fudge the TSN.
+ */
+ chk = tp1;
+ ret_sz += tp1->book_size;
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ if (sent) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
+ }
+ if (tp1->data) {
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+ /* No flight involved here; book the size to 0 */
+ tp1->book_size = 0;
+ if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ foundeom = 1;
+ }
+ do_wakeup_routine = 1;
+ tp1->sent = SCTP_FORWARD_TSN_SKIP;
+ TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
+ /* on to the sent queue so we can wait for it to be passed by. */
+ TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
+ sctp_next);
+ stcb->asoc.send_queue_cnt--;
+ stcb->asoc.sent_queue_cnt++;
+ }
+ }
+ if (foundeom == 0) {
+ /*
+ * Still no eom found. That means there
+ * is stuff left on the stream out queue.. yuck.
+ */
+ SCTP_TCB_SEND_LOCK(stcb);
+ strq = &stcb->asoc.strmout[sid];
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp != NULL) {
+ sp->discard_rest = 1;
+ /*
+ * We may need to put a chunk on the
+ * queue that holds the TSN that
+ * would have been sent with the LAST
+ * bit.
+ */
+ if (chk == NULL) {
+ /* Yep, we have to */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* we are hosed. All we can
+ * do is nothing.. which will
+ * cause an abort if the peer is
+ * paying attention.
+ */
+ goto oh_well;
+ }
+ memset(chk, 0, sizeof(*chk));
+ chk->rec.data.rcv_flags = 0;
+ chk->sent = SCTP_FORWARD_TSN_SKIP;
+ chk->asoc = &stcb->asoc;
+ if (stcb->asoc.idata_supported == 0) {
+ if (sp->sinfo_flags & SCTP_UNORDERED) {
+ chk->rec.data.mid = 0;
+ } else {
+ chk->rec.data.mid = strq->next_mid_ordered;
+ }
+ } else {
+ if (sp->sinfo_flags & SCTP_UNORDERED) {
+ chk->rec.data.mid = strq->next_mid_unordered;
+ } else {
+ chk->rec.data.mid = strq->next_mid_ordered;
+ }
+ }
+ chk->rec.data.sid = sp->sid;
+ chk->rec.data.ppid = sp->ppid;
+ chk->rec.data.context = sp->context;
+ chk->flags = sp->act_flags;
+ chk->whoTo = NULL;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
+#else
+ chk->rec.data.tsn = stcb->asoc.sending_seq++;
+#endif
+ strq->chunks_on_queues++;
+ TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
+ stcb->asoc.sent_queue_cnt++;
+ stcb->asoc.pr_sctp_cnt++;
+ }
+ chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
+ if (sp->sinfo_flags & SCTP_UNORDERED) {
+ chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
+ }
+ if (stcb->asoc.idata_supported == 0) {
+ if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
+ strq->next_mid_ordered++;
+ }
+ } else {
+ if (sp->sinfo_flags & SCTP_UNORDERED) {
+ strq->next_mid_unordered++;
+ } else {
+ strq->next_mid_ordered++;
+ }
+ }
+ oh_well:
+ if (sp->data) {
+ /* Pull any data to free up the SB and
+ * allow the sender to "add more" while we
+ * throw this away :-)
+ */
+ sctp_free_spbufspace(stcb, &stcb->asoc, sp);
+ ret_sz += sp->length;
+ do_wakeup_routine = 1;
+ sp->some_taken = 1;
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ sp->length = 0;
+ }
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ if (do_wakeup_routine) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (ret_sz);
+ }
+ }
+#endif
+ sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+ return (ret_sz);
+}
+
+/*
+ * Checks to see if the given address, addr, is one that is currently known
+ * by the kernel. Note: can't distinguish the same address on multiple
+ * interfaces and doesn't handle multiple addresses with different
+ * zone/scope ids. Note: ifa_ifwithaddr() compares the entire sockaddr
+ * struct.
+ */
+struct sctp_ifa *
+sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
+ int holds_lock)
+{
+ struct sctp_laddr *laddr;
+
+ if (holds_lock == 0) {
+ SCTP_INP_RLOCK(inp);
+ }
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL)
+ continue;
+ if (addr->sa_family != laddr->ifa->address.sa.sa_family)
+ continue;
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
+ laddr->ifa->address.sin.sin_addr.s_addr) {
+ /* found him. */
+ break;
+ }
+ }
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+ &laddr->ifa->address.sin6)) {
+ /* found him. */
+ break;
+ }
+ }
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
+ /* found him. */
+ break;
+ }
+ }
+#endif
+ }
+ if (holds_lock == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (laddr != NULL) {
+ return (laddr->ifa);
+ } else {
+ return (NULL);
+ }
+}
+
+uint32_t
+sctp_get_ifa_hash_val(struct sockaddr *addr)
+{
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+ uint32_t hash_of_addr;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+#if !defined(_WIN32) && !(defined(__FreeBSD__) && defined(__Userspace__)) && !defined(__APPLE__)
+ hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
+ sin6->sin6_addr.s6_addr32[1] +
+ sin6->sin6_addr.s6_addr32[2] +
+ sin6->sin6_addr.s6_addr32[3]);
+#else
+ hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
+ ((uint32_t *)&sin6->sin6_addr)[1] +
+ ((uint32_t *)&sin6->sin6_addr)[2] +
+ ((uint32_t *)&sin6->sin6_addr)[3]);
+#endif
+ hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
+ return (hash_of_addr);
+ }
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ {
+ struct sockaddr_conn *sconn;
+ uintptr_t temp;
+
+ sconn = (struct sockaddr_conn *)addr;
+ temp = (uintptr_t)sconn->sconn_addr;
+ return ((uint32_t)(temp ^ (temp >> 16)));
+ }
+#endif
+ default:
+ break;
+ }
+ return (0);
+}
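+
+/*
+ * Worked example (added for clarity): for the IPv4 address 192.0.2.1
+ * stored in network byte order on a little-endian host, s_addr reads back
+ * as 0x010200c0, so the fold yields 0x010200c0 ^ 0x00000102 = 0x010201c2.
+ * The XOR mixes the upper 16 bits into the lower ones so the result
+ * distributes reasonably when masked with vrf_addr_hashmark.
+ */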
+
+struct sctp_ifa *
+sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
+{
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_vrf *vrf;
+ struct sctp_ifalist *hash_head;
+ uint32_t hash_of_addr;
+
+ if (holds_lock == 0) {
+ SCTP_IPI_ADDR_RLOCK();
+ } else {
+ SCTP_IPI_ADDR_LOCK_ASSERT();
+ }
+
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+ }
+
+ hash_of_addr = sctp_get_ifa_hash_val(addr);
+
+ hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
+ if (hash_head == NULL) {
+ SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
+ hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
+ (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
+ sctp_print_address(addr);
+ SCTP_PRINTF("No such bucket for address\n");
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+
+ return (NULL);
+ }
+ LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
+ if (addr->sa_family != sctp_ifap->address.sa.sa_family)
+ continue;
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
+ sctp_ifap->address.sin.sin_addr.s_addr) {
+ /* found him. */
+ break;
+ }
+ }
+#endif
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+ &sctp_ifap->address.sin6)) {
+ /* found him. */
+ break;
+ }
+ }
+#endif
+#if defined(__Userspace__)
+ if (addr->sa_family == AF_CONN) {
+ if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
+ /* found him. */
+ break;
+ }
+ }
+#endif
+ }
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (sctp_ifap);
+}
+
+static void
+sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
+ uint32_t rwnd_req)
+{
+ /* User pulled some data, do we need a rwnd update? */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ int r_unlocked = 0;
+ uint32_t dif, rwnd;
+ struct socket *so = NULL;
+
+ if (stcb == NULL)
+ return;
+
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+
+ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
+ /* Pre-check: if we are freeing, no update */
+ goto no_lock;
+ }
+ SCTP_INP_INCR_REF(stcb->sctp_ep);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ goto out;
+ }
+ so = stcb->sctp_socket;
+ if (so == NULL) {
+ goto out;
+ }
+ atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
+ /* Have we freed enough to take a look? */
+ *freed_so_far = 0;
+ /* Yep, it's worth a look and the lock overhead */
+
+ /* Figure out what the rwnd would be */
+ rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
+ if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
+ dif = rwnd - stcb->asoc.my_last_reported_rwnd;
+ } else {
+ dif = 0;
+ }
+ if (dif >= rwnd_req) {
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ r_unlocked = 1;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /*
+ * One last check before we allow the guy possibly
+ * to get in. There is a race where the guy has not
+ * yet reached the gate; in that case, bail out.
+ */
+ goto out;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* No reports here */
+ SCTP_TCB_UNLOCK(stcb);
+ goto out;
+ }
+ SCTP_STAT_INCR(sctps_wu_sacks_sent);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_send_sack(stcb, SCTP_SO_LOCKED);
+
+ sctp_chunk_output(stcb->sctp_ep, stcb,
+ SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
+ /* make sure no timer is running */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* Update how much we have pending */
+ stcb->freed_by_sorcv_sincelast = dif;
+ }
+ out:
+ if (so && r_unlocked && hold_rlock) {
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ }
+
+ SCTP_INP_DECR_REF(stcb->sctp_ep);
+ no_lock:
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ return;
+}
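+
+/*
+ * Arithmetic note (added for clarity): rwnd_req is computed by the caller
+ * as SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT. Assuming a shift of
+ * 3, a 256 KB receive buffer yields a 32 KB rwnd_req, so a window-update
+ * SACK goes out only once the recomputed rwnd has grown by at least 32 KB
+ * over my_last_reported_rwnd, i.e. roughly 8 update SACKs per full
+ * buffer drain.
+ */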
+
+int
+sctp_sorecvmsg(struct socket *so,
+ struct uio *uio,
+ struct mbuf **mp,
+ struct sockaddr *from,
+ int fromlen,
+ int *msg_flags,
+ struct sctp_sndrcvinfo *sinfo,
+ int filling_sinfo)
+{
+ /*
+ * MSG flags we will look at:
+ * MSG_DONTWAIT - non-blocking IO.
+ * MSG_PEEK - Look, don't touch :-D (only valid with OUT mbuf copy,
+ * i.e. mp=NULL, thus uio is the copy method to userland).
+ * MSG_WAITALL - ??
+ * On the way out we may send out any combination of:
+ * MSG_NOTIFICATION, MSG_EOR.
+ */
+ struct sctp_inpcb *inp = NULL;
+ ssize_t my_len = 0;
+ ssize_t cp_len = 0;
+ int error = 0;
+ struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
+ struct mbuf *m = NULL;
+ struct sctp_tcb *stcb = NULL;
+ int wakeup_read_socket = 0;
+ int freecnt_applied = 0;
+ int out_flags = 0, in_flags = 0;
+ int block_allowed = 1;
+ uint32_t freed_so_far = 0;
+ ssize_t copied_so_far = 0;
+ int in_eeor_mode = 0;
+ int no_rcv_needed = 0;
+ uint32_t rwnd_req = 0;
+ int hold_sblock = 0;
+ int hold_rlock = 0;
+ ssize_t slen = 0;
+ uint32_t held_length = 0;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ int sockbuf_lock = 0;
+#endif
+
+ if (uio == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+
+ if (msg_flags) {
+ in_flags = *msg_flags;
+ if (in_flags & MSG_PEEK)
+ SCTP_STAT_INCR(sctps_read_peeks);
+ } else {
+ in_flags = 0;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ slen = uio->uio_resid;
+#else
+ slen = uio_resid(uio);
+#endif
+#else
+ slen = uio->uio_resid;
+#endif
+
+ /* Pull in and set up our int flags */
+ if (in_flags & MSG_OOB) {
+ /* Out of band's NOT supported */
+ return (EOPNOTSUPP);
+ }
+ if ((in_flags & MSG_PEEK) && (mp != NULL)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+ if ((in_flags & (MSG_DONTWAIT
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ | MSG_NBIO
+#endif
+ )) ||
+ SCTP_SO_IS_NBIO(so)) {
+ block_allowed = 0;
+ }
+ /* setup the endpoint */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
+ return (EFAULT);
+ }
+ rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
+ /* Must be at least a MTU's worth */
+ if (rwnd_req < SCTP_MIN_RWND)
+ rwnd_req = SCTP_MIN_RWND;
+ in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ sctp_misc_ints(SCTP_SORECV_ENTER,
+ rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
+#else
+ sctp_misc_ints(SCTP_SORECV_ENTER,
+ rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
+#endif
+#else
+ sctp_misc_ints(SCTP_SORECV_ENTER,
+ rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
+#endif
+ }
+#if defined(__Userspace__)
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ sctp_misc_ints(SCTP_SORECV_ENTERPL,
+ rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
+#else
+ sctp_misc_ints(SCTP_SORECV_ENTERPL,
+ rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
+#endif
+#else
+ sctp_misc_ints(SCTP_SORECV_ENTERPL,
+ rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
+#endif
+ }
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
+#endif
+ if (error) {
+ goto release_unlocked;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sockbuf_lock = 1;
+#endif
+ restart:
+#if defined(__Userspace__)
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sbunlock(&so->so_rcv, 1);
+#endif
+
+ restart_nosblocks:
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ goto out;
+ }
+#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
+#else
+ if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
+#endif
+ if (so->so_error) {
+ error = so->so_error;
+ if ((in_flags & MSG_PEEK) == 0)
+ so->so_error = 0;
+ goto out;
+ } else {
+ if (so->so_rcv.sb_cc == 0) {
+ /* indicate EOF */
+ error = 0;
+ goto out;
+ }
+ }
+ }
+ if (so->so_rcv.sb_cc <= held_length) {
+ if (so->so_error) {
+ error = so->so_error;
+ if ((in_flags & MSG_PEEK) == 0) {
+ so->so_error = 0;
+ }
+ goto out;
+ }
+ if ((so->so_rcv.sb_cc == 0) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+ /* For the active open side, clear flags for re-use;
+ * the passive open side is blocked by connect.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
+ /* You were aborted, passive side always hits here */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+ error = ECONNRESET;
+ }
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ if (error == 0) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
+ error = ENOTCONN;
+ }
+ }
+ goto out;
+ }
+ }
+ if (block_allowed) {
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto out;
+ }
+ held_length = 0;
+ goto restart_nosblocks;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
+ error = EWOULDBLOCK;
+ goto out;
+ }
+ }
+ if (hold_sblock == 1) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+#endif
+ /* we possibly have data we can read */
+ /*sa_ignore FREED_MEMORY*/
+ control = TAILQ_FIRST(&inp->read_queue);
+ if (control == NULL) {
+ /* This could be happening since
+ * the appender did the increment but has not
+ * yet done the tailq insert onto the read_queue
+ */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ }
+ control = TAILQ_FIRST(&inp->read_queue);
+ if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
+#ifdef INVARIANTS
+ panic("Huh, its non zero and nothing on control?");
+#endif
+ so->so_rcv.sb_cc = 0;
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ goto restart;
+ }
+
+ if ((control->length == 0) &&
+ (control->do_not_ref_stcb)) {
+ /* Clean up code for freeing an assoc that left behind a pdapi...
+ * maybe a peer in EEOR mode that just closed after sending and
+ * never indicated an EOR.
+ */
+ if (hold_rlock == 0) {
+ hold_rlock = 1;
+ SCTP_INP_READ_LOCK(inp);
+ }
+ control->held_length = 0;
+ if (control->data) {
+ /* Hmm there is data here .. fix */
+ struct mbuf *m_tmp;
+ int cnt = 0;
+ m_tmp = control->data;
+ while (m_tmp) {
+ cnt += SCTP_BUF_LEN(m_tmp);
+ if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+ control->tail_mbuf = m_tmp;
+ control->end_added = 1;
+ }
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ control->length = cnt;
+ } else {
+ /* remove it */
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ /* Add back any hidden data */
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ }
+ if (hold_rlock) {
+ hold_rlock = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ goto restart;
+ }
+ if ((control->length == 0) &&
+ (control->end_added == 1)) {
+ /* Do we also need to check for (control->pdapi_aborted == 1)? */
+ if (hold_rlock == 0) {
+ hold_rlock = 1;
+ SCTP_INP_READ_LOCK(inp);
+ }
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ if (control->data) {
+#ifdef INVARIANTS
+ panic("control->data not null but control->length == 0");
+#else
+ SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
+ sctp_m_freem(control->data);
+ control->data = NULL;
+#endif
+ }
+ if (control->aux_data) {
+ sctp_m_free(control->aux_data);
+ control->aux_data = NULL;
+ }
+#ifdef INVARIANTS
+ if (control->on_strm_q) {
+ panic("About to free ctl:%p so:%p and its in %d",
+ control, so, control->on_strm_q);
+ }
+#endif
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ if (hold_rlock) {
+ hold_rlock = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ goto restart;
+ }
+ if (control->length == 0) {
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
+ (filling_sinfo)) {
+ /* find a more suitable one than this */
+ ctl = TAILQ_NEXT(control, next);
+ while (ctl) {
+ if ((ctl->stcb != control->stcb) && (ctl->length) &&
+ (ctl->some_taken ||
+ (ctl->spec_flags & M_NOTIFICATION) ||
+ ((ctl->do_not_ref_stcb == 0) &&
+ (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
+ ) {
+ /*-
+ * If we have a different TCB next, and there is data
+ * present, and either we have already taken some (pdapi) OR we can
+ * ref the tcb and no delivery has started on this stream, we
+ * take it. Note we allow a notification on a different
+ * assoc to be delivered.
+ */
+ control = ctl;
+ goto found_one;
+ } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
+ (ctl->length) &&
+ ((ctl->some_taken) ||
+ ((ctl->do_not_ref_stcb == 0) &&
+ ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
+ (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
+ /*-
+ * If we have the same tcb, and there is data present, and we
+ * have the strm interleave feature present, then if we have
+ * taken some (pdapi) or we can refer to that tcb AND we have
+ * not started a delivery for this stream, we can take it.
+ * Note we do NOT allow a notification on the same assoc to
+ * be delivered.
+ */
+ control = ctl;
+ goto found_one;
+ }
+ ctl = TAILQ_NEXT(ctl, next);
+ }
+ }
+ /*
+ * if we reach here, no suitable replacement is available
+ * <or> fragment interleave is NOT on. So stuff the sb_cc
+ * into our held count, and it's time to sleep again.
+ */
+ held_length = so->so_rcv.sb_cc;
+ control->held_length = so->so_rcv.sb_cc;
+ goto restart;
+ }
+ /* Clear the held length since there is something to read */
+ control->held_length = 0;
+ found_one:
+ /*
+ * If we reach here, control has some data for us to read off.
+ * Note that stcb COULD be NULL.
+ */
+ if (hold_rlock == 0) {
+ hold_rlock = 1;
+ SCTP_INP_READ_LOCK(inp);
+ }
+ control->some_taken++;
+ stcb = control->stcb;
+ if (stcb) {
+ if ((control->do_not_ref_stcb == 0) &&
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
+ if (freecnt_applied == 0)
+ stcb = NULL;
+ } else if (control->do_not_ref_stcb == 0) {
+ /* you can't free it on me please */
+ /*
+ * The lock on the socket buffer protects us so the
+ * free code will stop. But since we used the socketbuf
+ * lock and the sender uses the tcb_lock to increment,
+ * we need to use the atomic add to the refcnt
+ */
+ if (freecnt_applied) {
+#ifdef INVARIANTS
+ panic("refcnt already incremented");
+#else
+ SCTP_PRINTF("refcnt already incremented?\n");
+#endif
+ } else {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ freecnt_applied = 1;
+ }
+ /*
+ * Setup to remember how much we have not yet told
+ * the peer our rwnd has opened up. Note we grab
+ * the value from the tcb from last time.
+ * Note too that sack sending clears this when a sack
+ * is sent, which is fine. Once we hit the rwnd_req,
+ * we then will go to the sctp_user_rcvd() that will
+ * not lock until it KNOWS it MUST send a WUP-SACK.
+ */
+ freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
+ stcb->freed_by_sorcv_sincelast = 0;
+ }
+ }
+ if (stcb &&
+ ((control->spec_flags & M_NOTIFICATION) == 0) &&
+ control->do_not_ref_stcb == 0) {
+ stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
+ }
+
+ /* First lets get off the sinfo and sockaddr info */
+ if ((sinfo != NULL) && (filling_sinfo != 0)) {
+ sinfo->sinfo_stream = control->sinfo_stream;
+ sinfo->sinfo_ssn = (uint16_t)control->mid;
+ sinfo->sinfo_flags = control->sinfo_flags;
+ sinfo->sinfo_ppid = control->sinfo_ppid;
+ sinfo->sinfo_context = control->sinfo_context;
+ sinfo->sinfo_timetolive = control->sinfo_timetolive;
+ sinfo->sinfo_tsn = control->sinfo_tsn;
+ sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
+ sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
+ nxt = TAILQ_NEXT(control, next);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
+ struct sctp_extrcvinfo *s_extra;
+ s_extra = (struct sctp_extrcvinfo *)sinfo;
+ if ((nxt) &&
+ (nxt->length)) {
+ s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
+ if (nxt->sinfo_flags & SCTP_UNORDERED) {
+ s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
+ }
+ if (nxt->spec_flags & M_NOTIFICATION) {
+ s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
+ }
+ s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
+ s_extra->serinfo_next_length = nxt->length;
+ s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
+ s_extra->serinfo_next_stream = nxt->sinfo_stream;
+ if (nxt->tail_mbuf != NULL) {
+ if (nxt->end_added) {
+ s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
+ }
+ }
+ } else {
+ /* we explicitly 0 this, since the memcpy got
+ * some other things beyond the older sinfo_
+ * fields that are on the control's structure :-D
+ */
+ nxt = NULL;
+ s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
+ s_extra->serinfo_next_aid = 0;
+ s_extra->serinfo_next_length = 0;
+ s_extra->serinfo_next_ppid = 0;
+ s_extra->serinfo_next_stream = 0;
+ }
+ }
+ /*
+ * update off the real current cum-ack, if we have an stcb.
+ */
+ if ((control->do_not_ref_stcb == 0) && stcb)
+ sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+ /*
+ * mask off the high bits, we keep the actual chunk bits in
+ * there.
+ */
+ sinfo->sinfo_flags &= 0x00ff;
+ if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
+ sinfo->sinfo_flags |= SCTP_UNORDERED;
+ }
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ {
+ int index, newindex;
+ struct sctp_pcbtsn_rlog *entry;
+ do {
+ index = inp->readlog_index;
+ newindex = index + 1;
+ if (newindex >= SCTP_READ_LOG_SIZE) {
+ newindex = 0;
+ }
+ } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
+ entry = &inp->readlog[index];
+ entry->vtag = control->sinfo_assoc_id;
+ entry->strm = control->sinfo_stream;
+ entry->seq = (uint16_t)control->mid;
+ entry->sz = control->length;
+ entry->flgs = control->sinfo_flags;
+ }
+#endif
+ if ((fromlen > 0) && (from != NULL)) {
+ union sctp_sockstore store;
+ size_t len;
+
+ switch (control->whoFrom->ro._l_addr.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ len = sizeof(struct sockaddr_in6);
+ store.sin6 = control->whoFrom->ro._l_addr.sin6;
+ store.sin6.sin6_port = control->port_from;
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ len = sizeof(struct sockaddr_in6);
+ in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
+ &store.sin6);
+ store.sin6.sin6_port = control->port_from;
+ } else {
+ len = sizeof(struct sockaddr_in);
+ store.sin = control->whoFrom->ro._l_addr.sin;
+ store.sin.sin_port = control->port_from;
+ }
+#else
+ len = sizeof(struct sockaddr_in);
+ store.sin = control->whoFrom->ro._l_addr.sin;
+ store.sin.sin_port = control->port_from;
+#endif
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ len = sizeof(struct sockaddr_conn);
+ store.sconn = control->whoFrom->ro._l_addr.sconn;
+ store.sconn.sconn_port = control->port_from;
+ break;
+#endif
+ default:
+ len = 0;
+ break;
+ }
+ memcpy(from, &store, min((size_t)fromlen, len));
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+#ifdef INET6
+ {
+ struct sockaddr_in6 lsa6, *from6;
+
+ from6 = (struct sockaddr_in6 *)from;
+ sctp_recover_scope_mac(from6, (&lsa6));
+ }
+#endif
+#endif
+ }
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ /* now copy out what data we can */
+ if (mp == NULL) {
+ /* copy out each mbuf in the chain up to length */
+ get_more_data:
+ m = control->data;
+ while (m) {
+ /* Move out all we can */
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ cp_len = uio->uio_resid;
+#else
+ cp_len = uio_resid(uio);
+#endif
+#else
+ cp_len = uio->uio_resid;
+#endif
+ my_len = SCTP_BUF_LEN(m);
+ if (cp_len > my_len) {
+ /* not enough in this buf */
+ cp_len = my_len;
+ }
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 0);
+#endif
+ if (cp_len > 0)
+ error = uiomove(mtod(m, char *), (int)cp_len, uio);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(so, 0);
+#endif
+ /* re-read */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ goto release;
+ }
+
+ if ((control->do_not_ref_stcb == 0) && stcb &&
+ stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ no_rcv_needed = 1;
+ }
+ if (error) {
+ /* error we are out of here */
+ goto release;
+ }
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ if (cp_len == SCTP_BUF_LEN(m)) {
+ if ((SCTP_BUF_NEXT(m) == NULL) &&
+ (control->end_added)) {
+ out_flags |= MSG_EOR;
+ if ((control->do_not_ref_stcb == 0) &&
+ (control->stcb != NULL) &&
+ ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+ }
+ if (control->spec_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ /* we ate up the mbuf */
+ if (in_flags & MSG_PEEK) {
+ /* just looking */
+ m = SCTP_BUF_NEXT(m);
+ copied_so_far += cp_len;
+ } else {
+ /* dispose of the mbuf */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+ }
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ copied_so_far += cp_len;
+ freed_so_far += (uint32_t)cp_len;
+ freed_so_far += MSIZE;
+ atomic_subtract_int(&control->length, cp_len);
+ control->data = sctp_m_free(m);
+ m = control->data;
+ /* been through it all, must hold sb lock, ok to null tail */
+ if (control->data == NULL) {
+#ifdef INVARIANTS
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if ((control->end_added == 0) ||
+ (TAILQ_NEXT(control, next) == NULL)) {
+ /* If the end is not added, OR the
+ * next is NOT null we MUST have the lock.
+ */
+ if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
+ panic("Hmm we don't own the lock?");
+ }
+ }
+#endif
+#endif
+ control->tail_mbuf = NULL;
+#ifdef INVARIANTS
+ if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
+ panic("end_added, nothing left and no MSG_EOR");
+ }
+#endif
+ }
+ }
+ } else {
+ /* Do we need to trim the mbuf? */
+ if (control->spec_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ if ((in_flags & MSG_PEEK) == 0) {
+ SCTP_BUF_RESV_UF(m, cp_len);
+ SCTP_BUF_LEN(m) -= (int)cp_len;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, (int)cp_len);
+ }
+ atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
+ if ((control->do_not_ref_stcb == 0) &&
+ stcb) {
+ atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
+ }
+ copied_so_far += cp_len;
+ freed_so_far += (uint32_t)cp_len;
+ freed_so_far += MSIZE;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
+ SCTP_LOG_SBRESULT, 0);
+ }
+ atomic_subtract_int(&control->length, cp_len);
+ } else {
+ copied_so_far += cp_len;
+ }
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
+#else
+ if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
+#endif
+#else
+ if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
+#endif
+ break;
+ }
+ if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+ (control->do_not_ref_stcb == 0) &&
+ (freed_so_far >= rwnd_req)) {
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+ } /* end while(m) */
+ /*
+ * At this point we have looked at it all and we either have
+ * a MSG_EOR, or we read all the user wants... <OR>
+ * control->length == 0.
+ */
+ if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
+ /* we are done with this control */
+ if (control->length == 0) {
+ if (control->data) {
+#ifdef INVARIANTS
+ panic("control->data not null at read eor?");
+#else
+ SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
+ sctp_m_freem(control->data);
+ control->data = NULL;
+#endif
+ }
+ done_with_control:
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ /* Add back any hidden data */
+ if (control->held_length) {
+ held_length = 0;
+ control->held_length = 0;
+ wakeup_read_socket = 1;
+ }
+ if (control->aux_data) {
+ sctp_m_free(control->aux_data);
+ control->aux_data = NULL;
+ }
+ no_rcv_needed = control->do_not_ref_stcb;
+ sctp_free_remote_addr(control->whoFrom);
+ control->data = NULL;
+#ifdef INVARIANTS
+ if (control->on_strm_q) {
+ panic("About to free ctl:%p so:%p and its in %d",
+ control, so, control->on_strm_q);
+ }
+#endif
+ sctp_free_a_readq(stcb, control);
+ control = NULL;
+ if ((freed_so_far >= rwnd_req) &&
+ (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+
+ } else {
+ /*
+ * The user did not read all of this
+ * message, turn off the returned MSG_EOR
+ * since we are leaving more behind on the
+ * control to read.
+ */
+#ifdef INVARIANTS
+ if (control->end_added &&
+ (control->data == NULL) &&
+ (control->tail_mbuf == NULL)) {
+ panic("Gak, control->length is corrupt?");
+ }
+#endif
+ no_rcv_needed = control->do_not_ref_stcb;
+ out_flags &= ~MSG_EOR;
+ }
+ }
+ if (out_flags & MSG_EOR) {
+ goto release;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ if ((uio->uio_resid == 0) ||
+#else
+ if ((uio_resid(uio) == 0) ||
+#endif
+#else
+ if ((uio->uio_resid == 0) ||
+#endif
+ ((in_eeor_mode) &&
+ (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
+ goto release;
+ }
+ /*
+ * If I hit here the receiver wants more and this message is
+ * NOT done (pd-api). So two questions. Can we block? if not
+ * we are done. Did the user NOT set MSG_WAITALL?
+ */
+ if (block_allowed == 0) {
+ goto release;
+ }
+ /*
+ * We need to wait for more data. A few things: - We don't
+ * sbunlock() so we don't get someone else reading. - We
+ * must be sure to account for the case where what is added
+ * is NOT to our control when we wakeup.
+ */
+
+ /* Do we need to tell the transport a rwnd update might be
+ * needed before we go to sleep?
+ */
+ if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+ ((freed_so_far >= rwnd_req) &&
+ (control->do_not_ref_stcb == 0) &&
+ (no_rcv_needed == 0))) {
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+ wait_some_more:
+#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ goto release;
+ }
+#else
+ if (so->so_state & SS_CANTRCVMORE) {
+ goto release;
+ }
+#endif
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
+ goto release;
+
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if ((copied_so_far) && (control->length == 0) &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
+ goto release;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sbunlock(&so->so_rcv, 1);
+#endif
+ if (so->so_rcv.sb_cc <= control->held_length) {
+ error = sbwait(&so->so_rcv);
+ if (error) {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ goto release;
+#else
+ goto release_unlocked;
+#endif
+ }
+ control->held_length = 0;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+#endif
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if (control->length == 0) {
+ /* still nothing here */
+ if (control->end_added == 1) {
+ /* he aborted, or is done, i.e. did a shutdown */
+ out_flags |= MSG_EOR;
+ if (control->pdapi_aborted) {
+ if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+
+ out_flags |= MSG_TRUNC;
+ } else {
+ if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+ }
+ goto done_with_control;
+ }
+ if (so->so_rcv.sb_cc > held_length) {
+ control->held_length = so->so_rcv.sb_cc;
+ held_length = 0;
+ }
+ goto wait_some_more;
+ } else if (control->data == NULL) {
+ /* we must re-sync since data
+ * is probably being added
+ */
+ SCTP_INP_READ_LOCK(inp);
+ if ((control->length > 0) && (control->data == NULL)) {
+ /* big trouble... we have the lock and it's corrupt? */
+#ifdef INVARIANTS
+ panic ("Impossible data==NULL length !=0");
+#endif
+ out_flags |= MSG_EOR;
+ out_flags |= MSG_TRUNC;
+ control->length = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ goto done_with_control;
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ /* We will fall around to get more data */
+ }
+ goto get_more_data;
+ } else {
+ /*-
+ * Give caller back the mbuf chain,
+ * store in uio_resid the length
+ */
+ wakeup_read_socket = 0;
+ if ((control->end_added == 0) ||
+ (TAILQ_NEXT(control, next) == NULL)) {
+ /* Need to get rlock */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ }
+ if (control->end_added) {
+ out_flags |= MSG_EOR;
+ if ((control->do_not_ref_stcb == 0) &&
+ (control->stcb != NULL) &&
+ ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+ }
+ if (control->spec_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ uio->uio_resid = control->length;
+#else
+ uio_setresid(uio, control->length);
+#endif
+#else
+ uio->uio_resid = control->length;
+#endif
+ *mp = control->data;
+ m = control->data;
+ while (m) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+ }
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+ freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
+ freed_so_far += MSIZE;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ control->data = control->tail_mbuf = NULL;
+ control->length = 0;
+ if (out_flags & MSG_EOR) {
+ /* Done with this control */
+ goto done_with_control;
+ }
+ }
+ release:
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+#if defined(__Userspace__)
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+#else
+ if (hold_sblock == 1) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+#endif
+#if defined(__APPLE__) && !defined(__Userspace__)
+ sbunlock(&so->so_rcv, 1);
+#endif
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ sbunlock(&so->so_rcv);
+ sockbuf_lock = 0;
+#endif
+
+ release_unlocked:
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if ((stcb) && (in_flags & MSG_PEEK) == 0) {
+ if ((freed_so_far >= rwnd_req) &&
+ (control && (control->do_not_ref_stcb == 0)) &&
+ (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+ out:
+ if (msg_flags) {
+ *msg_flags = out_flags;
+ }
+ if (((out_flags & MSG_EOR) == 0) &&
+ ((in_flags & MSG_PEEK) == 0) &&
+ (sinfo) &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
+ struct sctp_extrcvinfo *s_extra;
+ s_extra = (struct sctp_extrcvinfo *)sinfo;
+ s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
+ }
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (sockbuf_lock) {
+ sbunlock(&so->so_rcv);
+ }
+#endif
+
+ if (freecnt_applied) {
+ /*
+ * The lock on the socket buffer protects us so the free
+ * code will stop. But since we used the socketbuf lock and
+ * the sender uses the tcb_lock to increment, we need to use
+ * the atomic add to the refcnt.
+ */
+ if (stcb == NULL) {
+#ifdef INVARIANTS
+ panic("stcb for refcnt has gone NULL?");
+ goto stage_left;
+#else
+ goto stage_left;
+#endif
+ }
+ /* Save the value back for next time */
+ stcb->freed_by_sorcv_sincelast = freed_so_far;
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+ if (stcb) {
+ sctp_misc_ints(SCTP_SORECV_DONE,
+ freed_so_far,
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ ((uio) ? (slen - uio->uio_resid) : slen),
+#else
+ ((uio) ? (slen - uio_resid(uio)) : slen),
+#endif
+#else
+ (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
+#endif
+ stcb->asoc.my_rwnd,
+ so->so_rcv.sb_cc);
+ } else {
+ sctp_misc_ints(SCTP_SORECV_DONE,
+ freed_so_far,
+#if defined(__APPLE__) && !defined(__Userspace__)
+#if defined(APPLE_LEOPARD)
+ ((uio) ? (slen - uio->uio_resid) : slen),
+#else
+ ((uio) ? (slen - uio_resid(uio)) : slen),
+#endif
+#else
+ (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
+#endif
+ 0,
+ so->so_rcv.sb_cc);
+ }
+ }
+ stage_left:
+ if (wakeup_read_socket) {
+ sctp_sorwakeup(inp, so);
+ }
+ return (error);
+}
+
+
+#ifdef SCTP_MBUF_LOGGING
+struct mbuf *
+sctp_m_free(struct mbuf *m)
+{
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mb(m, SCTP_MBUF_IFREE);
+ }
+ return (m_free(m));
+}
+
+void
+sctp_m_freem(struct mbuf *mb)
+{
+ while (mb != NULL)
+ mb = sctp_m_free(mb);
+}
+
+#endif
+
+int
+sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
+{
+ /* Given a local address, for all associations
+ * that hold the address, request a peer-set-primary.
+ */
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *wi;
+
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (ifa == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
+ return (EADDRNOTAVAIL);
+ }
+ /* Now that we have the ifa we must awaken the
+ * iterator with this message.
+ */
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ /* Now incr the count and init the wi structure */
+ SCTP_INCR_LADDR_COUNT();
+ memset(wi, 0, sizeof(*wi));
+ (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+ wi->ifa = ifa;
+ wi->action = SCTP_SET_PRIM_ADDR;
+ atomic_add_int(&ifa->refcount, 1);
+
+ /* Now add it to the work queue */
+ SCTP_WQ_ADDR_LOCK();
+ /*
+ * Should this really be a tailq? As it is we will process the
+ * newest first :-0
+ */
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ SCTP_WQ_ADDR_UNLOCK();
+ return (0);
+}
+
+#if defined(__Userspace__)
+/* no sctp_soreceive for __Userspace__ now */
+#endif
+
+#if !defined(__Userspace__)
+int
+sctp_soreceive(struct socket *so,
+ struct sockaddr **psa,
+ struct uio *uio,
+ struct mbuf **mp0,
+ struct mbuf **controlp,
+ int *flagsp)
+{
+ int error, fromlen;
+ uint8_t sockbuf[256];
+ struct sockaddr *from;
+ struct sctp_extrcvinfo sinfo;
+ int filling_sinfo = 1;
+ int flags;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ /* pickup the assoc we are reading from */
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
+ (controlp == NULL)) {
+ /* user does not want the sndrcv ctl */
+ filling_sinfo = 0;
+ }
+ if (psa) {
+ from = (struct sockaddr *)sockbuf;
+ fromlen = sizeof(sockbuf);
+#ifdef HAVE_SA_LEN
+ from->sa_len = 0;
+#endif
+ } else {
+ from = NULL;
+ fromlen = 0;
+ }
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_LOCK(so, 1);
+#endif
+ if (filling_sinfo) {
+ memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
+ }
+ if (flagsp != NULL) {
+ flags = *flagsp;
+ } else {
+ flags = 0;
+ }
+ error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
+ (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
+ if (flagsp != NULL) {
+ *flagsp = flags;
+ }
+ if (controlp != NULL) {
+ /* copy back the sinfo in a CMSG format */
+ if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
+ *controlp = sctp_build_ctl_nchunk(inp,
+ (struct sctp_sndrcvinfo *)&sinfo);
+ } else {
+ *controlp = NULL;
+ }
+ }
+ if (psa) {
+ /* copy back the address info */
+#ifdef HAVE_SA_LEN
+ if (from && from->sa_len) {
+#else
+ if (from) {
+#endif
+#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
+ *psa = sodupsockaddr(from, M_NOWAIT);
+#else
+ *psa = dup_sockaddr(from, mp0 == 0);
+#endif
+ } else {
+ *psa = NULL;
+ }
+ }
+#if defined(__APPLE__) && !defined(__Userspace__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (error);
+}
+
+
+#if defined(_WIN32) && !defined(__Userspace__)
+/*
+ * General routine to allocate a hash table with control of memory flags.
+ * It is in 7.0 and beyond for sure :-)
+ */
+void *
+sctp_hashinit_flags(int elements, struct malloc_type *type,
+ u_long *hashmask, int flags)
+{
+ long hashsize;
+ LIST_HEAD(generic, generic) *hashtbl;
+ int i;
+
+
+ if (elements <= 0) {
+#ifdef INVARIANTS
+ panic("hashinit: bad elements");
+#else
+ SCTP_PRINTF("hashinit: bad elements?");
+ elements = 1;
+#endif
+ }
+ for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
+ continue;
+ hashsize >>= 1;
+ if (flags & HASH_WAITOK)
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
+ else if (flags & HASH_NOWAIT)
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
+ else {
+#ifdef INVARIANTS
+ panic("flag incorrect in hashinit_flags");
+#else
+ return (NULL);
+#endif
+ }
+
+ /* no memory? */
+ if (hashtbl == NULL)
+ return (NULL);
+
+ for (i = 0; i < hashsize; i++)
+ LIST_INIT(&hashtbl[i]);
+ *hashmask = hashsize - 1;
+ return (hashtbl);
+}
+#endif
+
+#else /* __Userspace__ ifdef above sctp_soreceive */
+/*
+ * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
+ * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
+ * __FreeBSD__ must be excluded.
+ *
+ */
+
+void *
+sctp_hashinit_flags(int elements, struct malloc_type *type,
+ u_long *hashmask, int flags)
+{
+ long hashsize;
+ LIST_HEAD(generic, generic) *hashtbl;
+ int i;
+
+ if (elements <= 0) {
+ SCTP_PRINTF("hashinit: bad elements?");
+#ifdef INVARIANTS
+ return (NULL);
+#else
+ elements = 1;
+#endif
+ }
+ for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
+ continue;
+ hashsize >>= 1;
+ /* cannot use MALLOC here because it has to be declared or defined
+ * using MALLOC_DECLARE or MALLOC_DEFINE first. */
+ if (flags & HASH_WAITOK)
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
+ else if (flags & HASH_NOWAIT)
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
+ else {
+#ifdef INVARIANTS
+ SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
+#endif
+ return (NULL);
+ }
+
+ /* no memory? */
+ if (hashtbl == NULL)
+ return (NULL);
+
+ for (i = 0; i < hashsize; i++)
+ LIST_INIT(&hashtbl[i]);
+ *hashmask = hashsize - 1;
+ return (hashtbl);
+}
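+
+/*
+ * Usage sketch (hypothetical, compiled out): the returned mask is
+ * hashsize - 1, so a bucket is selected with (hash & hashmask).
+ */
+#if 0
+static void
+example_hashinit_usage(void)
+{
+ u_long hashmask;
+ void *tbl;
+
+ /* 100 elements round down to 64 buckets (largest power of two <= 100) */
+ tbl = sctp_hashinit_flags(100, NULL, &hashmask, HASH_NOWAIT);
+ if (tbl != NULL) {
+ /* index a bucket: sctp_get_ifa_hash_val(addr) & hashmask */
+ }
+}
+#endif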
+
+
+void
+sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
+{
+ LIST_HEAD(generic, generic) *hashtbl, *hp;
+
+ hashtbl = vhashtbl;
+ for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
+ if (!LIST_EMPTY(hp)) {
+ SCTP_PRINTF("hashdestroy: hash not empty.\n");
+ return;
+ }
+ FREE(hashtbl, type);
+}
+
+
+void
+sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
+{
+ LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
+ /*
+ LIST_ENTRY(type) *start, *temp;
+ */
+ hashtbl = vhashtbl;
+ /* Apparently temp is not dynamically allocated, so attempts to
+ free it result in an error.
+ for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
+ if (!LIST_EMPTY(hp)) {
+ start = LIST_FIRST(hp);
+ while (start != NULL) {
+ temp = start;
+ start = start->le_next;
+ SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
+ FREE(temp, type);
+ }
+ }
+ */
+ FREE(hashtbl, type);
+}
+
+
+#endif
+
+
+int
+sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
+ int totaddr, int *error)
+{
+ int added = 0;
+ int i;
+ struct sctp_inpcb *inp;
+ struct sockaddr *sa;
+ size_t incr = 0;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+
+ sa = addr;
+ inp = stcb->sctp_ep;
+ *error = 0;
+ for (i = 0; i < totaddr; i++) {
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ incr = sizeof(struct sockaddr_in);
+ sin = (struct sockaddr_in *)sa;
+ if ((sin->sin_addr.s_addr == INADDR_ANY) ||
+ (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
+ *error = EINVAL;
+ goto out_now;
+ }
+ if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE,
+ SCTP_ADDR_IS_CONFIRMED)) {
+				/* assoc is gone; no unlock needed */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
+ *error = ENOBUFS;
+ goto out_now;
+ }
+ added++;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ incr = sizeof(struct sockaddr_in6);
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
+ IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
+ *error = EINVAL;
+ goto out_now;
+ }
+ if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE,
+ SCTP_ADDR_IS_CONFIRMED)) {
+				/* assoc is gone; no unlock needed */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
+ *error = ENOBUFS;
+ goto out_now;
+ }
+ added++;
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ incr = sizeof(struct sockaddr_conn);
+ if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
+ SCTP_DONOT_SETSCOPE,
+ SCTP_ADDR_IS_CONFIRMED)) {
+				/* assoc is gone; no unlock needed */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
+ *error = ENOBUFS;
+ goto out_now;
+ }
+ added++;
+ break;
+#endif
+ default:
+ break;
+ }
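+		/*
+		 * The addresses are packed back-to-back in the buffer; incr
+		 * was set per address family above, so this steps to the
+		 * start of the next entry.
+		 */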
+ sa = (struct sockaddr *)((caddr_t)sa + incr);
+ }
+ out_now:
+ return (added);
+}
+
+int
+sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
+ unsigned int totaddr,
+ unsigned int *num_v4, unsigned int *num_v6,
+ unsigned int limit)
+{
+ struct sockaddr *sa;
+ struct sctp_tcb *stcb;
+ unsigned int incr, at, i;
+
+ at = 0;
+ sa = addr;
+ *num_v6 = *num_v4 = 0;
+ /* account and validate addresses */
+ if (totaddr == 0) {
+ return (EINVAL);
+ }
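+	/*
+	 * Walk the packed address list.  Each entry is first checked to
+	 * make sure a generic sockaddr header fits below 'limit' (so
+	 * sa_family can be read safely) and then re-checked against the
+	 * family-specific size before the full structure is used.
+	 */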
+ for (i = 0; i < totaddr; i++) {
+ if (at + sizeof(struct sockaddr) > limit) {
+ return (EINVAL);
+ }
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ incr = (unsigned int)sizeof(struct sockaddr_in);
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != incr) {
+ return (EINVAL);
+ }
+#endif
+ (*num_v4) += 1;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ /* Must be non-mapped for connectx */
+ return (EINVAL);
+ }
+ incr = (unsigned int)sizeof(struct sockaddr_in6);
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != incr) {
+ return (EINVAL);
+ }
+#endif
+ (*num_v6) += 1;
+ break;
+ }
+#endif
+ default:
+ return (EINVAL);
+ }
+ if ((at + incr) > limit) {
+ return (EINVAL);
+ }
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ return (EALREADY);
+ } else {
+ SCTP_INP_DECR_REF(inp);
+ }
+ at += incr;
+ sa = (struct sockaddr *)((caddr_t)sa + incr);
+ }
+ return (0);
+}
+
+/*
+ * sctp_bindx(ADD) for one address.
+ * assumes all arguments are valid/checked by caller.
+ */
+void
+sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
+ struct sockaddr *sa, uint32_t vrf_id, int *error,
+ void *p)
+{
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+#ifdef INET
+ struct sockaddr_in *sinp;
+#endif
+ struct sockaddr *addr_to_use;
+ struct sctp_inpcb *lep;
+#ifdef SCTP_MVRF
+ int i;
+#endif
+ uint16_t port;
+
+ /* see if we're bound all already! */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#ifdef SCTP_MVRF
+ /* Is the VRF one we have */
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (vrf_id == inp->m_vrf_ids[i]) {
+ break;
+ }
+ }
+ if (i == inp->num_vrfs) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ /* can only bind v6 on PF_INET6 sockets */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+ port = sin6->sin6_port;
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+				/* can't bind a v4-mapped addr on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ in6_sin6_2_sin(&sin, sin6);
+ addr_to_use = (struct sockaddr *)&sin;
+ } else {
+ addr_to_use = sa;
+ }
+#else
+ addr_to_use = sa;
+#endif
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+			/* can't bind a v4 addr on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ sinp = (struct sockaddr_in *)sa;
+ port = sinp->sin_port;
+ addr_to_use = sa;
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+#if !(defined(_WIN32) || defined(__Userspace__))
+ if (p == NULL) {
+ /* Can't get proc for Net/Open BSD */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ *error = sctp_inpcb_bind(so, addr_to_use, NULL, p);
+ return;
+ }
+ /* Validate the incoming port. */
+ if ((port != 0) && (port != inp->sctp_lport)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
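+	/*
+	 * Look for an endpoint already bound to this address and port:
+	 * if there is none, add the address to this endpoint; if we find
+	 * ourselves, it is a harmless no-op; any other endpoint means
+	 * the address is in use.
+	 */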
+ lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id);
+ if (lep == NULL) {
+ /* add the address */
+ *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use,
+ SCTP_ADD_IP_ADDRESS, vrf_id);
+ } else {
+ if (lep != inp) {
+ *error = EADDRINUSE;
+ }
+ SCTP_INP_DECR_REF(lep);
+ }
+}
+
+/*
+ * sctp_bindx(DELETE) for one address.
+ * assumes all arguments are valid/checked by caller.
+ */
+void
+sctp_bindx_delete_address(struct sctp_inpcb *inp,
+ struct sockaddr *sa, uint32_t vrf_id, int *error)
+{
+ struct sockaddr *addr_to_use;
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in sin;
+#endif
+#ifdef SCTP_MVRF
+ int i;
+#endif
+
+ /* see if we're bound all already! */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#ifdef SCTP_MVRF
+ /* Is the VRF one we have */
+ for (i = 0; i < inp->num_vrfs; i++) {
+ if (vrf_id == inp->m_vrf_ids[i]) {
+ break;
+ }
+ }
+ if (i == inp->num_vrfs) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ /* can only bind v6 on PF_INET6 sockets */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+				/* can't bind a v4-mapped addr on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ in6_sin6_2_sin(&sin, sin6);
+ addr_to_use = (struct sockaddr *)&sin;
+ } else {
+ addr_to_use = sa;
+ }
+#else
+ addr_to_use = sa;
+#endif
+ break;
+#endif
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+			/* can't bind a v4 addr on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ addr_to_use = sa;
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ /* No lock required mgmt_ep_sa does its own locking. */
+ *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS,
+ vrf_id);
+}
+
+/*
+ * returns the valid local address count for an assoc, taking into account
+ * all scoping rules
+ */
+int
+sctp_local_addr_count(struct sctp_tcb *stcb)
+{
+ int loopback_scope;
+#if defined(INET)
+ int ipv4_local_scope, ipv4_addr_legal;
+#endif
+#if defined(INET6)
+ int local_scope, site_scope, ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+ int conn_addr_legal;
+#endif
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ int count = 0;
+
+ /* Turn on all the appropriate scopes */
+ loopback_scope = stcb->asoc.scope.loopback_scope;
+#if defined(INET)
+ ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+ ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+#endif
+#if defined(INET6)
+ local_scope = stcb->asoc.scope.local_scope;
+ site_scope = stcb->asoc.scope.site_scope;
+ ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+ conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+ if (vrf == NULL) {
+ /* no vrf, no addresses */
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (0);
+ }
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /*
+ * bound all case: go through all ifns on the vrf
+ */
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (sctp_is_addr_restricted(stcb, sctp_ifa))
+ continue;
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = &sctp_ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+ /* skip unspecified addrs */
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin->sin_addr) != 0) {
+ continue;
+ }
+#endif
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ continue;
+ }
+ /* count this one */
+ count++;
+ } else {
+ continue;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
+ struct sockaddr_in6 lsa6;
+#endif
+ sin6 = &sctp_ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ continue;
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+ &sin6->sin6_addr) != 0) {
+ continue;
+ }
+#endif
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (sin6->sin6_scope_id == 0) {
+#ifdef SCTP_KAME
+ if (sa6_recoverscope(sin6) != 0)
+							/* bad link-local address */
+ continue;
+#else
+ lsa6 = *sin6;
+ if (in6_recoverscope(&lsa6,
+ &lsa6.sin6_addr,
+ NULL))
+							/* bad link-local address */
+ continue;
+ sin6 = &lsa6;
+#endif /* SCTP_KAME */
+ }
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ /* count this one */
+ count++;
+ }
+ break;
+#endif
+#if defined(__Userspace__)
+ case AF_CONN:
+ if (conn_addr_legal) {
+ count++;
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ }
+ } else {
+ /*
+ * subset bound case
+ */
+ struct sctp_laddr *laddr;
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+ continue;
+ }
+ /* count this one */
+ count++;
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (count);
+}
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+
+void
+sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
+{
+ uint32_t saveindex, newindex;
+
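+	/*
+	 * Reserve a slot in the circular trace buffer without taking a
+	 * lock: the compare-and-swap loop advances the shared index and
+	 * retries if another CPU got there first; saveindex then names
+	 * this caller's private slot (wrapping to 0 at the end).
+	 */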
+#if defined(_WIN32) && !defined(__Userspace__)
+ if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
+ return;
+ }
+ do {
+ saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
+ if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+ newindex = 1;
+ } else {
+ newindex = saveindex + 1;
+ }
+ } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
+ if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+ saveindex = 0;
+ }
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
+ SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
+#else
+ do {
+ saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
+ if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+ newindex = 1;
+ } else {
+ newindex = saveindex + 1;
+ }
+ } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
+ if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+ saveindex = 0;
+ }
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
+#endif
+}
+
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static void
+sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
+ const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
+{
+ struct ip *iph;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+ struct mbuf *sp, *last;
+ struct udphdr *uhdr;
+ uint16_t port;
+
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ /* Can't handle one that is not a pkt hdr */
+ goto out;
+ }
+ /* Pull the src port */
+ iph = mtod(m, struct ip *);
+ uhdr = (struct udphdr *)((caddr_t)iph + off);
+ port = uhdr->uh_sport;
+ /* Split out the mbuf chain. Leave the
+ * IP header in m, place the
+ * rest in the sp.
+ */
+ sp = m_split(m, off, M_NOWAIT);
+ if (sp == NULL) {
+ /* Gak, drop packet, we can't do a split */
+ goto out;
+ }
+ if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
+ /* Gak, packet can't have an SCTP header in it - too small */
+ m_freem(sp);
+ goto out;
+ }
+ /* Now pull up the UDP header and SCTP header together */
+ sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
+ if (sp == NULL) {
+		/* Gak, pullup failed */
+ goto out;
+ }
+ /* Trim out the UDP header */
+ m_adj(sp, sizeof(struct udphdr));
+
+ /* Now reconstruct the mbuf chain */
+ for (last = m; last->m_next; last = last->m_next);
+ last->m_next = sp;
+ m->m_pkthdr.len += sp->m_pkthdr.len;
+ /*
+	 * The CSUM_DATA_VALID flag indicates that the HW checked the
+	 * UDP checksum and that it was valid.
+ * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
+ * the HW also verified the SCTP checksum. Therefore, clear the bit.
+ */
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
+ m->m_pkthdr.len,
+ if_name(m->m_pkthdr.rcvif),
+ (int)m->m_pkthdr.csum_flags, CSUM_BITS);
+ m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
+ iph = mtod(m, struct ip *);
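+	/*
+	 * The UDP header was trimmed from the payload above, so shrink
+	 * the IP-level payload length to match before handing the packet
+	 * to the SCTP input path.
+	 */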
+ switch (iph->ip_v) {
+#ifdef INET
+ case IPVERSION:
+ iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
+ sctp_input_with_port(m, off, port);
+ break;
+#endif
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
+ sctp6_input_with_port(&m, &off, port);
+ break;
+#endif
+ default:
+ goto out;
+ break;
+ }
+ return;
+ out:
+ m_freem(m);
+}
+
+#ifdef INET
+static void
+sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
+{
+ struct ip *outer_ip, *inner_ip;
+ struct sctphdr *sh;
+ struct icmp *icmp;
+ struct udphdr *udp;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct sctp_init_chunk *ch;
+ struct sockaddr_in src, dst;
+ uint8_t type, code;
+
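+	/*
+	 * vip points at the copy of the offending (inner) IP header that
+	 * is quoted inside the ICMP error.  Step backwards from it to
+	 * recover the ICMP header and, in front of that, the outer IP
+	 * header; then make sure the outer packet is long enough to also
+	 * quote the UDP header and the start of the SCTP common header.
+	 */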
+ inner_ip = (struct ip *)vip;
+ icmp = (struct icmp *)((caddr_t)inner_ip -
+ (sizeof(struct icmp) - sizeof(struct ip)));
+ outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
+ if (ntohs(outer_ip->ip_len) <
+ sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
+ return;
+ }
+ udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
+ sh = (struct sctphdr *)(udp + 1);
+ memset(&src, 0, sizeof(struct sockaddr_in));
+ src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ src.sin_len = sizeof(struct sockaddr_in);
+#endif
+ src.sin_port = sh->src_port;
+ src.sin_addr = inner_ip->ip_src;
+ memset(&dst, 0, sizeof(struct sockaddr_in));
+ dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ dst.sin_port = sh->dest_port;
+ dst.sin_addr = inner_ip->ip_dst;
+ /*
+ * 'dst' holds the dest of the packet that failed to be sent.
+ * 'src' holds our local endpoint address. Thus we reverse
+ * the dst and the src in the lookup.
+ */
+ inp = NULL;
+ net = NULL;
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+ (struct sockaddr *)&src,
+ &inp, &net, 1,
+ SCTP_DEFAULT_VRFID);
+ if ((stcb != NULL) &&
+ (net != NULL) &&
+ (inp != NULL)) {
+ /* Check the UDP port numbers */
+ if ((udp->uh_dport != net->port) ||
+ (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* Check the verification tag */
+ if (ntohl(sh->v_tag) != 0) {
+ /*
+ * This must be the verification tag used
+ * for sending out packets. We don't
+ * consider packets reflecting the
+ * verification tag.
+ */
+ if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+ if (ntohs(outer_ip->ip_len) >=
+ sizeof(struct ip) +
+ 8 + (inner_ip->ip_hl << 2) + 8 + 20) {
+ /*
+ * In this case we can check if we
+ * got an INIT chunk and if the
+ * initiate tag matches.
+ */
+ ch = (struct sctp_init_chunk *)(sh + 1);
+ if ((ch->ch.chunk_type != SCTP_INITIATION) ||
+ (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ }
+ type = icmp->icmp_type;
+ code = icmp->icmp_code;
+ if ((type == ICMP_UNREACH) &&
+ (code == ICMP_UNREACH_PORT)) {
+ code = ICMP_UNREACH_PROTOCOL;
+ }
+ sctp_notify(inp, stcb, net, type, code,
+ ntohs(inner_ip->ip_len),
+ (uint32_t)ntohs(icmp->icmp_nextmtu));
+#if defined(__Userspace__)
+ if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb->sctp_socket != NULL)) {
+ struct socket *upcall_socket;
+
+ upcall_socket = stcb->sctp_socket;
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ if ((upcall_socket->so_upcall != NULL) &&
+ (upcall_socket->so_error != 0)) {
+ (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(upcall_socket);
+ sorele(upcall_socket);
+ }
+#endif
+ } else {
+ if ((stcb == NULL) && (inp != NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ return;
+}
+#endif
+
+#ifdef INET6
+static void
+sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
+{
+ struct ip6ctlparam *ip6cp;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct sctphdr sh;
+ struct udphdr udp;
+ struct sockaddr_in6 src, dst;
+ uint8_t type, code;
+
+ ip6cp = (struct ip6ctlparam *)d;
+ /*
+	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are
+ * valid.
+ */
+ if (ip6cp->ip6c_m == NULL) {
+ return;
+ }
+ /* Check if we can safely examine the ports and the
+ * verification tag of the SCTP common header.
+ */
+ if (ip6cp->ip6c_m->m_pkthdr.len <
+	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
+ return;
+ }
+ /* Copy out the UDP header. */
+ memset(&udp, 0, sizeof(struct udphdr));
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off,
+ sizeof(struct udphdr),
+ (caddr_t)&udp);
+ /* Copy out the port numbers and the verification tag. */
+ memset(&sh, 0, sizeof(struct sctphdr));
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off + sizeof(struct udphdr),
+ sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
+ (caddr_t)&sh);
+ memset(&src, 0, sizeof(struct sockaddr_in6));
+ src.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ src.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ src.sin6_port = sh.src_port;
+ src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
+ return;
+ }
+#endif
+ memset(&dst, 0, sizeof(struct sockaddr_in6));
+ dst.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ dst.sin6_port = sh.dest_port;
+ dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
+ return;
+ }
+#endif
+ inp = NULL;
+ net = NULL;
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+ (struct sockaddr *)&src,
+ &inp, &net, 1, SCTP_DEFAULT_VRFID);
+ if ((stcb != NULL) &&
+ (net != NULL) &&
+ (inp != NULL)) {
+ /* Check the UDP port numbers */
+ if ((udp.uh_dport != net->port) ||
+ (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* Check the verification tag */
+ if (ntohl(sh.v_tag) != 0) {
+ /*
+ * This must be the verification tag used for
+ * sending out packets. We don't consider
+ * packets reflecting the verification tag.
+ */
+ if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ if (ip6cp->ip6c_m->m_pkthdr.len >=
+ ip6cp->ip6c_off + sizeof(struct udphdr) +
+ sizeof(struct sctphdr) +
+ sizeof(struct sctp_chunkhdr) +
+ offsetof(struct sctp_init, a_rwnd)) {
+ /*
+ * In this case we can check if we
+ * got an INIT chunk and if the
+ * initiate tag matches.
+ */
+ uint32_t initiate_tag;
+ uint8_t chunk_type;
+
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off +
+ sizeof(struct udphdr) +
+ sizeof(struct sctphdr),
+ sizeof(uint8_t),
+ (caddr_t)&chunk_type);
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off +
+ sizeof(struct udphdr) +
+ sizeof(struct sctphdr) +
+ sizeof(struct sctp_chunkhdr),
+ sizeof(uint32_t),
+ (caddr_t)&initiate_tag);
+ if ((chunk_type != SCTP_INITIATION) ||
+ (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+#else
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+#endif
+ }
+ type = ip6cp->ip6c_icmp6->icmp6_type;
+ code = ip6cp->ip6c_icmp6->icmp6_code;
+ if ((type == ICMP6_DST_UNREACH) &&
+ (code == ICMP6_DST_UNREACH_NOPORT)) {
+ type = ICMP6_PARAM_PROB;
+ code = ICMP6_PARAMPROB_NEXTHEADER;
+ }
+ sctp6_notify(inp, stcb, net, type, code,
+ ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
+#if defined(__Userspace__)
+ if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb->sctp_socket != NULL)) {
+ struct socket *upcall_socket;
+
+ upcall_socket = stcb->sctp_socket;
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ if ((upcall_socket->so_upcall != NULL) &&
+ (upcall_socket->so_error != 0)) {
+ (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(upcall_socket);
+ sorele(upcall_socket);
+ }
+#endif
+ } else {
+ if ((stcb == NULL) && (inp != NULL)) {
+ /* reduce inp's ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+}
+#endif
+
+void
+sctp_over_udp_stop(void)
+{
+ /*
+	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
+ */
+#ifdef INET
+ if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
+ soclose(SCTP_BASE_INFO(udp4_tun_socket));
+ SCTP_BASE_INFO(udp4_tun_socket) = NULL;
+ }
+#endif
+#ifdef INET6
+ if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
+ soclose(SCTP_BASE_INFO(udp6_tun_socket));
+ SCTP_BASE_INFO(udp6_tun_socket) = NULL;
+ }
+#endif
+}
+
+int
+sctp_over_udp_start(void)
+{
+ uint16_t port;
+ int ret;
+#ifdef INET
+ struct sockaddr_in sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+ /*
+	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
+ */
+ port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
+ if (ntohs(port) == 0) {
+ /* Must have a port set */
+ return (EINVAL);
+ }
+#ifdef INET
+ if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
+ /* Already running -- must stop first */
+ return (EALREADY);
+ }
+#endif
+#ifdef INET6
+ if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
+ /* Already running -- must stop first */
+ return (EALREADY);
+ }
+#endif
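+	/*
+	 * For each enabled address family: create a kernel-owned UDP
+	 * socket, install the tunneling input and ICMP hooks on it, and
+	 * bind it to the configured tunneling port.
+	 */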
+#ifdef INET
+ if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
+ SOCK_DGRAM, IPPROTO_UDP,
+ curthread->td_ucred, curthread))) {
+ sctp_over_udp_stop();
+ return (ret);
+ }
+ /* Call the special UDP hook. */
+ if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
+ sctp_recv_udp_tunneled_packet,
+ sctp_recv_icmp_tunneled_packet,
+ NULL))) {
+ sctp_over_udp_stop();
+ return (ret);
+ }
+ /* Ok, we have a socket, bind it to the port. */
+ memset(&sin, 0, sizeof(struct sockaddr_in));
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
+ (struct sockaddr *)&sin, curthread))) {
+ sctp_over_udp_stop();
+ return (ret);
+ }
+#endif
+#ifdef INET6
+ if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
+ SOCK_DGRAM, IPPROTO_UDP,
+ curthread->td_ucred, curthread))) {
+ sctp_over_udp_stop();
+ return (ret);
+ }
+ /* Call the special UDP hook. */
+ if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
+ sctp_recv_udp_tunneled_packet,
+ sctp_recv_icmp6_tunneled_packet,
+ NULL))) {
+ sctp_over_udp_stop();
+ return (ret);
+ }
+ /* Ok, we have a socket, bind it to the port. */
+ memset(&sin6, 0, sizeof(struct sockaddr_in6));
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
+ (struct sockaddr *)&sin6, curthread))) {
+ sctp_over_udp_stop();
+ return (ret);
+ }
+#endif
+ return (0);
+}
+#endif
+
+/*
+ * sctp_min_mtu() returns the minimum of all non-zero arguments;
+ * e.g. sctp_min_mtu(1500, 0, 1280) returns 1280.
+ * If all arguments are zero, zero is returned.
+ */
+uint32_t
+sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
+{
+ if (mtu1 > 0) {
+ if (mtu2 > 0) {
+ if (mtu3 > 0) {
+ return (min(mtu1, min(mtu2, mtu3)));
+ } else {
+ return (min(mtu1, mtu2));
+ }
+ } else {
+ if (mtu3 > 0) {
+ return (min(mtu1, mtu3));
+ } else {
+ return (mtu1);
+ }
+ }
+ } else {
+ if (mtu2 > 0) {
+ if (mtu3 > 0) {
+ return (min(mtu2, mtu3));
+ } else {
+ return (mtu2);
+ }
+ } else {
+ return (mtu3);
+ }
+ }
+}
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+void
+sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
+{
+ struct in_conninfo inc;
+
+ memset(&inc, 0, sizeof(struct in_conninfo));
+ inc.inc_fibnum = fibnum;
+ switch (addr->sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ inc.inc_faddr = addr->sin.sin_addr;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ inc.inc_flags |= INC_ISIPV6;
+ inc.inc6_faddr = addr->sin6.sin6_addr;
+ break;
+#endif
+ default:
+ return;
+ }
+ tcp_hc_updatemtu(&inc, (u_long)mtu);
+}
+
+uint32_t
+sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
+{
+ struct in_conninfo inc;
+
+ memset(&inc, 0, sizeof(struct in_conninfo));
+ inc.inc_fibnum = fibnum;
+ switch (addr->sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ inc.inc_faddr = addr->sin.sin_addr;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ inc.inc_flags |= INC_ISIPV6;
+ inc.inc6_faddr = addr->sin6.sin6_addr;
+ break;
+#endif
+ default:
+ return (0);
+ }
+ return ((uint32_t)tcp_hc_getmtu(&inc));
+}
+#endif
+
+void
+sctp_set_state(struct sctp_tcb *stcb, int new_state)
+{
+#if defined(KDTRACE_HOOKS)
+ int old_state = stcb->asoc.state;
+#endif
+
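+	/*
+	 * asoc.state keeps the base state in the bits covered by
+	 * SCTP_STATE_MASK and substate flags in the remaining bits.
+	 * This helper replaces only the base state, leaving the substate
+	 * flags alone (except for clearing SHUTDOWN_PENDING when moving
+	 * into one of the shutdown states below).
+	 */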
+ KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
+ ("sctp_set_state: Can't set substate (new_state = %x)",
+ new_state));
+ stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
+ if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
+ (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
+ }
+#if defined(KDTRACE_HOOKS)
+ if (((old_state & SCTP_STATE_MASK) != new_state) &&
+ !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
+ (new_state == SCTP_STATE_INUSE))) {
+ SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
+ }
+#endif
+}
+
+void
+sctp_add_substate(struct sctp_tcb *stcb, int substate)
+{
+#if defined(KDTRACE_HOOKS)
+ int old_state = stcb->asoc.state;
+#endif
+
+ KASSERT((substate & SCTP_STATE_MASK) == 0,
+ ("sctp_add_substate: Can't set state (substate = %x)",
+ substate));
+ stcb->asoc.state |= substate;
+#if defined(KDTRACE_HOOKS)
+ if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+ ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
+ ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
+ ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
+ SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
+ }
+#endif
+}
+
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet/sctputil.h b/ext/sctp/usrsctp/usrsctplib/netinet/sctputil.h
new file mode 100644
index 000000000..84e83036b
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet/sctputil.h
@@ -0,0 +1,422 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctputil.h 363323 2020-07-19 12:34:19Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_UTIL_H_
+#define _NETINET_SCTP_UTIL_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#define SCTP_READ_LOCK_HELD 1
+#define SCTP_READ_LOCK_NOT_HELD 0
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+void sctp_print_out_track_log(struct sctp_tcb *stcb);
+#endif
+
+#ifdef SCTP_MBUF_LOGGING
+struct mbuf *sctp_m_free(struct mbuf *m);
+void sctp_m_freem(struct mbuf *m);
+#else
+#define sctp_m_free m_free
+#define sctp_m_freem m_freem
+#endif
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+void
+sctp_log_trace(uint32_t fr, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f);
+#endif
+
+#define sctp_get_associd(stcb) ((sctp_assoc_t)stcb->asoc.assoc_id)
+
+
+/*
+ * Function prototypes
+ */
+int32_t
+sctp_map_assoc_state(int);
+
+uint32_t
+sctp_get_ifa_hash_val(struct sockaddr *addr);
+
+struct sctp_ifa *
+sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int hold_lock);
+
+struct sctp_ifa *
+sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock);
+
+uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
+
+uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int);
+
+int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, uint32_t, uint32_t, uint16_t);
+
+void sctp_fill_random_store(struct sctp_pcb *);
+
+void
+sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin,
+ uint16_t numberout, int flag);
+void
+sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag);
+
+/*
+ * NOTE: sctp_timer_start() will increment the reference count of any relevant
+ * structure the timer is referencing, in order to prevent a race condition
+ * between the timer executing and the structure being freed.
+ *
+ * When the timer fires or sctp_timer_stop() is called, these references are
+ * removed.
+ */
+void
+sctp_timer_start(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_timer_stop(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, uint32_t);
+
+int
+sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id);
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *, struct sctp_association *, uint32_t);
+
+void
+sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ int so_locked
+#if !(defined(__APPLE__) && !defined(__Userspace__))
+ SCTP_UNUSED
+#endif
+);
+
+#if defined(__Userspace__)
+void sctp_invoke_recv_callback(struct sctp_inpcb *,
+ struct sctp_tcb *,
+ struct sctp_queued_to_read *,
+ int);
+
+#endif
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sockbuf *sb,
+ int end,
+ int inpread_locked,
+ int so_locked);
+
+void sctp_iterator_worker(void);
+
+uint32_t sctp_get_prev_mtu(uint32_t);
+uint32_t sctp_get_next_mtu(uint32_t);
+
+void
+sctp_timeout_handler(void *);
+
+int
+sctp_calculate_rto(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_nets *, struct timeval *, int);
+
+uint32_t sctp_calculate_len(struct mbuf *);
+
+caddr_t sctp_m_getptr(struct mbuf *, int, int, uint8_t *);
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *, int,
+ struct sctp_paramhdr *, int);
+
+struct mbuf *
+sctp_add_pad_tombuf(struct mbuf *, int);
+
+struct mbuf *
+sctp_pad_lastmbuf(struct mbuf *, int, struct mbuf *);
+
+void sctp_ulp_notify(uint32_t, struct sctp_tcb *, uint32_t, void *, int);
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+ struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb, int waitflags);
+
+
+void sctp_stop_timers_for_shutdown(struct sctp_tcb *);
+
+/* Stop all timers for association and remote addresses. */
+void sctp_stop_association_timers(struct sctp_tcb *, bool);
+
+void sctp_report_all_outbound(struct sctp_tcb *, uint16_t, int, int);
+
+int sctp_expand_mapping_array(struct sctp_association *, uint32_t);
+
+void sctp_abort_notification(struct sctp_tcb *, uint8_t, uint16_t,
+ struct sctp_abort_chunk *, int);
+
+/* We abort in response to an IP packet, for some reason. */
+void
+sctp_abort_association(struct sctp_inpcb *, struct sctp_tcb *, struct mbuf *,
+ int, struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, struct mbuf *,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t,
+#endif
+ uint32_t, uint16_t);
+
+
+/* We choose to abort via user input */
+void
+sctp_abort_an_association(struct sctp_inpcb *, struct sctp_tcb *,
+ struct mbuf *, int);
+
+void sctp_handle_ootb(struct mbuf *, int, int,
+ struct sockaddr *, struct sockaddr *,
+ struct sctphdr *, struct sctp_inpcb *,
+ struct mbuf *,
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ uint8_t, uint32_t, uint16_t,
+#endif
+ uint32_t, uint16_t);
+
+int sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
+ int totaddr, int *error);
+
+int
+sctp_connectx_helper_find(struct sctp_inpcb *, struct sockaddr *,
+ unsigned int, unsigned int *, unsigned int *, unsigned int);
+
+int sctp_is_there_an_abort_here(struct mbuf *, int, uint32_t *);
+#ifdef INET6
+uint32_t sctp_is_same_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+
+#ifdef SCTP_KAME
+#define sctp_recover_scope_mac(addr, store) do { \
+ if ((addr->sin6_family == AF_INET6) && \
+ (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) { \
+ *store = *addr; \
+ if (addr->sin6_scope_id == 0) { \
+ if (!sa6_recoverscope(store)) { \
+ addr = store; \
+ } \
+ } else { \
+ in6_clearscope(&addr->sin6_addr); \
+ addr = store; \
+ } \
+ } \
+} while (0)
+#else
+#define sctp_recover_scope_mac(addr, store) do { \
+ if ((addr->sin6_family == AF_INET6) && \
+ (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) { \
+ *store = *addr; \
+ if (addr->sin6_scope_id == 0) { \
+ if (!in6_recoverscope(store, &store->sin6_addr, \
+ NULL)) { \
+ addr = store; \
+ } \
+ } else { \
+ in6_clearscope(&addr->sin6_addr); \
+ addr = store; \
+ } \
+ } \
+} while (0)
+#endif
+#endif
+#endif
+
+int sctp_cmpaddr(struct sockaddr *, struct sockaddr *);
+
+void sctp_print_address(struct sockaddr *);
+
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *, struct sctp_tmit_chunk *,
+ uint8_t, int);
+
+struct mbuf *sctp_generate_cause(uint16_t, char *);
+struct mbuf *sctp_generate_no_user_data_cause(uint32_t);
+
+void sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
+ struct sockaddr *sa, uint32_t vrf_id, int *error,
+ void *p);
+void sctp_bindx_delete_address(struct sctp_inpcb *inp, struct sockaddr *sa,
+ uint32_t vrf_id, int *error);
+
+int sctp_local_addr_count(struct sctp_tcb *stcb);
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_tmit_chunk *, int);
+
+#else
+#define sctp_free_bufspace(stcb, asoc, tp1, chk_cnt) \
+do { \
+ if (tp1->data != NULL) { \
+ atomic_subtract_int(&((asoc)->chunks_on_out_queue), chk_cnt); \
+ if ((asoc)->total_output_queue_size >= tp1->book_size) { \
+ atomic_subtract_int(&((asoc)->total_output_queue_size), tp1->book_size); \
+ } else { \
+ (asoc)->total_output_queue_size = 0; \
+ } \
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { \
+ atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_cc), tp1->book_size); \
+ } else { \
+ stcb->sctp_socket->so_snd.sb_cc = 0; \
+ } \
+ } \
+ } \
+} while (0)
+
+#endif
+
+#define sctp_free_spbufspace(stcb, asoc, sp) \
+do { \
+ if (sp->data != NULL) { \
+ if ((asoc)->total_output_queue_size >= sp->length) { \
+ atomic_subtract_int(&(asoc)->total_output_queue_size, sp->length); \
+ } else { \
+ (asoc)->total_output_queue_size = 0; \
+ } \
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ if (stcb->sctp_socket->so_snd.sb_cc >= sp->length) { \
+			atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length); \
+ } else { \
+ stcb->sctp_socket->so_snd.sb_cc = 0; \
+ } \
+ } \
+ } \
+} while (0)
+
+#define sctp_snd_sb_alloc(stcb, sz) \
+do { \
+	atomic_add_int(&stcb->asoc.total_output_queue_size, sz); \
+ if ((stcb->sctp_socket != NULL) && \
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+		atomic_add_int(&stcb->sctp_socket->so_snd.sb_cc, sz); \
+ } \
+} while (0)
+
+/* Functions to start/stop UDP tunneling. */
+#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(__Userspace__)
+void sctp_over_udp_stop(void);
+int sctp_over_udp_start(void);
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+void sctp_over_udp_restart(void);
+#endif
+
+int
+sctp_soreceive(struct socket *so, struct sockaddr **psa,
+ struct uio *uio,
+ struct mbuf **mp0,
+ struct mbuf **controlp,
+ int *flagsp);
+
+void
+sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d);
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb,
+ uint32_t wake_cnt, int from);
+
+void sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t, uint16_t, uint16_t, int);
+
+void sctp_log_nagle_event(struct sctp_tcb *stcb, int action);
+
+
+#ifdef SCTP_MBUF_LOGGING
+void
+sctp_log_mb(struct mbuf *m, int from);
+
+void
+sctp_log_mbc(struct mbuf *m, int from);
+#endif
+
+void
+sctp_sblog(struct sockbuf *sb,
+ struct sctp_tcb *stcb, int from, int incr);
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control,
+ struct sctp_queued_to_read *poschk,
+ int from);
+void sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *, int, uint8_t);
+void rto_logging(struct sctp_nets *net, int from);
+
+void sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc);
+
+void sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from);
+void sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *, int, int, uint8_t);
+void sctp_log_block(uint8_t, struct sctp_association *, ssize_t);
+void sctp_log_rwnd(uint8_t, uint32_t, uint32_t, uint32_t);
+void sctp_log_rwnd_set(uint8_t, uint32_t, uint32_t, uint32_t, uint32_t);
+int sctp_fill_stat_log(void *, size_t *);
+void sctp_log_fr(uint32_t, uint32_t, uint32_t, int);
+void sctp_log_sack(uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, int);
+void sctp_log_map(uint32_t, uint32_t, uint32_t, int);
+void sctp_print_mapping_array(struct sctp_association *asoc);
+void sctp_clr_stat_log(void);
+
+
+#ifdef SCTP_AUDITING_ENABLED
+void
+sctp_auditing(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+void sctp_audit_log(uint8_t, uint8_t);
+
+#endif
+uint32_t sctp_min_mtu(uint32_t, uint32_t, uint32_t);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+void sctp_hc_set_mtu(union sctp_sockstore *, uint16_t, uint32_t);
+uint32_t sctp_hc_get_mtu(union sctp_sockstore *, uint16_t);
+#endif
+void sctp_set_state(struct sctp_tcb *, int);
+void sctp_add_substate(struct sctp_tcb *, int);
+uint32_t sctp_ticks_to_msecs(uint32_t);
+uint32_t sctp_msecs_to_ticks(uint32_t);
+uint32_t sctp_ticks_to_secs(uint32_t);
+uint32_t sctp_secs_to_ticks(uint32_t);
+
+#endif /* _KERNEL || __Userspace__ */
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet6/meson.build b/ext/sctp/usrsctp/usrsctplib/netinet6/meson.build
new file mode 100644
index 000000000..04307df59
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet6/meson.build
@@ -0,0 +1 @@
+sources += files('sctp6_usrreq.c')
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_usrreq.c b/ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_usrreq.c
new file mode 100644
index 000000000..1fc81daa8
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_usrreq.c
@@ -0,0 +1,1809 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet6/sctp6_usrreq.c 361895 2020-06-07 14:39:20Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#ifdef INET6
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_var.h>
+#include <netinet6/sctp6_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_crc32.h>
+#if !defined(_WIN32)
+#include <netinet/icmp6.h>
+#include <netinet/udp.h>
+#endif
+#if defined(__Userspace__)
+int ip6_v6only = 0;
+#endif
+#if defined(__Userspace__)
+#ifdef INET
+void
+in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
+{
+#if defined(_WIN32)
+ uint32_t temp;
+#endif
+ memset(sin, 0, sizeof(*sin));
+#ifdef HAVE_SIN_LEN
+ sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+ sin->sin_family = AF_INET;
+ sin->sin_port = sin6->sin6_port;
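+	/*
+	 * A v4-mapped address carries the IPv4 address in the last 32
+	 * bits of the IPv6 address.  The Windows in6_addr provides no
+	 * 32-bit accessor, so assemble the value from the two trailing
+	 * 16-bit words there.
+	 */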
+#if defined(_WIN32)
+ temp = sin6->sin6_addr.s6_addr16[7];
+ temp = temp << 16;
+ temp = temp | sin6->sin6_addr.s6_addr16[6];
+ sin->sin_addr.s_addr = temp;
+#else
+ sin->sin_addr.s_addr = sin6->sin6_addr.s6_addr32[3];
+#endif
+}
+
+void
+in6_sin6_2_sin_in_sock(struct sockaddr *nam)
+{
+ struct sockaddr_in *sin_p;
+ struct sockaddr_in6 sin6;
+
+ /* save original sockaddr_in6 addr and convert it to sockaddr_in */
+ sin6 = *(struct sockaddr_in6 *)nam;
+ sin_p = (struct sockaddr_in *)nam;
+ in6_sin6_2_sin(sin_p, &sin6);
+}
+
+void
+in6_sin_2_v4mapsin6(const struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
+{
+ memset(sin6, 0, sizeof(struct sockaddr_in6));
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ sin6->sin6_port = sin->sin_port;
+#if defined(_WIN32)
+ ((uint32_t *)&sin6->sin6_addr)[0] = 0;
+ ((uint32_t *)&sin6->sin6_addr)[1] = 0;
+ ((uint32_t *)&sin6->sin6_addr)[2] = htonl(0xffff);
+ ((uint32_t *)&sin6->sin6_addr)[3] = sin->sin_addr.s_addr;
+#else
+ sin6->sin6_addr.s6_addr32[0] = 0;
+ sin6->sin6_addr.s6_addr32[1] = 0;
+ sin6->sin6_addr.s6_addr32[2] = htonl(0xffff);
+ sin6->sin6_addr.s6_addr32[3] = sin->sin_addr.s_addr;
+#endif
+}
+#endif
+#endif
+
+#if !defined(__Userspace__)
+int
+#if defined(__APPLE__) || defined(__FreeBSD__)
+sctp6_input_with_port(struct mbuf **i_pak, int *offp, uint16_t port)
+#else
+sctp6_input(struct mbuf **i_pak, int *offp, int proto)
+#endif
+{
+ struct mbuf *m;
+ int iphlen;
+ uint32_t vrf_id;
+ uint8_t ecn_bits;
+ struct sockaddr_in6 src, dst;
+ struct ip6_hdr *ip6;
+ struct sctphdr *sh;
+ struct sctp_chunkhdr *ch;
+ int length, offset;
+ uint8_t compute_crc;
+#if defined(__FreeBSD__)
+ uint32_t mflowid;
+ uint8_t mflowtype;
+ uint16_t fibnum;
+#endif
+#if !(defined(__APPLE__) || defined(__FreeBSD__))
+ uint16_t port = 0;
+#endif
+
+ iphlen = *offp;
+ if (SCTP_GET_PKT_VRFID(*i_pak, vrf_id)) {
+ SCTP_RELEASE_PKT(*i_pak);
+ return (IPPROTO_DONE);
+ }
+ m = SCTP_HEADER_TO_CHAIN(*i_pak);
+#ifdef SCTP_MBUF_LOGGING
+	/* Log any input mbufs */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ sctp_log_mbc(m, SCTP_MBUF_INPUT);
+ }
+#endif
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+ sctp_packet_log(m);
+ }
+#endif
+#if defined(__FreeBSD__)
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
+ m->m_pkthdr.len,
+ if_name(m->m_pkthdr.rcvif),
+ (int)m->m_pkthdr.csum_flags, CSUM_BITS);
+#endif
+#if defined(__APPLE__)
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp6_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
+ m->m_pkthdr.len,
+ m->m_pkthdr.rcvif->if_name,
+ m->m_pkthdr.rcvif->if_unit,
+ m->m_pkthdr.csum_flags);
+#endif
+#if defined(_WIN32) && !defined(__Userspace__)
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+ m->m_pkthdr.len,
+ m->m_pkthdr.rcvif->if_xname,
+ m->m_pkthdr.csum_flags);
+#endif
+#if defined(__FreeBSD__)
+ mflowid = m->m_pkthdr.flowid;
+ mflowtype = M_HASHTYPE_GET(m);
+ fibnum = M_GETFIB(m);
+#endif
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+ /* Get IP, SCTP, and first chunk header together in the first mbuf. */
+ offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ if (m->m_len < offset) {
+ m = m_pullup(m, offset);
+ if (m == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return (IPPROTO_DONE);
+ }
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ sh = (struct sctphdr *)(mtod(m, caddr_t) + iphlen);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ offset -= sizeof(struct sctp_chunkhdr);
+ memset(&src, 0, sizeof(struct sockaddr_in6));
+ src.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ src.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ src.sin6_port = sh->src_port;
+ src.sin6_addr = ip6->ip6_src;
+#if defined(__FreeBSD__)
+#if defined(__APPLE__)
+ /* XXX: This code should also be used on Apple */
+#endif
+ if (in6_setscope(&src.sin6_addr, m->m_pkthdr.rcvif, NULL) != 0) {
+ goto out;
+ }
+#endif
+ memset(&dst, 0, sizeof(struct sockaddr_in6));
+ dst.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ dst.sin6_port = sh->dest_port;
+ dst.sin6_addr = ip6->ip6_dst;
+#if defined(__FreeBSD__)
+#if defined(__APPLE__)
+ /* XXX: This code should also be used on Apple */
+#endif
+ if (in6_setscope(&dst.sin6_addr, m->m_pkthdr.rcvif, NULL) != 0) {
+ goto out;
+ }
+#endif
+#if defined(__APPLE__)
+#if defined(NFAITH) && 0 < NFAITH
+ if (faithprefix(&dst.sin6_addr)) {
+ goto out;
+ }
+#endif
+#endif
+ length = ntohs(ip6->ip6_plen) + iphlen;
+ /* Validate mbuf chain length with IP payload length. */
+ if (SCTP_HEADER_LEN(m) != length) {
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "sctp6_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto out;
+ }
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ goto out;
+ }
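+	/*
+	 * The traffic class octet sits in bits 20-27 of the (host order)
+	 * IPv6 flow word; its low two bits carry the ECN field.
+	 */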
+ ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff);
+#if defined(__FreeBSD__)
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ compute_crc = 0;
+ } else {
+#else
+ if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (IN6_ARE_ADDR_EQUAL(&src.sin6_addr, &dst.sin6_addr))) {
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ compute_crc = 0;
+ } else {
+#endif
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ compute_crc = 1;
+ }
+ sctp_common_input_processing(&m, iphlen, offset, length,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ compute_crc,
+ ecn_bits,
+#if defined(__FreeBSD__)
+ mflowtype, mflowid, fibnum,
+#endif
+ vrf_id, port);
+ out:
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return (IPPROTO_DONE);
+}
+
+#if defined(__APPLE__)
+int
+sctp6_input(struct mbuf **i_pak, int *offp)
+{
+ return (sctp6_input_with_port(i_pak, offp, 0));
+}
+#endif
+
+#if defined(__FreeBSD__)
+int
+sctp6_input(struct mbuf **i_pak, int *offp, int proto SCTP_UNUSED)
+{
+ return (sctp6_input_with_port(i_pak, offp, 0));
+}
+#endif
+
+void
+sctp6_notify(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint8_t icmp6_type,
+ uint8_t icmp6_code,
+ uint32_t next_mtu)
+{
+#if defined(__APPLE__)
+ struct socket *so;
+#endif
+ int timer_stopped;
+
+ switch (icmp6_type) {
+ case ICMP6_DST_UNREACH:
+ if ((icmp6_code == ICMP6_DST_UNREACH_NOROUTE) ||
+ (icmp6_code == ICMP6_DST_UNREACH_ADMIN) ||
+ (icmp6_code == ICMP6_DST_UNREACH_BEYONDSCOPE) ||
+ (icmp6_code == ICMP6_DST_UNREACH_ADDR)) {
+ /* Mark the net unreachable. */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* Ok that destination is not reachable */
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state &= ~SCTP_ADDR_PF;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, 0, (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ case ICMP6_PARAM_PROB:
+ /* Treat it like an ABORT. */
+ if (icmp6_code == ICMP6_PARAMPROB_NEXTHEADER) {
+ sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED);
+#if defined(__APPLE__)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
+#if defined(__APPLE__)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case ICMP6_PACKET_TOO_BIG:
+ if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ timer_stopped = 1;
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
+ } else {
+ timer_stopped = 0;
+ }
+ /* Update the path MTU. */
+ if (net->port) {
+ next_mtu -= sizeof(struct udphdr);
+ }
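+		/* Worked example (illustrative numbers only): if ICMPv6
+		 * reports an MTU of 1280 and this path is UDP-encapsulated,
+		 * next_mtu becomes 1280 - 8 = 1272, because the 8-byte UDP
+		 * header must also fit into every packet sent on the wire.
+		 */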
+ if (net->mtu > next_mtu) {
+ net->mtu = next_mtu;
+#if defined(__FreeBSD__)
+ if (net->port) {
+ sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu + sizeof(struct udphdr));
+ } else {
+ sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu);
+ }
+#endif
+ }
+ /* Update the association MTU */
+ if (stcb->asoc.smallest_mtu > next_mtu) {
+ sctp_pathmtu_adjustment(stcb, next_mtu);
+ }
+ /* Finally, start the PMTU timer if it was running before. */
+ if (timer_stopped) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ default:
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+}
+
+void
+#if defined(__APPLE__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN)
+sctp6_ctlinput(int cmd, struct sockaddr *pktdst, void *d, struct ifnet *ifp SCTP_UNUSED)
+#else
+sctp6_ctlinput(int cmd, struct sockaddr *pktdst, void *d)
+#endif
+{
+ struct ip6ctlparam *ip6cp;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct sctphdr sh;
+ struct sockaddr_in6 src, dst;
+
+#ifdef HAVE_SA_LEN
+ if (pktdst->sa_family != AF_INET6 ||
+ pktdst->sa_len != sizeof(struct sockaddr_in6)) {
+#else
+ if (pktdst->sa_family != AF_INET6) {
+#endif
+ return;
+ }
+
+ if ((unsigned)cmd >= PRC_NCMDS) {
+ return;
+ }
+ if (PRC_IS_REDIRECT(cmd)) {
+ d = NULL;
+ } else if (inet6ctlerrmap[cmd] == 0) {
+ return;
+ }
+ /* If the parameter is from icmp6, decode it. */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ } else {
+ ip6cp = (struct ip6ctlparam *)NULL;
+ }
+
+ if (ip6cp != NULL) {
+ /*
+	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are
+ * valid.
+ */
+ if (ip6cp->ip6c_m == NULL) {
+ return;
+ }
+
+ /* Check if we can safely examine the ports and the
+ * verification tag of the SCTP common header.
+ */
+ if (ip6cp->ip6c_m->m_pkthdr.len <
+ (int32_t)(ip6cp->ip6c_off + offsetof(struct sctphdr, checksum))) {
+ return;
+ }
+
+ /* Copy out the port numbers and the verification tag. */
+ memset(&sh, 0, sizeof(sh));
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off,
+ sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
+ (caddr_t)&sh);
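+		/* These three fields are the first 8 bytes of the SCTP
+		 * common header, i.e. everything up to (but not including)
+		 * the checksum.
+		 */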
+ memset(&src, 0, sizeof(struct sockaddr_in6));
+ src.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ src.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ src.sin6_port = sh.src_port;
+ src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
+#if defined(__FreeBSD__)
+ if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
+ return;
+ }
+#endif
+ memset(&dst, 0, sizeof(struct sockaddr_in6));
+ dst.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ dst.sin6_port = sh.dest_port;
+ dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
+#if defined(__FreeBSD__)
+ if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
+ return;
+ }
+#endif
+ inp = NULL;
+ net = NULL;
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+ (struct sockaddr *)&src,
+ &inp, &net, 1, SCTP_DEFAULT_VRFID);
+ if ((stcb != NULL) &&
+ (net != NULL) &&
+ (inp != NULL)) {
+ /* Check the verification tag */
+ if (ntohl(sh.v_tag) != 0) {
+ /*
+ * This must be the verification tag used for
+ * sending out packets. We don't consider
+ * packets reflecting the verification tag.
+ */
+ if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+#if defined(__FreeBSD__)
+ if (ip6cp->ip6c_m->m_pkthdr.len >=
+ ip6cp->ip6c_off + sizeof(struct sctphdr) +
+ sizeof(struct sctp_chunkhdr) +
+ offsetof(struct sctp_init, a_rwnd)) {
+ /*
+ * In this case we can check if we
+ * got an INIT chunk and if the
+ * initiate tag matches.
+ */
+ uint32_t initiate_tag;
+ uint8_t chunk_type;
+
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off +
+ sizeof(struct sctphdr),
+ sizeof(uint8_t),
+ (caddr_t)&chunk_type);
+ m_copydata(ip6cp->ip6c_m,
+ ip6cp->ip6c_off +
+ sizeof(struct sctphdr) +
+ sizeof(struct sctp_chunkhdr),
+ sizeof(uint32_t),
+ (caddr_t)&initiate_tag);
+ if ((chunk_type != SCTP_INITIATION) ||
+ (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+#else
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+#endif
+ }
+ sctp6_notify(inp, stcb, net,
+ ip6cp->ip6c_icmp6->icmp6_type,
+ ip6cp->ip6c_icmp6->icmp6_code,
+ ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
+#if defined(__Userspace__)
+ if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+		    (stcb->sctp_socket != NULL)) {
+ struct socket *upcall_socket;
+
+ upcall_socket = stcb->sctp_socket;
+ SOCK_LOCK(upcall_socket);
+ soref(upcall_socket);
+ SOCK_UNLOCK(upcall_socket);
+ if ((upcall_socket->so_upcall != NULL) &&
+ (upcall_socket->so_error != 0)) {
+ (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(upcall_socket);
+ sorele(upcall_socket);
+ }
+#endif
+ } else {
+ if ((stcb == NULL) && (inp != NULL)) {
+ /* reduce inp's ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ }
+}
+#endif
+
+/*
+ * this routine can probably be collapsed into the one in sctp_usrreq.c
+ * since they do the same thing; now we look up with a sockaddr
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+static int
+sctp6_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in6 addrs[2];
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+ int error;
+ uint32_t vrf_id;
+
+ vrf_id = SCTP_DEFAULT_VRFID;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+#else
+ error = suser(req->p);
+#endif
+ if (error)
+ return (error);
+
+ if (req->newlen != sizeof(addrs)) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (req->oldlen != sizeof(struct ucred)) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+
+ stcb = sctp_findassociation_addr_sa(sin6tosa(&addrs[1]),
+ sin6tosa(&addrs[0]),
+ &inp, &net, 1, vrf_id);
+ if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+ if ((inp != NULL) && (stcb == NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ goto cred_can_cont;
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ error = ENOENT;
+ goto out;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+	/* We use the write lock here only
+	 * because the error leg needs it.
+	 * If we used RLOCK, then we would have
+	 * to wlock/decr/unlock/rlock, which
+	 * in theory could create a hole.
+	 * Better to take the stronger wlock.
+ */
+ SCTP_INP_WLOCK(inp);
+ cred_can_cont:
+ error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
+ if (error) {
+ SCTP_INP_WUNLOCK(inp);
+ goto out;
+ }
+ cru2x(inp->sctp_socket->so_cred, &xuc);
+ SCTP_INP_WUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+out:
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
+ 0, 0,
+ sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection");
+
+#endif
+
+/* This is the same as sctp_abort(); the two could be made common. */
+#if defined(__Userspace__)
+int
+#elif defined(__FreeBSD__) || defined(_WIN32)
+static void
+#else
+static int
+#endif
+sctp6_abort(struct socket *so)
+{
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
+ return;
+#else
+ return (EINVAL);
+#endif
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 16);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ SOCK_LOCK(so);
+ SCTP_SB_CLEAR(so->so_snd);
+		/* same for the rcv side; these are only
+		 * here for the accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+#if defined(__APPLE__) && !defined(__Userspace__)
+ so->so_usecount--;
+#else
+ /* Now null out the reference, we are completely detached. */
+ so->so_pcb = NULL;
+#endif
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+ return;
+#else
+ return (0);
+#endif
+}
+
+#if defined(__Userspace__)
+int
+sctp6_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id)
+#elif defined(__FreeBSD__)
+static int
+sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED)
+#elif defined(_WIN32)
+static int
+sctp6_attach(struct socket *so, int proto SCTP_UNUSED, PKTHREAD p SCTP_UNUSED)
+#else
+static int
+sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSED)
+#endif
+{
+ int error;
+ struct sctp_inpcb *inp;
+#if !defined(__Userspace__)
+ uint32_t vrf_id = SCTP_DEFAULT_VRFID;
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+ if (error)
+ return (error);
+ }
+ error = sctp_inpcb_alloc(so, vrf_id);
+ if (error)
+ return (error);
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6; /* I'm v6! */
+
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ inp->ip_inp.inp.in6p_hops = -1; /* use kernel default */
+ inp->ip_inp.inp.in6p_cksum = -1; /* just to be sure */
+#ifdef INET
+ /*
+ * XXX: ugly!! IPv4 TTL initialization is necessary for an IPv6
+ * socket as well, because the socket may be bound to an IPv6
+ * wildcard address, which may match an IPv4-mapped IPv6 address.
+ */
+ inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ return (0);
+}
+
+#if defined(__Userspace__)
+int
+sctp6_bind(struct socket *so, struct sockaddr *addr, void * p)
+{
+#elif defined(__FreeBSD__)
+static int
+sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+#elif defined(__APPLE__)
+static int
+sctp6_bind(struct socket *so, struct sockaddr *addr, struct proc *p)
+{
+#elif defined(_WIN32)
+static int
+sctp6_bind(struct socket *so, struct sockaddr *addr, PKTHREAD p)
+{
+#else
+static int
+sctp6_bind(struct socket *so, struct mbuf *nam, struct proc *p)
+{
+ struct sockaddr *addr = nam ? mtod(nam, struct sockaddr *): NULL;
+
+#endif
+ struct sctp_inpcb *inp;
+ int error;
+ u_char vflagsav;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ if (addr) {
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ }
+#endif
+ vflagsav = inp->ip_inp.inp.inp_vflag;
+ inp->ip_inp.inp.inp_vflag &= ~INP_IPV4;
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ if ((addr != NULL) && (SCTP_IPV6_V6ONLY(inp) == 0)) {
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ /* binding v4 addr to v6 socket, so reset flags */
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ inp->ip_inp.inp.inp_vflag &= ~INP_IPV6;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = (struct sockaddr_in6 *)addr;
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ }
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6_p);
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ inp->ip_inp.inp.inp_vflag &= ~INP_IPV6;
+ error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, NULL, p);
+ goto out;
+ }
+#endif
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+ } else if (addr != NULL) {
+ struct sockaddr_in6 *sin6_p;
+
+ /* IPV6_V6ONLY socket */
+#ifdef INET
+ if (addr->sa_family == AF_INET) {
+ /* can't bind v4 addr to v6 only socket! */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+#endif
+ sin6_p = (struct sockaddr_in6 *)addr;
+
+ if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
+ /* can't bind v4-mapped addrs either! */
+ /* NOTE: we don't support SIIT */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ }
+ error = sctp_inpcb_bind(so, addr, NULL, p);
+out:
+ if (error != 0)
+ inp->ip_inp.inp.inp_vflag = vflagsav;
+ return (error);
+}
+
+
+#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__)
+#if !defined(__Userspace__)
+static void
+#else
+void
+#endif
+sctp6_close(struct socket *so)
+{
+ sctp_close(so);
+}
+
+/* This could be made common with sctp_detach() since they are identical */
+#else
+
+static
+int
+sctp6_detach(struct socket *so)
+{
+#if defined(__Userspace__)
+ sctp_close(so);
+ return (0);
+#else
+ return (sctp_detach(so));
+#endif
+}
+
+#endif
+
+#if !defined(__Userspace__)
+static
+#endif
+int
+sctp6_disconnect(struct socket *so)
+{
+ return (sctp_disconnect(so));
+}
+
+
+int
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p);
+
+#else
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct proc *p);
+
+#endif
+
+#if !defined(_WIN32) && !defined(__Userspace__)
+#if defined(__FreeBSD__)
+static int
+sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p)
+{
+#elif defined(__APPLE__)
+static int
+sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct proc *p)
+{
+#else
+static int
+sctp6_send(struct socket *so, int flags, struct mbuf *m, struct mbuf *nam,
+ struct mbuf *control, struct proc *p)
+{
+ struct sockaddr *addr = nam ? mtod(nam, struct sockaddr *): NULL;
+#endif
+ struct sctp_inpcb *inp;
+
+#ifdef INET
+ struct sockaddr_in6 *sin6;
+#endif /* INET */
+ /* No SPL needed since sctp_output does this */
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ if (control) {
+ SCTP_RELEASE_PKT(control);
+ control = NULL;
+ }
+ SCTP_RELEASE_PKT(m);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ /*
+	 * For the TCP model we may get a NULL addr; if we are a
+	 * connected socket, that's ok.
+ */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
+ (addr == NULL)) {
+ goto connected_type;
+ }
+ if (addr == NULL) {
+ SCTP_RELEASE_PKT(m);
+ if (control) {
+ SCTP_RELEASE_PKT(control);
+ control = NULL;
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EDESTADDRREQ);
+ return (EDESTADDRREQ);
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ /*
+ * if IPV6_V6ONLY flag, we discard datagrams destined to a
+ * v4 addr or v4-mapped addr
+ */
+ if (addr->sa_family == AF_INET) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ /* convert v4-mapped into v4 addr and send */
+ in6_sin6_2_sin(&sin, sin6);
+ return (sctp_sendm(so, flags, m, (struct sockaddr *)&sin, control, p));
+ }
+#endif /* INET */
+connected_type:
+ /* now what about control */
+ if (control) {
+ if (inp->control) {
+ SCTP_PRINTF("huh? control set?\n");
+ SCTP_RELEASE_PKT(inp->control);
+ inp->control = NULL;
+ }
+ inp->control = control;
+ }
+ /* Place the data */
+ if (inp->pkt) {
+ SCTP_BUF_NEXT(inp->pkt_last) = m;
+ inp->pkt_last = m;
+ } else {
+ inp->pkt_last = inp->pkt = m;
+ }
+ if (
+#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
+	    /* FreeBSD and macOS use a flag that is passed in */
+ ((flags & PRUS_MORETOCOME) == 0)
+#else
+	    1 /* OpenBSD does not have any "more to come"
+ * indication */
+#endif
+ ) {
+ /*
+		 * Note: with the current version this code will only be
+		 * used by OpenBSD; NetBSD and FreeBSD have methods for
+		 * re-defining sosend() to use sctp_sosend(). One can
+		 * optionally switch back to this code by changing back the
+		 * definitions, but this is not advisable.
+ */
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ int ret;
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ inp->pkt = NULL;
+ inp->control = NULL;
+ return (ret);
+ } else {
+ return (0);
+ }
+}
+#endif
+
+#if defined(__Userspace__)
+int
+sctp6_connect(struct socket *so, struct sockaddr *addr)
+{
+ void *p = NULL;
+#elif defined(__FreeBSD__)
+static int
+sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+#elif defined(__APPLE__)
+static int
+sctp6_connect(struct socket *so, struct sockaddr *addr, struct proc *p)
+{
+#elif defined(_WIN32)
+static int
+sctp6_connect(struct socket *so, struct sockaddr *addr, PKTHREAD p)
+{
+#else
+static int
+sctp6_connect(struct socket *so, struct mbuf *nam, struct proc *p)
+{
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+#endif
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ struct epoch_tracker et;
+#endif
+ uint32_t vrf_id;
+ int error = 0;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+#ifdef INET
+ struct sockaddr_in6 *sin6;
+ union sctp_sockstore store;
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+		return (ECONNRESET);	/* I made this the same as TCP since
+					 * we are not set up */
+ }
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#if !(defined(_WIN32) && !defined(__Userspace__))
+ switch (addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+#endif
+
+ vrf_id = inp->def_vrf_id;
+ SCTP_ASOC_CREATE_LOCK(inp);
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+ SCTP_INP_RUNLOCK(inp);
+ error = sctp6_bind(so, NULL, p);
+ if (error) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+
+ return (error);
+ }
+ SCTP_INP_RLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ /*
+ * if IPV6_V6ONLY flag, ignore connections destined to a v4
+ * addr or v4-mapped addr
+ */
+ if (addr->sa_family == AF_INET) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ /* convert v4-mapped into v4 addr */
+ in6_sin6_2_sin(&store.sin, sin6);
+ addr = &store.sa;
+ }
+#endif /* INET */
+ /* Now do we connect? */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+
+ if (stcb != NULL) {
+		/* Already have or am bringing up an association */
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EALREADY);
+ return (EALREADY);
+ }
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+ inp->sctp_ep.pre_open_stream_count,
+ inp->sctp_ep.port, p,
+ SCTP_INITIALIZE_AUTH_PARAMS);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ return (error);
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_ENTER(et);
+#endif
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+ NET_EPOCH_EXIT(et);
+#endif
+ return (error);
+}
+
+static int
+#if !defined(__Userspace__)
+sctp6_getaddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in6 *sin6;
+#else
+sctp6_getaddr(struct socket *so, struct mbuf *nam)
+{
+ struct sockaddr_in6 *sin6 = mtod(nam, struct sockaddr_in6 *);
+#endif
+ struct sctp_inpcb *inp;
+ uint32_t vrf_id;
+ struct sctp_ifa *sctp_ifa;
+
+#if defined(SCTP_KAME) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ int error;
+#endif
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+#if !defined(__Userspace__)
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof(*sin6));
+ if (sin6 == NULL)
+ return (ENOMEM);
+#else
+ SCTP_BUF_LEN(nam) = sizeof(*sin6);
+ memset(sin6, 0, sizeof(*sin6));
+#endif
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(*sin6);
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+ return (ECONNRESET);
+ }
+ SCTP_INP_RLOCK(inp);
+ sin6->sin6_port = inp->sctp_lport;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* For the bound all case you get back 0 */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ struct sctp_tcb *stcb;
+ struct sockaddr_in6 *sin_a6;
+ struct sctp_nets *net;
+ int fnd;
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ return (ENOENT);
+ }
+ fnd = 0;
+ sin_a6 = NULL;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (sin_a6 == NULL)
+ /* this will make coverity happy */
+ continue;
+
+ if (sin_a6->sin6_family == AF_INET6) {
+ fnd = 1;
+ break;
+ }
+ }
+ if ((!fnd) || (sin_a6 == NULL)) {
+ /* punt */
+ SCTP_INP_RUNLOCK(inp);
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ return (ENOENT);
+ }
+ vrf_id = inp->def_vrf_id;
+ sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id);
+ if (sctp_ifa) {
+ sin6->sin6_addr = sctp_ifa->address.sin6.sin6_addr;
+ }
+ } else {
+ /* For the bound all case you get back 0 */
+ memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr));
+ }
+ } else {
+ /* Take the first IPv6 address in the list */
+ struct sctp_laddr *laddr;
+ int fnd = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->address.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin_a;
+
+ sin_a = &laddr->ifa->address.sin6;
+ sin6->sin6_addr = sin_a->sin6_addr;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ return (ENOENT);
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ /* Scoping things for v6 */
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#ifdef SCTP_KAME
+ if ((error = sa6_recoverscope(sin6)) != 0) {
+ SCTP_FREE_SONAME(sin6);
+ return (error);
+ }
+#else
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
+ /* skip ifp check below */
+ in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+ else
+ sin6->sin6_scope_id = 0; /* XXX */
+#endif /* SCTP_KAME */
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#if !defined(__Userspace__)
+ (*addr) = (struct sockaddr *)sin6;
+#endif
+ return (0);
+}
+
+static int
+#if !defined(__Userspace__)
+sctp6_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in6 *sin6;
+#else
+sctp6_peeraddr(struct socket *so, struct mbuf *nam)
+{
+ struct sockaddr_in6 *sin6 = mtod(nam, struct sockaddr_in6 *);
+#endif
+ int fnd;
+ struct sockaddr_in6 *sin_a6;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+#ifdef SCTP_KAME
+ int error;
+#endif
+
+ /* Do the malloc first in case it blocks. */
+#if !defined(__Userspace__)
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL)
+ return (ENOMEM);
+#else
+ SCTP_BUF_LEN(nam) = sizeof(*sin6);
+ memset(sin6, 0, sizeof(*sin6));
+#endif
+ sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ sin6->sin6_len = sizeof(*sin6);
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if ((inp == NULL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /* UDP type and listeners will drop out here */
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (stcb == NULL) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+ return (ECONNRESET);
+ }
+ fnd = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (sin_a6->sin6_family == AF_INET6) {
+ fnd = 1;
+ sin6->sin6_port = stcb->rport;
+ sin6->sin6_addr = sin_a6->sin6_addr;
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (!fnd) {
+		/* No IPv6 address */
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ return (ENOENT);
+ }
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#ifdef SCTP_KAME
+ if ((error = sa6_recoverscope(sin6)) != 0) {
+#if !defined(__Userspace__)
+ SCTP_FREE_SONAME(sin6);
+#endif
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, error);
+ return (error);
+ }
+#else
+ in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif /* SCTP_KAME */
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#if !defined(__Userspace__)
+ *addr = (struct sockaddr *)sin6;
+#endif
+ return (0);
+}
+
+#if !defined(__Userspace__)
+static int
+sctp6_in6getaddr(struct socket *so, struct sockaddr **nam)
+{
+#elif defined(__Userspace__)
+int
+sctp6_in6getaddr(struct socket *so, struct mbuf *nam)
+{
+#ifdef INET
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+#endif
+#else
+static int
+sctp6_in6getaddr(struct socket *so, struct mbuf *nam)
+{
+#ifdef INET
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+#endif
+#endif
+ struct inpcb *inp = sotoinpcb(so);
+ int error;
+
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+	/* allow v6 addresses to take precedence */
+ error = sctp6_getaddr(so, nam);
+#ifdef INET
+ if (error) {
+#if !defined(__Userspace__)
+ struct sockaddr_in6 *sin6;
+#else
+ struct sockaddr_in6 sin6;
+#endif
+
+ /* try v4 next if v6 failed */
+ error = sctp_ingetaddr(so, nam);
+ if (error) {
+ return (error);
+ }
+#if !defined(__Userspace__)
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL) {
+ SCTP_FREE_SONAME(*nam);
+ return (ENOMEM);
+ }
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6);
+ SCTP_FREE_SONAME(*nam);
+ *nam = (struct sockaddr *)sin6;
+#else
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
+ SCTP_BUF_LEN(nam) = sizeof(struct sockaddr_in6);
+ memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
+#endif
+ }
+#endif
+ return (error);
+}
+
+
+#if !defined(__Userspace__)
+static int
+sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam)
+{
+#elif defined(__Userspace__)
+int
+sctp6_getpeeraddr(struct socket *so, struct mbuf *nam)
+{
+#ifdef INET
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+#endif
+#else
+static
+int
+sctp6_getpeeraddr(struct socket *so, struct mbuf *nam)
+{
+#ifdef INET
+ struct sockaddr *addr = mtod(nam, struct sockaddr *);
+#endif
+
+#endif
+ struct inpcb *inp = sotoinpcb(so);
+ int error;
+
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+
+	/* allow v6 addresses to take precedence */
+ error = sctp6_peeraddr(so, nam);
+#ifdef INET
+ if (error) {
+#if !defined(__Userspace__)
+ struct sockaddr_in6 *sin6;
+#else
+ struct sockaddr_in6 sin6;
+#endif
+
+ /* try v4 next if v6 failed */
+ error = sctp_peeraddr(so, nam);
+ if (error) {
+ return (error);
+ }
+#if !defined(__Userspace__)
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL) {
+ SCTP_FREE_SONAME(*nam);
+ return (ENOMEM);
+ }
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6);
+ SCTP_FREE_SONAME(*nam);
+ *nam = (struct sockaddr *)sin6;
+#else
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
+ SCTP_BUF_LEN(nam) = sizeof(struct sockaddr_in6);
+ memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
+#endif
+ }
+#endif
+ return (error);
+}
+
+#if !defined(__Userspace__)
+struct pr_usrreqs sctp6_usrreqs = {
+#if defined(__FreeBSD__)
+ .pru_abort = sctp6_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp6_attach,
+ .pru_bind = sctp6_bind,
+ .pru_connect = sctp6_connect,
+ .pru_control = in6_control,
+ .pru_close = sctp6_close,
+ .pru_detach = sctp6_close,
+ .pru_sopoll = sopoll_generic,
+ .pru_flush = sctp_flush,
+ .pru_disconnect = sctp6_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp6_getpeeraddr,
+ .pru_send = sctp6_send,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp6_in6getaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive
+#elif defined(__APPLE__) && !defined(__Userspace__)
+ .pru_abort = sctp6_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp6_attach,
+ .pru_bind = sctp6_bind,
+ .pru_connect = sctp6_connect,
+ .pru_connect2 = pru_connect2_notsupp,
+ .pru_control = in6_control,
+ .pru_detach = sctp6_detach,
+ .pru_disconnect = sctp6_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp6_getpeeraddr,
+ .pru_rcvd = NULL,
+ .pru_rcvoob = pru_rcvoob_notsupp,
+ .pru_send = sctp6_send,
+ .pru_sense = pru_sense_null,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp6_in6getaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive,
+ .pru_sopoll = sopoll
+#elif defined(_WIN32) && !defined(__Userspace__)
+ sctp6_abort,
+ sctp_accept,
+ sctp6_attach,
+ sctp6_bind,
+ sctp6_connect,
+ pru_connect2_notsupp,
+ NULL,
+ NULL,
+ sctp6_disconnect,
+ sctp_listen,
+ sctp6_getpeeraddr,
+ NULL,
+ pru_rcvoob_notsupp,
+ NULL,
+ pru_sense_null,
+ sctp_shutdown,
+ sctp_flush,
+ sctp6_in6getaddr,
+ sctp_sosend,
+ sctp_soreceive,
+ sopoll_generic,
+ NULL,
+ sctp6_close
+#endif
+};
+
+#elif !defined(__Userspace__)
+int
+sctp6_usrreq(so, req, m, nam, control, p)
+ struct socket *so;
+ int req;
+ struct mbuf *m, *nam, *control;
+ struct proc *p;
+{
+ int error;
+ int family;
+
+ family = so->so_proto->pr_domain->dom_family;
+
+ if (req == PRU_CONTROL) {
+ switch (family) {
+ case PF_INET:
+			error = in_control(so, (long)m, (caddr_t)nam,
+			    (struct ifnet *)control);
+ break;
+#ifdef INET6
+ case PF_INET6:
+ error = in6_control(so, (long)m, (caddr_t)nam,
+ (struct ifnet *)control, p);
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ }
+ return (error);
+ }
+ switch (req) {
+ case PRU_ATTACH:
+ error = sctp6_attach(so, family, p);
+ break;
+ case PRU_DETACH:
+ error = sctp6_detach(so);
+ break;
+ case PRU_BIND:
+ if (nam == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = sctp6_bind(so, nam, p);
+ break;
+ case PRU_LISTEN:
+ error = sctp_listen(so, p);
+ break;
+ case PRU_CONNECT:
+ if (nam == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = sctp6_connect(so, nam, p);
+ break;
+ case PRU_DISCONNECT:
+ error = sctp6_disconnect(so);
+ break;
+ case PRU_ACCEPT:
+ if (nam == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = sctp_accept(so, nam);
+ break;
+ case PRU_SHUTDOWN:
+ error = sctp_shutdown(so);
+ break;
+
+ case PRU_RCVD:
+ /*
+		 * For OpenBSD and NetBSD, this is really ugly: the (mbuf *)
+		 * nam that is passed (by soreceive()) is the int flags cast
+		 * to a (mbuf *). Yuck!
+ */
+ error = sctp_usr_recvd(so, (int)((long)nam));
+ break;
+
+ case PRU_SEND:
+ /* Flags are ignored */
+ error = sctp6_send(so, 0, m, nam, control, p);
+ break;
+ case PRU_ABORT:
+ error = sctp6_abort(so);
+ break;
+
+ case PRU_SENSE:
+ error = 0;
+ break;
+ case PRU_RCVOOB:
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ break;
+ case PRU_SENDOOB:
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ break;
+ case PRU_PEERADDR:
+ error = sctp6_getpeeraddr(so, nam);
+ break;
+ case PRU_SOCKADDR:
+ error = sctp6_in6getaddr(so, nam);
+ break;
+ case PRU_SLOWTIMO:
+ error = 0;
+ break;
+ default:
+ error = 0;
+ break;
+ }
+ return (error);
+}
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_var.h b/ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_var.h
new file mode 100644
index 000000000..56a6c3af3
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/netinet6/sctp6_var.h
@@ -0,0 +1,81 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && !defined(__Userspace__)
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet6/sctp6_var.h 317457 2017-04-26 19:26:40Z tuexen $");
+#endif
+
+#ifndef _NETINET6_SCTP6_VAR_H_
+#define _NETINET6_SCTP6_VAR_H_
+
+#if defined(__Userspace__)
+#ifdef INET
+extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *);
+extern void in6_sin6_2_sin_in_sock(struct sockaddr *);
+extern void in6_sin_2_v4mapsin6(const struct sockaddr_in *, struct sockaddr_in6 *);
+#endif
+#endif
+#if defined(_KERNEL)
+
+#if !defined(__Userspace__)
+SYSCTL_DECL(_net_inet6_sctp6);
+extern struct pr_usrreqs sctp6_usrreqs;
+#else
+int sctp6_usrreq(struct socket *, int, struct mbuf *, struct mbuf *, struct mbuf *);
+#endif
+
+#if defined(__APPLE__) && !defined(__Userspace__)
+int sctp6_input(struct mbuf **, int *);
+int sctp6_input_with_port(struct mbuf **, int *, uint16_t);
+#else
+int sctp6_input(struct mbuf **, int *, int);
+int sctp6_input_with_port(struct mbuf **, int *, uint16_t);
+#endif
+int sctp6_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct proc *);
+#if defined(__APPLE__) && !defined(__Userspace__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN)
+void sctp6_ctlinput(int, struct sockaddr *, void *, struct ifnet * SCTP_UNUSED);
+#else
+void sctp6_ctlinput(int, struct sockaddr *, void *);
+#endif
+#if !((defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__))
+extern void in6_sin_2_v4mapsin6(struct sockaddr_in *, struct sockaddr_in6 *);
+extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *);
+extern void in6_sin6_2_sin_in_sock(struct sockaddr *);
+#endif
+void sctp6_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *,
+ uint8_t, uint8_t, uint32_t);
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/user_atomic.h b/ext/sctp/usrsctp/usrsctplib/user_atomic.h
new file mode 100644
index 000000000..6a59587ef
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_atomic.h
@@ -0,0 +1,315 @@
+/*-
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USER_ATOMIC_H_
+#define _USER_ATOMIC_H_
+
+/* __Userspace__ version of sys/i386/include/atomic.h goes here */
+
+/* TODO In the future, might want to not use i386 specific assembly.
+ * The options include:
+ * - implement them generically (but maybe not truly atomic?) in userspace
+ * - have ifdef's for __Userspace_arch_ perhaps (OS isn't enough...)
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+
+#if defined(__APPLE__) || defined(_WIN32)
+#if defined(_WIN32)
+#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
+#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
+#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, -((LONG)val))
+#define atomic_cmpset_int(dst, exp, src) InterlockedCompareExchange((LPLONG)dst, src, exp)
+#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
+#else
+#include <libkern/OSAtomic.h>
+#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
+#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
+#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-val, (int32_t *)addr)
+#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
+#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
+#endif
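+
+/* The two variants above differ on purpose: InterlockedExchangeAdd()
+ * returns the value *before* the addition, so the Windows refcount check
+ * compares the result against 1, while OSAtomicAdd32Barrier() returns the
+ * value *after* the addition, so the Apple check compares against 0.
+ * Either way the macro is true exactly when the decrement took the
+ * counter from 1 to 0.
+ */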
+
+#if defined(INVARIANTS)
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t newval; \
+ newval = atomic_fetchadd_int(addr, -val); \
+ if (newval < 0) { \
+ panic("Counter goes negative"); \
+ } \
+}
+#else
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t newval; \
+ newval = atomic_fetchadd_int(addr, -val); \
+ if (newval < 0) { \
+ *addr = 0; \
+ } \
+}
+#endif
+#if defined(_WIN32)
+static void atomic_init(void) {} /* empty when we are not using atomic_mtx */
+#else
+static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
+#endif
+
+#else
+/* Using gcc built-in functions for atomic memory operations.
+   Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
+   Requires gcc version 4.1.0 or later;
+   compile with -march=i486 or newer.
+ */
+
+/* Atomically add V to *P. */
+#define atomic_add_int(P, V)	 (void) __sync_fetch_and_add(P, V)
+
+/* Atomically subtract V from *P. */
+#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)
+
+/* Following explanation from src/sys/i386/include/atomic.h,
+ * for atomic compare and set
+ *
+ * if (*dst == exp) *dst = src (all 32 bit words)
+ *
+ * Returns 0 on failure, non-zero on success
+ */
+
+#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)
+
+#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
+#if defined(INVARIANTS)
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t oldval; \
+ oldval = atomic_fetchadd_int(addr, -val); \
+ if (oldval < val) { \
+ panic("Counter goes negative"); \
+ } \
+}
+#else
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t oldval; \
+ oldval = atomic_fetchadd_int(addr, -val); \
+ if (oldval < val) { \
+ *addr = 0; \
+ } \
+}
+#endif
+static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
+#endif
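+
+#if 0 /* illustrative usage sketch, not compiled */
+/* A minimal sketch of how the macros above are typically combined; the
+ * names obj_release() and obj_try_mark_busy() are hypothetical and do not
+ * exist in usrsctp. It assumes the documented atomic_cmpset_int()
+ * semantics (0 on failure, non-zero on success); note that the _WIN32
+ * wrapper above returns the initial value instead.
+ */
+static void
+obj_release(uint32_t *refcnt)
+{
+	/* Drop one reference; the check is true only for the thread that
+	 * released the last one, so exactly one caller does the cleanup.
+	 */
+	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(refcnt)) {
+		/* last reference gone: safe to free the object here */
+	}
+}
+
+static int
+obj_try_mark_busy(volatile u_int *flags, u_int busy_bit)
+{
+	u_int old;
+
+	/* Classic CAS loop: retry until we set the bit or find it set. */
+	do {
+		old = *flags;
+		if (old & busy_bit) {
+			return (0); /* someone else owns it */
+		}
+	} while (atomic_cmpset_int(flags, old, old | busy_bit) == 0);
+	return (1);
+}
+#endif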
+
+#if 0 /* using libatomic_ops */
+#include "user_include/atomic_ops.h"
+
+/*Atomically add incr to *P, and return the original value of *P.*/
+#define atomic_add_int(P, V) AO_fetch_and_add((AO_t*)P, V)
+
+#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)
+
+/* Atomically compare *addr to old_val, and replace *addr by new_val
+ if the first comparison succeeds. Returns nonzero if the comparison
+ succeeded and *addr was updated.
+*/
+/* Following Explanation from src/sys/i386/include/atomic.h, which
+ matches that of AO_compare_and_swap above.
+ * Atomic compare and set, used by the mutex functions
+ *
+ * if (*dst == exp) *dst = src (all 32 bit words)
+ *
+ * Returns 0 on failure, non-zero on success
+ */
+
+#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)
+
+static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
+#endif /* closing #if for libatomic */
+
+#if 0 /* using atomic_mtx */
+
+#include <pthread.h>
+
+extern userland_mutex_t atomic_mtx;
+
+#if defined(_WIN32)
+static inline void atomic_init() {
+ InitializeCriticalSection(&atomic_mtx);
+}
+static inline void atomic_destroy() {
+ DeleteCriticalSection(&atomic_mtx);
+}
+static inline void atomic_lock() {
+ EnterCriticalSection(&atomic_mtx);
+}
+static inline void atomic_unlock() {
+ LeaveCriticalSection(&atomic_mtx);
+}
+#else
+static inline void atomic_init() {
+ pthread_mutexattr_t mutex_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+#ifdef INVARIANTS
+ pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
+#endif
+	pthread_mutex_init(&atomic_mtx, &mutex_attr);
+ pthread_mutexattr_destroy(&mutex_attr);
+}
+static inline void atomic_destroy() {
+ (void)pthread_mutex_destroy(&atomic_mtx);
+}
+static inline void atomic_lock() {
+#ifdef INVARIANTS
+	KASSERT(pthread_mutex_lock(&atomic_mtx) == 0, ("atomic_lock: atomic_mtx already locked"));
+#else
+ (void)pthread_mutex_lock(&atomic_mtx);
+#endif
+}
+static inline void atomic_unlock() {
+#ifdef INVARIANTS
+	KASSERT(pthread_mutex_unlock(&atomic_mtx) == 0, ("atomic_unlock: atomic_mtx not locked"));
+#else
+ (void)pthread_mutex_unlock(&atomic_mtx);
+#endif
+}
+#endif
+/*
+ * For userland, always use lock prefixes so that the binaries will run
+ * on both SMP and !SMP systems.
+ */
+
+#define MPLOCKED "lock ; "
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline u_int
+atomic_fetchadd_int(volatile void *n, u_int v)
+{
+ int *p = (int *) n;
+ atomic_lock();
+ __asm __volatile(
+ " " MPLOCKED " "
+ " xaddl %0, %1 ; "
+ "# atomic_fetchadd_int"
+ : "+r" (v), /* 0 (result) */
+ "=m" (*p) /* 1 */
+ : "m" (*p)); /* 2 */
+ atomic_unlock();
+
+ return (v);
+}
+
+
+#ifdef CPU_DISABLE_CMPXCHG
+
+static __inline int
+atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
+{
+ u_char res;
+
+ atomic_lock();
+ __asm __volatile(
+ " pushfl ; "
+ " cli ; "
+ " cmpl %3,%4 ; "
+ " jne 1f ; "
+ " movl %2,%1 ; "
+ "1: "
+ " sete %0 ; "
+ " popfl ; "
+ "# atomic_cmpset_int"
+ : "=q" (res), /* 0 */
+ "=m" (*dst) /* 1 */
+ : "r" (src), /* 2 */
+ "r" (exp), /* 3 */
+ "m" (*dst) /* 4 */
+ : "memory");
+ atomic_unlock();
+
+ return (res);
+}
+
+#else /* !CPU_DISABLE_CMPXCHG */
+
+static __inline int
+atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
+{
+	u_char res;
+
+	atomic_lock();
+
+ __asm __volatile(
+ " " MPLOCKED " "
+ " cmpxchgl %2,%1 ; "
+ " sete %0 ; "
+ "1: "
+ "# atomic_cmpset_int"
+ : "=a" (res), /* 0 */
+ "=m" (*dst) /* 1 */
+ : "r" (src), /* 2 */
+ "a" (exp), /* 3 */
+ "m" (*dst) /* 4 */
+ : "memory");
+ atomic_unlock();
+
+ return (res);
+}
+
+#endif /* CPU_DISABLE_CMPXCHG */
+
+#define atomic_add_int(P, V) do { \
+ atomic_lock(); \
+ (*(u_int *)(P) += (V)); \
+ atomic_unlock(); \
+} while(0)
+#define atomic_subtract_int(P, V) do { \
+ atomic_lock(); \
+ (*(u_int *)(P) -= (V)); \
+ atomic_unlock(); \
+} while(0)
+
+#endif
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/user_environment.c b/ext/sctp/usrsctp/usrsctplib/user_environment.c
new file mode 100644
index 000000000..dc9d98a39
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_environment.c
@@ -0,0 +1,132 @@
+/*-
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* __Userspace__ */
+
+#include <stdlib.h>
+#if !defined(_WIN32)
+#include <stdint.h>
+#include <netinet/sctp_os_userspace.h>
+#endif
+#include <user_environment.h>
+#include <sys/types.h>
+/* #include <sys/param.h> defines MIN */
+#if !defined(MIN)
+#define MIN(arg1,arg2) ((arg1) < (arg2) ? (arg1) : (arg2))
+#endif
+#include <string.h>
+
+#define uHZ 1000
+
+/* See user_include/user_environment.h for comments about these variables */
+int maxsockets = 25600;
+int hz = uHZ;
+int ip_defttl = 64;
+int ipport_firstauto = 49152, ipport_lastauto = 65535;
+int nmbclusters = 65536;
+
+/* Source ip_output.c. extern'd in ip_var.h */
+u_short ip_id = 0; /* __Userspace__ TODO: should it be initialized to zero? */
+
+/* used in user_include/user_atomic.h in order to make the operations
+ * defined there truly atomic
+ */
+userland_mutex_t atomic_mtx;
+
+/* If the entropy device is not loaded, make a token effort to
+ * provide _some_ kind of randomness. This should only be used
+ * inside other RNG's, like arc4random(9).
+ */
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+void
+init_random(void)
+{
+ return;
+}
+
+int
+read_random(void *buf, int count)
+{
+ memset(buf, 'A', count);
+ return (count);
+}
+#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__OpenBSD__) || defined(__APPLE__)
+void
+init_random(void)
+{
+ return;
+}
+
+int
+read_random(void *buf, int count)
+{
+ if (count >= 0) {
+ arc4random_buf(buf, count);
+ }
+ return (count);
+}
+#else
+void
+init_random(void)
+{
+ struct timeval now;
+ unsigned int seed;
+
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ seed = 0;
+ seed |= (unsigned int)now.tv_sec;
+ seed |= (unsigned int)now.tv_usec;
+#if !defined(_WIN32) && !defined(__native_client__)
+ seed |= getpid();
+#endif
+#if defined(_WIN32) || defined(__native_client__)
+ srand(seed);
+#else
+ srandom(seed);
+#endif
+ return;
+}
+
+int
+read_random(void *buf, int count)
+{
+ uint32_t randval;
+ int size, i;
+
+ /* Fill buf[] with random(9) output */
+ for (i = 0; i < count; i+= (int)sizeof(uint32_t)) {
+ randval = random();
+ size = MIN(count - i, (int)sizeof(uint32_t));
+ memcpy(&((char *)buf)[i], &randval, (size_t)size);
+ }
+
+ return (count);
+}
+#endif
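+
+#if 0 /* illustrative usage sketch, not compiled */
+/* A minimal example of how callers are expected to use the pair above:
+ * init_random() is called once at startup, read_random() then fills
+ * arbitrary buffers. pick_initial_vtag() is a hypothetical helper, not
+ * part of usrsctp.
+ */
+static uint32_t
+pick_initial_vtag(void)
+{
+	uint32_t vtag;
+
+	read_random(&vtag, (int)sizeof(vtag));
+	return (vtag);
+}
+#endif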
diff --git a/ext/sctp/usrsctp/usrsctplib/user_environment.h b/ext/sctp/usrsctp/usrsctplib/user_environment.h
new file mode 100644
index 000000000..0c2ec9db2
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_environment.h
@@ -0,0 +1,118 @@
+/*-
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USER_ENVIRONMENT_H_
+#define _USER_ENVIRONMENT_H_
+/* __Userspace__ */
+#include <sys/types.h>
+
+#ifdef __FreeBSD__
+#ifndef _SYS_MUTEX_H_
+#include <sys/mutex.h>
+#endif
+#endif
+#if defined(_WIN32)
+#include "netinet/sctp_os_userspace.h"
+#endif
+
+/* maxsockets is used in SCTP_ZONE_INIT call. It refers to
+ * kern.ipc.maxsockets kernel environment variable.
+ */
+extern int maxsockets;
+
+/* int hz; is declared in sys/kern/subr_param.c and refers to kernel timer frequency.
+ * See http://ivoras.sharanet.org/freebsd/vmware.html for additional info about kern.hz
+ * hz is initialized in void init_param1(void) in that file.
+ */
+extern int hz;
+
+
+/* The following two ints define a range of available ephemeral ports. */
+extern int ipport_firstauto, ipport_lastauto;
+
+/* nmbclusters is used in sctp_usrreq.c (e.g., sctp_init). In the FreeBSD kernel,
+ * this is 1024 + maxusers * 64.
+ */
+extern int nmbclusters;
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#define min(a,b) (((a)>(b))?(b):(a))
+#define max(a,b) (((a)>(b))?(a):(b))
+#endif
+
+void init_random(void);
+int read_random(void *, int);
+
+/* errno's may differ per OS. errno.h now included in sctp_os_userspace.h */
+/* Source: /usr/src/sys/sys/errno.h */
+/* #define ENOSPC 28 */ /* No space left on device */
+/* #define ENOBUFS 55 */ /* No buffer space available */
+/* #define ENOMEM 12 */ /* Cannot allocate memory */
+/* #define EACCES 13 */ /* Permission denied */
+/* #define EFAULT 14 */ /* Bad address */
+/* #define EHOSTDOWN 64 */ /* Host is down */
+/* #define EHOSTUNREACH 65 */ /* No route to host */
+
+/* Source ip_output.c. extern'd in ip_var.h */
+extern u_short ip_id;
+
+#if defined(__linux__)
+#define IPV6_VERSION 0x60
+#endif
+
+#if defined(INVARIANTS)
+#include <stdlib.h>
+
+static inline void
+terminate_non_graceful(void) {
+ abort();
+}
+
+#define panic(...) \
+ do { \
+ SCTP_PRINTF("%s(): ", __func__); \
+ SCTP_PRINTF(__VA_ARGS__); \
+ SCTP_PRINTF("\n"); \
+ terminate_non_graceful(); \
+} while (0)
+
+#define KASSERT(cond, args) \
+ do { \
+ if (!(cond)) { \
+ panic args ; \
+ } \
+ } while (0)
+#else
+#define KASSERT(cond, args)
+#endif
+
+/* necessary for sctp_pcb.c */
+extern int ip_defttl;
+#endif
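
A minimal usage sketch for the KASSERT/panic macros above (hypothetical code, not part of the imported file): with INVARIANTS defined, a failed condition prints through SCTP_PRINTF() and terminates via abort(); without INVARIANTS the macro expands to nothing, so assertion conditions must be free of side effects. The doubled parentheses are required, since KASSERT forwards the whole list to panic as "panic args":

    static void
    example_check(struct mbuf *m)
    {
            KASSERT(m != NULL, ("%s: NULL mbuf", __func__));
    }
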
diff --git a/ext/sctp/usrsctp/usrsctplib/user_inpcb.h b/ext/sctp/usrsctp/usrsctplib/user_inpcb.h
new file mode 100644
index 000000000..35d59c858
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_inpcb.h
@@ -0,0 +1,373 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_pcb.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD: src/sys/netinet/in_pcb.h,v 1.100.2.1 2007/12/07 05:46:08 kmacy Exp $
+ */
+
+#ifndef _USER_INPCB_H_
+#define _USER_INPCB_H_
+
+#include <user_route.h> /* was <net/route.h> */
+
+struct inpcbpolicy;
+
+/*
+ * Struct inpcb is the common pcb structure for the Internet Protocol
+ * implementation.
+ *
+ * Pointers to local and foreign host table entries, local and foreign socket
+ * numbers, and pointers up (to a socket structure) and down (to a
+ * protocol-specific control block) are stored here.
+ */
+LIST_HEAD(inpcbhead, inpcb);
+LIST_HEAD(inpcbporthead, inpcbport);
+
+/*
+ * A PCB with an AF_INET6 null-bound laddr can receive AF_INET input packets.
+ * So an AF_INET6 null laddr is also usable as an AF_INET null laddr, by
+ * utilizing the following structure.
+ */
+struct in_addr_4in6 {
+ uint32_t ia46_pad32[3];
+ struct in_addr ia46_addr4;
+};
+
+/*
+ * NOTE: ipv6 addrs should be 64-bit aligned, per RFC 2553. in_conninfo has
+ * some extra padding to accomplish this.
+ */
+struct in_endpoints {
+ uint16_t ie_fport; /* foreign port */
+ uint16_t ie_lport; /* local port */
+ /* protocol dependent part, local and foreign addr */
+ union {
+ /* foreign host table entry */
+ struct in_addr_4in6 ie46_foreign;
+ struct in6_addr ie6_foreign;
+ } ie_dependfaddr;
+ union {
+ /* local host table entry */
+ struct in_addr_4in6 ie46_local;
+ struct in6_addr ie6_local;
+ } ie_dependladdr;
+#define ie_faddr ie_dependfaddr.ie46_foreign.ia46_addr4
+#define ie_laddr ie_dependladdr.ie46_local.ia46_addr4
+#define ie6_faddr ie_dependfaddr.ie6_foreign
+#define ie6_laddr ie_dependladdr.ie6_local
+};
+
+/*
+ * XXX The defines for inc_* are hacks and should be changed to direct
+ * references.
+ */
+struct in_conninfo {
+ uint8_t inc_flags;
+ uint8_t inc_len;
+ uint16_t inc_pad; /* XXX alignment for in_endpoints */
+ /* protocol dependent part */
+ struct in_endpoints inc_ie;
+};
+#define inc_isipv6 inc_flags /* temp compatibility */
+#define inc_fport inc_ie.ie_fport
+#define inc_lport inc_ie.ie_lport
+#define inc_faddr inc_ie.ie_faddr
+#define inc_laddr inc_ie.ie_laddr
+#define inc6_faddr inc_ie.ie6_faddr
+#define inc6_laddr inc_ie.ie6_laddr
+
+struct icmp6_filter;
+
+struct inpcb {
+ LIST_ENTRY(inpcb) inp_hash; /* hash list */
+ LIST_ENTRY(inpcb) inp_list; /* list for all PCBs of this proto */
+ void *inp_ppcb; /* pointer to per-protocol pcb */
+ struct inpcbinfo *inp_pcbinfo; /* PCB list info */
+ struct socket *inp_socket; /* back pointer to socket */
+
+ uint32_t inp_flow;
+ int inp_flags; /* generic IP/datagram flags */
+
+ u_char inp_vflag; /* IP version flag (v4/v6) */
+#define INP_IPV4 0x1
+#define INP_IPV6 0x2
+#define INP_IPV6PROTO 0x4 /* opened under IPv6 protocol */
+#define INP_TIMEWAIT 0x8 /* .. probably doesn't go here */
+#define INP_ONESBCAST 0x10 /* send all-ones broadcast */
+#define INP_DROPPED 0x20 /* protocol drop flag */
+#define INP_SOCKREF 0x40 /* strong socket reference */
+#define INP_CONN 0x80
+ u_char inp_ip_ttl; /* time to live proto */
+ u_char inp_ip_p; /* protocol proto */
+ u_char inp_ip_minttl; /* minimum TTL or drop */
+ uint32_t inp_ispare1; /* connection id / queue id */
+ void *inp_pspare[2]; /* rtentry / general use */
+
+ /* Local and foreign ports, local and foreign addr. */
+ struct in_conninfo inp_inc;
+
+ /* list for this PCB's local port */
+ struct label *inp_label; /* MAC label */
+ struct inpcbpolicy *inp_sp; /* for IPSEC */
+
+ /* Protocol-dependent part; options. */
+ struct {
+ u_char inp4_ip_tos; /* type of service proto */
+ struct mbuf *inp4_options; /* IP options */
+ struct ip_moptions *inp4_moptions; /* IP multicast options */
+ } inp_depend4;
+#define inp_fport inp_inc.inc_fport
+#define inp_lport inp_inc.inc_lport
+#define inp_faddr inp_inc.inc_faddr
+#define inp_laddr inp_inc.inc_laddr
+#define inp_ip_tos inp_depend4.inp4_ip_tos
+#define inp_options inp_depend4.inp4_options
+#define inp_moptions inp_depend4.inp4_moptions
+ struct {
+ /* IP options */
+ struct mbuf *inp6_options;
+ /* IP6 options for outgoing packets */
+ struct ip6_pktopts *inp6_outputopts;
+ /* IP multicast options */
+#if 0
+ struct ip6_moptions *inp6_moptions;
+#endif
+ /* ICMPv6 code type filter */
+ struct icmp6_filter *inp6_icmp6filt;
+ /* IPV6_CHECKSUM setsockopt */
+ int inp6_cksum;
+ short inp6_hops;
+ } inp_depend6;
+ LIST_ENTRY(inpcb) inp_portlist;
+ struct inpcbport *inp_phd; /* head of this list */
+#define inp_zero_size offsetof(struct inpcb, inp_gencnt)
+ struct mtx inp_mtx;
+
+#define in6p_faddr inp_inc.inc6_faddr
+#define in6p_laddr inp_inc.inc6_laddr
+#define in6p_hops inp_depend6.inp6_hops /* default hop limit */
+#define in6p_ip6_nxt inp_ip_p
+#define in6p_flowinfo inp_flow
+#define in6p_vflag inp_vflag
+#define in6p_options inp_depend6.inp6_options
+#define in6p_outputopts inp_depend6.inp6_outputopts
+#if 0
+#define in6p_moptions inp_depend6.inp6_moptions
+#endif
+#define in6p_icmp6filt inp_depend6.inp6_icmp6filt
+#define in6p_cksum inp_depend6.inp6_cksum
+#define in6p_flags inp_flags /* for KAME src sync over BSD*'s */
+#define in6p_socket inp_socket /* for KAME src sync over BSD*'s */
+#define in6p_lport inp_lport /* for KAME src sync over BSD*'s */
+#define in6p_fport inp_fport /* for KAME src sync over BSD*'s */
+#define in6p_ppcb inp_ppcb /* for KAME src sync over BSD*'s */
+};
+/*
+ * The range of the generation count, as used in this implementation, is 9e19.
+ * We would have to create 300 billion connections per second for this number
+ * to roll over in a year. This seems sufficiently unlikely that we simply
+ * don't concern ourselves with that possibility.
+ */
+
+struct inpcbport {
+ LIST_ENTRY(inpcbport) phd_hash;
+ struct inpcbhead phd_pcblist;
+ u_short phd_port;
+};
+
+/*
+ * Global data structure for each high-level protocol (UDP, TCP, ...) in both
+ * IPv4 and IPv6. Holds inpcb lists and information for managing them.
+ */
+struct inpcbinfo {
+ /*
+ * Global list of inpcbs on the protocol.
+ */
+ struct inpcbhead *ipi_listhead;
+ u_int ipi_count;
+
+ /*
+ * Global hash of inpcbs, hashed by local and foreign addresses and
+ * port numbers.
+ */
+ struct inpcbhead *ipi_hashbase;
+ u_long ipi_hashmask;
+
+ /*
+ * Global hash of inpcbs, hashed by only local port number.
+ */
+ struct inpcbporthead *ipi_porthashbase;
+ u_long ipi_porthashmask;
+
+ /*
+ * Fields associated with port lookup and allocation.
+ */
+ u_short ipi_lastport;
+ u_short ipi_lastlow;
+ u_short ipi_lasthi;
+
+ /*
+ * UMA zone from which inpcbs are allocated for this protocol.
+ */
+ struct uma_zone *ipi_zone;
+
+	/*
+	 * Mutex protecting the global and hash lists above.
+	 */
+ struct mtx ipi_mtx;
+
+ /*
+ * vimage 1
+ * general use 1
+ */
+ void *ipi_pspare[2];
+};
+
+#define INP_LOCK_INIT(inp, d, t) \
+ mtx_init(&(inp)->inp_mtx, (d), (t), MTX_DEF | MTX_RECURSE | MTX_DUPOK)
+#define INP_LOCK_DESTROY(inp) mtx_destroy(&(inp)->inp_mtx)
+#define INP_LOCK(inp) mtx_lock(&(inp)->inp_mtx)
+#define INP_UNLOCK(inp) mtx_unlock(&(inp)->inp_mtx)
+#define INP_LOCK_ASSERT(inp) mtx_assert(&(inp)->inp_mtx, MA_OWNED)
+#define INP_UNLOCK_ASSERT(inp) mtx_assert(&(inp)->inp_mtx, MA_NOTOWNED)
+
+#define INP_INFO_LOCK_INIT(ipi, d) \
+ mtx_init(&(ipi)->ipi_mtx, (d), NULL, MTX_DEF | MTX_RECURSE)
+#define INP_INFO_LOCK_DESTROY(ipi) mtx_destroy(&(ipi)->ipi_mtx)
+#define INP_INFO_RLOCK(ipi) mtx_lock(&(ipi)->ipi_mtx)
+#define INP_INFO_WLOCK(ipi) mtx_lock(&(ipi)->ipi_mtx)
+#define INP_INFO_RUNLOCK(ipi) mtx_unlock(&(ipi)->ipi_mtx)
+#define INP_INFO_WUNLOCK(ipi) mtx_unlock(&(ipi)->ipi_mtx)
+#define INP_INFO_RLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_mtx, MA_OWNED)
+#define INP_INFO_WLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_mtx, MA_OWNED)
+#define INP_INFO_UNLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_mtx, MA_NOTOWNED)
+
+#define INP_PCBHASH(faddr, lport, fport, mask) \
+ (((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))
+#define INP_PCBPORTHASH(lport, mask) \
+ (ntohs((lport)) & (mask))
+
+/* flags in inp_flags: */
+#define INP_RECVOPTS 0x01 /* receive incoming IP options */
+#define INP_RECVRETOPTS 0x02 /* receive IP options for reply */
+#define INP_RECVDSTADDR 0x04 /* receive IP dst address */
+#define INP_HDRINCL 0x08 /* user supplies entire IP header */
+#define INP_HIGHPORT 0x10 /* user wants "high" port binding */
+#define INP_LOWPORT 0x20 /* user wants "low" port binding */
+#define INP_ANONPORT 0x40 /* port chosen for user */
+#define INP_RECVIF 0x80 /* receive incoming interface */
+#define INP_MTUDISC 0x100 /* user can do MTU discovery */
+#define INP_FAITH 0x200 /* accept FAITH'ed connections */
+#define INP_RECVTTL 0x400 /* receive incoming IP TTL */
+#define INP_DONTFRAG 0x800 /* don't fragment packet */
+
+#define IN6P_IPV6_V6ONLY 0x008000 /* restrict AF_INET6 socket for v6 */
+
+#define IN6P_PKTINFO 0x010000 /* receive IP6 dst and I/F */
+#define IN6P_HOPLIMIT 0x020000 /* receive hoplimit */
+#define IN6P_HOPOPTS 0x040000 /* receive hop-by-hop options */
+#define IN6P_DSTOPTS 0x080000 /* receive dst options after rthdr */
+#define IN6P_RTHDR 0x100000 /* receive routing header */
+#define IN6P_RTHDRDSTOPTS 0x200000 /* receive dstoptions before rthdr */
+#define IN6P_TCLASS 0x400000 /* receive traffic class value */
+#define IN6P_AUTOFLOWLABEL 0x800000 /* attach flowlabel automatically */
+#define IN6P_RFC2292 0x40000000 /* used RFC2292 API on the socket */
+#define IN6P_MTU 0x80000000 /* receive path MTU */
+
+#define INP_CONTROLOPTS (INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\
+ INP_RECVIF|INP_RECVTTL|\
+ IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\
+ IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\
+ IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\
+ IN6P_MTU)
+#define INP_UNMAPPABLEOPTS (IN6P_HOPOPTS|IN6P_DSTOPTS|IN6P_RTHDR|\
+ IN6P_TCLASS|IN6P_AUTOFLOWLABEL)
+
+ /* for KAME src sync over BSD*'s */
+#define IN6P_HIGHPORT INP_HIGHPORT
+#define IN6P_LOWPORT INP_LOWPORT
+#define IN6P_ANONPORT INP_ANONPORT
+#define IN6P_RECVIF INP_RECVIF
+#define IN6P_MTUDISC INP_MTUDISC
+#define IN6P_FAITH INP_FAITH
+#define IN6P_CONTROLOPTS INP_CONTROLOPTS
+ /*
+ * socket AF version is {newer than,or include}
+ * actual datagram AF version
+ */
+
+#define INPLOOKUP_WILDCARD 1
+#define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb)
+
+#define INP_SOCKAF(so) so->so_proto->pr_domain->dom_family
+
+#define INP_CHECK_SOCKAF(so, af) (INP_SOCKAF(so) == af)
+
+extern int ipport_reservedhigh;
+extern int ipport_reservedlow;
+extern int ipport_lowfirstauto;
+extern int ipport_lowlastauto;
+extern int ipport_firstauto;
+extern int ipport_lastauto;
+extern int ipport_hifirstauto;
+extern int ipport_hilastauto;
+extern struct callout ipport_tick_callout;
+
+void in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *);
+int in_pcballoc(struct socket *, struct inpcbinfo *);
+int in_pcbbind(struct inpcb *, struct sockaddr *, struct ucred *);
+int in_pcbconnect(struct inpcb *, struct sockaddr *, struct ucred *);
+void in_pcbdetach(struct inpcb *);
+void in_pcbdisconnect(struct inpcb *);
+void in_pcbdrop(struct inpcb *);
+void in_pcbfree(struct inpcb *);
+int in_pcbinshash(struct inpcb *);
+struct inpcb *
+ in_pcblookup_local(struct inpcbinfo *,
+ struct in_addr, u_int, int);
+struct inpcb *
+ in_pcblookup_hash(struct inpcbinfo *, struct in_addr, u_int,
+ struct in_addr, u_int, int, struct ifnet *);
+void in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr,
+ int, struct inpcb *(*)(struct inpcb *, int));
+void in_pcbrehash(struct inpcb *);
+void in_pcbsetsolabel(struct socket *so);
+int in_getpeeraddr(struct socket *so, struct sockaddr **nam);
+int in_getsockaddr(struct socket *so, struct sockaddr **nam);
+void in_pcbsosetlabel(struct socket *so);
+void in_pcbremlists(struct inpcb *inp);
+void ipport_tick(void *xtp);
+
+/*
+ * Debugging routines compiled in when DDB is present.
+ */
+void db_print_inpcb(struct inpcb *inp, const char *name, int indent);
+
+
+#endif /* !_USER_INPCB_H_ */
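
A hypothetical sketch (not part of the imported file) of how the lookup macros above are used: INP_PCBHASH folds the foreign address and both ports into a bucket index, assuming the hash table size is a power of two so that ipi_hashmask equals the bucket count minus one:

    static struct inpcbhead *
    example_hash_chain(struct inpcbinfo *pcbinfo, struct in_addr faddr,
        uint16_t fport, uint16_t lport)
    {
            u_long idx;

            /* faddr.s_addr and both ports are in network byte order. */
            idx = INP_PCBHASH(faddr.s_addr, lport, fport, pcbinfo->ipi_hashmask);
            return (&pcbinfo->ipi_hashbase[idx]);
    }
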
diff --git a/ext/sctp/usrsctp/usrsctplib/user_ip6_var.h b/ext/sctp/usrsctp/usrsctplib/user_ip6_var.h
new file mode 100644
index 000000000..e1c8f2415
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_ip6_var.h
@@ -0,0 +1,126 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _USER_IP6_VAR_H_
+#define _USER_IP6_VAR_H_
+
+#if defined(_WIN32)
+struct ip6_hdr {
+ union {
+ struct ip6_hdrctl {
+ uint32_t ip6_un1_flow; /* 20 bits of flow-ID */
+ uint16_t ip6_un1_plen; /* payload length */
+ uint8_t ip6_un1_nxt; /* next header */
+ uint8_t ip6_un1_hlim; /* hop limit */
+ } ip6_un1;
+ uint8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */
+ } ip6_ctlun;
+ struct in6_addr ip6_src; /* source address */
+ struct in6_addr ip6_dst; /* destination address */
+};
+#define ip6_vfc ip6_ctlun.ip6_un2_vfc
+#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow
+#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen
+#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt
+#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim
+#define ip6_hops ip6_ctlun.ip6_un1.ip6_un1_hlim
+
+#define IPV6_VERSION 0x60
+#endif
+
+#if defined(_WIN32)
+#define s6_addr16 u.Word
+#endif
+#if !defined(_WIN32)
+#if !defined(__linux__)
+#define s6_addr8 __u6_addr.__u6_addr8
+#define s6_addr16 __u6_addr.__u6_addr16
+#define s6_addr32 __u6_addr.__u6_addr32
+#endif
+#endif
+
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__DragonFly__)
+struct route_in6 {
+ struct rtentry *ro_rt;
+ struct llentry *ro_lle;
+ struct in6_addr *ro_ia6;
+ int ro_flags;
+ struct sockaddr_in6 ro_dst;
+};
+#endif
+#define IP6_EXTHDR_GET(val, typ, m, off, len) \
+do { \
+ struct mbuf *t; \
+ int tmp; \
+ if ((m)->m_len >= (off) + (len)) \
+ (val) = (typ)(mtod((m), caddr_t) + (off)); \
+ else { \
+ t = m_pulldown((m), (off), (len), &tmp); \
+ if (t) { \
+ KASSERT(t->m_len >= tmp + (len), \
+ ("m_pulldown malfunction")); \
+ (val) = (typ)(mtod(t, caddr_t) + tmp); \
+ } else { \
+ (val) = (typ)NULL; \
+ (m) = NULL; \
+ } \
+ } \
+} while (0)
+
+#endif /* !_USER_IP6_VAR_H_ */
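
A hypothetical usage sketch (not part of the imported file) for IP6_EXTHDR_GET: it yields a pointer to len contiguous bytes at offset off, falling back to m_pulldown() when the requested span crosses mbuf boundaries. On failure m_pulldown() frees the chain and the macro NULLs both val and m, so the caller must give up without touching m:

    static void
    example_parse(struct mbuf *m)
    {
            struct ip6_hdr *ip6;

            IP6_EXTHDR_GET(ip6, struct ip6_hdr *, m, 0, sizeof(struct ip6_hdr));
            if (ip6 == NULL)
                    return; /* chain was already freed */
            /* ip6 now points at a contiguous IPv6 header */
    }
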
diff --git a/ext/sctp/usrsctp/usrsctplib/user_ip_icmp.h b/ext/sctp/usrsctp/usrsctplib/user_ip_icmp.h
new file mode 100644
index 000000000..049314e20
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_ip_icmp.h
@@ -0,0 +1,225 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _USER_IP_ICMP_H_
+#define _USER_IP_ICMP_H_
+
+/*
+ * Internet Control Message Protocol Definitions.
+ * Per RFC 792, September 1981.
+ */
+
+/*
+ * Internal structure of an ICMP Router Advertisement.
+ */
+struct icmp_ra_addr {
+ uint32_t ira_addr;
+ uint32_t ira_preference;
+};
+
+/*
+ * Structure of an icmp header.
+ */
+struct icmphdr {
+ u_char icmp_type; /* type of message, see below */
+ u_char icmp_code; /* type sub code */
+ u_short icmp_cksum; /* ones complement cksum of struct */
+};
+
+#if defined(_WIN32)
+#pragma pack (push, 1)
+struct icmp6_hdr {
+ uint8_t icmp6_type;
+ uint8_t icmp6_code;
+ uint16_t icmp6_cksum;
+ union {
+ uint32_t icmp6_un_data32[1];
+ uint16_t icmp6_un_data16[2];
+ uint8_t icmp6_un_data8[4];
+ } icmp6_dataun;
+};
+#pragma pack(pop)
+
+#define icmp6_data32 icmp6_dataun.icmp6_un_data32
+#define icmp6_mtu icmp6_data32[0]
+#endif
+
+/*
+ * Structure of an icmp packet.
+ *
+ * XXX: should start with a struct icmphdr.
+ */
+struct icmp {
+ u_char icmp_type; /* type of message, see below */
+ u_char icmp_code; /* type sub code */
+ u_short icmp_cksum; /* ones complement cksum of struct */
+ union {
+ u_char ih_pptr; /* ICMP_PARAMPROB */
+ struct in_addr ih_gwaddr; /* ICMP_REDIRECT */
+ struct ih_idseq {
+ uint16_t icd_id; /* network format */
+ uint16_t icd_seq; /* network format */
+ } ih_idseq;
+ int ih_void;
+
+ /* ICMP_UNREACH_NEEDFRAG -- Path MTU Discovery (RFC1191) */
+ struct ih_pmtu {
+ uint16_t ipm_void; /* network format */
+ uint16_t ipm_nextmtu; /* network format */
+ } ih_pmtu;
+
+ struct ih_rtradv {
+ u_char irt_num_addrs;
+ u_char irt_wpa;
+ uint16_t irt_lifetime;
+ } ih_rtradv;
+ } icmp_hun;
+#define icmp_pptr icmp_hun.ih_pptr
+#define icmp_gwaddr icmp_hun.ih_gwaddr
+#define icmp_id icmp_hun.ih_idseq.icd_id
+#define icmp_seq icmp_hun.ih_idseq.icd_seq
+#define icmp_void icmp_hun.ih_void
+#define icmp_pmvoid icmp_hun.ih_pmtu.ipm_void
+#define icmp_nextmtu icmp_hun.ih_pmtu.ipm_nextmtu
+#define icmp_num_addrs icmp_hun.ih_rtradv.irt_num_addrs
+#define icmp_wpa icmp_hun.ih_rtradv.irt_wpa
+#define icmp_lifetime icmp_hun.ih_rtradv.irt_lifetime
+ union {
+ struct id_ts { /* ICMP Timestamp */
+ /*
+ * The next 3 fields are in network format,
+ * milliseconds since 00:00 GMT
+ */
+ uint32_t its_otime; /* Originate */
+ uint32_t its_rtime; /* Receive */
+ uint32_t its_ttime; /* Transmit */
+ } id_ts;
+ struct id_ip {
+ struct ip idi_ip;
+ /* options and then 64 bits of data */
+ } id_ip;
+ struct icmp_ra_addr id_radv;
+ uint32_t id_mask;
+ char id_data[1];
+ } icmp_dun;
+#define icmp_otime icmp_dun.id_ts.its_otime
+#define icmp_rtime icmp_dun.id_ts.its_rtime
+#define icmp_ttime icmp_dun.id_ts.its_ttime
+#define icmp_ip icmp_dun.id_ip.idi_ip
+#define icmp_radv icmp_dun.id_radv
+#define icmp_mask icmp_dun.id_mask
+#define icmp_data icmp_dun.id_data
+};
+
+/*
+ * Lower bounds on packet lengths for various types.
+ * For error advice packets we must first ensure that the
+ * packet is large enough to contain the returned ip header.
+ * Only then can we check whether 64 bits of packet data
+ * have been returned, since we need to check the returned
+ * ip header length.
+ */
+#define ICMP_MINLEN 8 /* abs minimum */
+#define ICMP_TSLEN (8 + 3 * sizeof (uint32_t)) /* timestamp */
+#define ICMP_MASKLEN 12 /* address mask */
+#define ICMP_ADVLENMIN (8 + sizeof (struct ip) + 8) /* min */
+#define ICMP_ADVLEN(p) (8 + ((p)->icmp_ip.ip_hl << 2) + 8)
+ /* N.B.: must separately check that ip_hl >= 5 */
+
+/*
+ * Definition of type and code field values.
+ */
+#define ICMP_ECHOREPLY 0 /* echo reply */
+#define ICMP_UNREACH 3 /* dest unreachable, codes: */
+#define ICMP_UNREACH_NET 0 /* bad net */
+#define ICMP_UNREACH_HOST 1 /* bad host */
+#define ICMP_UNREACH_PROTOCOL 2 /* bad protocol */
+#define ICMP_UNREACH_PORT 3 /* bad port */
+#define ICMP_UNREACH_NEEDFRAG 4 /* IP_DF caused drop */
+#define ICMP_UNREACH_SRCFAIL 5 /* src route failed */
+#define ICMP_UNREACH_NET_UNKNOWN 6 /* unknown net */
+#define ICMP_UNREACH_HOST_UNKNOWN 7 /* unknown host */
+#define ICMP_UNREACH_ISOLATED 8 /* src host isolated */
+#define ICMP_UNREACH_NET_PROHIB 9 /* prohibited access */
+#define ICMP_UNREACH_HOST_PROHIB 10 /* ditto */
+#define ICMP_UNREACH_TOSNET 11 /* bad tos for net */
+#define ICMP_UNREACH_TOSHOST 12 /* bad tos for host */
+#define ICMP_UNREACH_FILTER_PROHIB 13 /* admin prohib */
+#define ICMP_UNREACH_HOST_PRECEDENCE 14 /* host prec vio. */
+#define ICMP_UNREACH_PRECEDENCE_CUTOFF 15 /* prec cutoff */
+#define ICMP_SOURCEQUENCH 4 /* packet lost, slow down */
+#define ICMP_REDIRECT 5 /* shorter route, codes: */
+#define ICMP_REDIRECT_NET 0 /* for network */
+#define ICMP_REDIRECT_HOST 1 /* for host */
+#define ICMP_REDIRECT_TOSNET 2 /* for tos and net */
+#define ICMP_REDIRECT_TOSHOST 3 /* for tos and host */
+#define ICMP_ALTHOSTADDR 6 /* alternate host address */
+#define ICMP_ECHO 8 /* echo service */
+#define ICMP_ROUTERADVERT 9 /* router advertisement */
+#define ICMP_ROUTERADVERT_NORMAL 0 /* normal advertisement */
+#define ICMP_ROUTERADVERT_NOROUTE_COMMON 16 /* selective routing */
+#define ICMP_ROUTERSOLICIT 10 /* router solicitation */
+#define ICMP_TIMXCEED 11 /* time exceeded, code: */
+#define ICMP_TIMXCEED_INTRANS 0 /* ttl==0 in transit */
+#define ICMP_TIMXCEED_REASS 1 /* ttl==0 in reass */
+#define ICMP_PARAMPROB 12 /* ip header bad */
+#define ICMP_PARAMPROB_ERRATPTR 0 /* error at param ptr */
+#define ICMP_PARAMPROB_OPTABSENT 1 /* req. opt. absent */
+#define ICMP_PARAMPROB_LENGTH 2 /* bad length */
+#define ICMP_TSTAMP 13 /* timestamp request */
+#define ICMP_TSTAMPREPLY 14 /* timestamp reply */
+#define ICMP_IREQ 15 /* information request */
+#define ICMP_IREQREPLY 16 /* information reply */
+#define ICMP_MASKREQ 17 /* address mask request */
+#define ICMP_MASKREPLY 18 /* address mask reply */
+#define ICMP_TRACEROUTE 30 /* traceroute */
+#define ICMP_DATACONVERR 31 /* data conversion error */
+#define ICMP_MOBILE_REDIRECT 32 /* mobile host redirect */
+#define ICMP_IPV6_WHEREAREYOU 33 /* IPv6 where-are-you */
+#define ICMP_IPV6_IAMHERE 34 /* IPv6 i-am-here */
+#define ICMP_MOBILE_REGREQUEST 35 /* mobile registration req */
+#define ICMP_MOBILE_REGREPLY 36 /* mobile registration reply */
+#define ICMP_SKIP 39 /* SKIP */
+#define ICMP_PHOTURIS 40 /* Photuris */
+#define ICMP_PHOTURIS_UNKNOWN_INDEX 1 /* unknown sec index */
+#define ICMP_PHOTURIS_AUTH_FAILED 2 /* auth failed */
+#define ICMP_PHOTURIS_DECRYPT_FAILED 3 /* decrypt failed */
+
+#define ICMP_MAXTYPE 40
+
+#define ICMP_INFOTYPE(type) \
+ ((type) == ICMP_ECHOREPLY || (type) == ICMP_ECHO || \
+ (type) == ICMP_ROUTERADVERT || (type) == ICMP_ROUTERSOLICIT || \
+ (type) == ICMP_TSTAMP || (type) == ICMP_TSTAMPREPLY || \
+ (type) == ICMP_IREQ || (type) == ICMP_IREQREPLY || \
+ (type) == ICMP_MASKREQ || (type) == ICMP_MASKREPLY)
+
+
+#endif
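
A hypothetical sketch (not part of the imported file) of how these definitions are typically consumed, for example when processing an ICMP "fragmentation needed" error during path MTU discovery; note that the ih_pmtu fields are carried in network byte order:

    static uint16_t
    example_next_mtu(const struct icmp *icp)
    {
            if (icp->icmp_type == ICMP_UNREACH &&
                icp->icmp_code == ICMP_UNREACH_NEEDFRAG)
                    return (ntohs(icp->icmp_nextmtu));
            return (0);
    }
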
diff --git a/ext/sctp/usrsctp/usrsctplib/user_malloc.h b/ext/sctp/usrsctp/usrsctplib/user_malloc.h
new file mode 100644
index 000000000..161245216
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_malloc.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 1987, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2005 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/* This file has been renamed user_malloc.h for Userspace */
+#ifndef _USER_MALLOC_H_
+#define _USER_MALLOC_H_
+
+/*__Userspace__*/
+#include <stdlib.h>
+#include <sys/types.h>
+#if !defined(_WIN32)
+#include <strings.h>
+#include <stdint.h>
+#else
+#if (defined(_MSC_VER) && _MSC_VER >= 1600) || (defined(__MSVCRT_VERSION__) && __MSVCRT_VERSION__ >= 1400)
+#include <stdint.h>
+#elif defined(SCTP_STDINT_INCLUDE)
+#include SCTP_STDINT_INCLUDE
+#else
+#define uint32_t unsigned __int32
+#define uint64_t unsigned __int64
+#endif
+#include <winsock2.h>
+#endif
+
+#define MINALLOCSIZE UMA_SMALLEST_UNIT
+
+/*
+ * flags to malloc.
+ */
+#define M_NOWAIT 0x0001 /* do not block */
+#define M_WAITOK 0x0002 /* ok to block */
+#define M_ZERO 0x0100 /* bzero the allocation */
+#define M_NOVM 0x0200 /* don't ask VM for pages */
+#define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
+
+#define M_MAGIC 877983977 /* time when first defined :-) */
+
+/*
+ * Two malloc type structures are present: malloc_type, which is used by a
+ * type owner to declare the type, and malloc_type_internal, which holds
+ * malloc-owned statistics and other ABI-sensitive fields, such as the set of
+ * malloc statistics indexed by the compile-time MAXCPU constant.
+ * Applications should avoid introducing dependence on the allocator private
+ * data layout and size.
+ *
+ * The malloc_type ks_next field is protected by malloc_mtx. Other fields in
+ * malloc_type are static after initialization so unsynchronized.
+ *
+ * Statistics in malloc_type_stats are written only when holding a critical
+ * section and running on the CPU associated with the index into the stat
+ * array, but read lock-free resulting in possible (minor) races, which the
+ * monitoring app should take into account.
+ */
+struct malloc_type_stats {
+ uint64_t mts_memalloced; /* Bytes allocated on CPU. */
+ uint64_t mts_memfreed; /* Bytes freed on CPU. */
+ uint64_t mts_numallocs; /* Number of allocates on CPU. */
+ uint64_t mts_numfrees; /* number of frees on CPU. */
+ uint64_t mts_size; /* Bitmask of sizes allocated on CPU. */
+ uint64_t _mts_reserved1; /* Reserved field. */
+ uint64_t _mts_reserved2; /* Reserved field. */
+ uint64_t _mts_reserved3; /* Reserved field. */
+};
+
+#ifndef MAXCPU /* necessary on Linux */
+#define MAXCPU 4 /* arbitrary? */
+#endif
+
+struct malloc_type_internal {
+ struct malloc_type_stats mti_stats[MAXCPU];
+};
+
+/*
+ * ABI-compatible version of the old 'struct malloc_type', only all stats are
+ * now malloc-managed in malloc-owned memory rather than in caller memory, so
+ * as to avoid ABI issues. The ks_next pointer is reused as a pointer to the
+ * internal data handle.
+ */
+struct malloc_type {
+ struct malloc_type *ks_next; /* Next in global chain. */
+ u_long _ks_memuse; /* No longer used. */
+ u_long _ks_size; /* No longer used. */
+ u_long _ks_inuse; /* No longer used. */
+ uint64_t _ks_calls; /* No longer used. */
+ u_long _ks_maxused; /* No longer used. */
+ u_long ks_magic; /* Detect programmer error. */
+ const char *ks_shortdesc; /* Printable type name. */
+
+ /*
+ * struct malloc_type was terminated with a struct mtx, which is no
+ * longer required. For ABI reasons, continue to flesh out the full
+ * size of the old structure, but reuse the _lo_class field for our
+ * internal data handle.
+ */
+ void *ks_handle; /* Priv. data, was lo_class. */
+ const char *_lo_name;
+ const char *_lo_type;
+ u_int _lo_flags;
+ void *_lo_list_next;
+ struct witness *_lo_witness;
+ uintptr_t _mtx_lock;
+ u_int _mtx_recurse;
+};
+
+/*
+ * Statistics structure headers for user space. The kern.malloc sysctl
+ * exposes a structure stream consisting of a stream header, then a series of
+ * malloc type headers and statistics structures (quantity maxcpus). For
+ * convenience, the kernel will provide the current value of maxcpus at the
+ * head of the stream.
+ */
+#define MALLOC_TYPE_STREAM_VERSION 0x00000001
+struct malloc_type_stream_header {
+ uint32_t mtsh_version; /* Stream format version. */
+ uint32_t mtsh_maxcpus; /* Value of MAXCPU for stream. */
+ uint32_t mtsh_count; /* Number of records. */
+ uint32_t _mtsh_pad; /* Pad/reserved field. */
+};
+
+#define MALLOC_MAX_NAME 32
+struct malloc_type_header {
+ char mth_name[MALLOC_MAX_NAME];
+};
+
+/* __Userspace__
+ * Note that the original header uses #ifdef _KERNEL in places. Those
+ * guards cannot simply be removed, because doing so conflicts with the
+ * malloc definition in /usr/include/malloc.h (which essentially states
+ * that malloc.h has been superseded by stdlib.h). We therefore use
+ * names like user_malloc.h to isolate kernel interface headers, since
+ * reusing original names like malloc.h in a userspace include directory
+ * is confusing. All userspace header files are placed in ./user_include.
+ * Better still would be to remove all irrelevant code, such as the block
+ * starting with #ifdef _KERNEL; it is left in for the time being to see
+ * which functionality in this file the kernel actually uses.
+ *
+ * Start copy: code copied for __Userspace__ */
+#define MALLOC_DEFINE(type, shortdesc, longdesc) \
+ struct malloc_type type[1] = { \
+ { NULL, 0, 0, 0, 0, 0, M_MAGIC, shortdesc, NULL, NULL, \
+ NULL, 0, NULL, NULL, 0, 0 } \
+ }
+
+/* Removed "extern" in __Userspace__ code */
+/* If we need to use MALLOC_DECLARE before using MALLOC then
+   we have to remove extern.
+   In /usr/include/sys/malloc.h there is this definition:
+   #define MALLOC_DECLARE(type) \
+	extern struct malloc_type type[1]
+   and the loader is unable to find the extern malloc_type because
+   it may be defined in one of the kernel object files.
+   It seems that MALLOC_DECLARE and MALLOC_DEFINE cannot be used at
+   the same time for the same "type" variable. Also, Randall's
+   architecture document, which specifies the OS-specific macros and
+   functions, says that the name in SCTP_MALLOC does not have to be used.
+*/
+#define MALLOC_DECLARE(type) \
+ extern struct malloc_type type[1]
+
+#define FREE(addr, type) free((addr))
+
+/* Changed definitions of MALLOC and FREE. */
+/* Uses memset() when the M_ZERO flag is specified. TODO: honor M_WAITOK and M_NOWAIT. */
+#define MALLOC(space, cast, size, type, flags) \
+	do { \
+		(space) = (cast)malloc((u_long)(size)); \
+		if (((space) != NULL) && ((flags) & M_ZERO)) { \
+			memset((space), 0, (size)); \
+		} \
+	} while (0)
+
+#endif /* !_USER_MALLOC_H_ */
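
A hypothetical usage sketch (not part of the imported file) for the userspace shims above: MALLOC_DEFINE supplies a dummy malloc_type kept only for API compatibility, M_ZERO is honored via memset(), and M_WAITOK/M_NOWAIT are accepted but irrelevant because userspace malloc() never sleeps:

    MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");

    static void
    example_roundtrip(void)
    {
            char *buf;

            MALLOC(buf, char *, 128, M_EXAMPLE, M_ZERO);
            if (buf != NULL)
                    FREE(buf, M_EXAMPLE);
    }
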
diff --git a/ext/sctp/usrsctp/usrsctplib/user_mbuf.c b/ext/sctp/usrsctp/usrsctplib/user_mbuf.c
new file mode 100644
index 000000000..9e566e302
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_mbuf.c
@@ -0,0 +1,1566 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
+ * We initialize zones for mbufs, clusters, and the external
+ * reference counts used by clusters.
+ */
+
+#include <stdio.h>
+#include <string.h>
+/* #include <sys/param.h> This defines MSIZE 256 */
+#if !defined(SCTP_SIMPLE_ALLOCATOR)
+#include "umem.h"
+#endif
+#include "user_mbuf.h"
+#include "user_environment.h"
+#include "user_atomic.h"
+#include "netinet/sctp_pcb.h"
+
+#define KIPC_MAX_LINKHDR 4 /* int: max length of link header (see sys/sysctl.h) */
+#define KIPC_MAX_PROTOHDR 5 /* int: max length of network header (see sys/sysctl.h)*/
+int max_linkhdr = KIPC_MAX_LINKHDR;
+int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
+
+/*
+ * Zones from which we allocate.
+ */
+sctp_zone_t zone_mbuf;
+sctp_zone_t zone_clust;
+sctp_zone_t zone_ext_refcnt;
+
+/* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
+ * and mb_dtor_clust.
+ * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
+ * struct mbuf * clust_mb_args; does not work.
+ */
+struct clust_args clust_mb_args;
+
+
+/* __Userspace__
+ * Local prototypes.
+ */
+static int mb_ctor_mbuf(void *, void *, int);
+static int mb_ctor_clust(void *, void *, int);
+static void mb_dtor_mbuf(void *, void *);
+static void mb_dtor_clust(void *, void *);
+
+
+/***************** Functions taken from user_mbuf.h *************/
+
+static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
+{
+ int flags = pkthdr;
+ if (type == MT_NOINIT)
+ return (0);
+
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_len = 0;
+ m->m_flags = flags;
+ m->m_type = type;
+ if (flags & M_PKTHDR) {
+ m->m_data = m->m_pktdat;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+ SLIST_INIT(&m->m_pkthdr.tags);
+ } else
+ m->m_data = m->m_dat;
+
+ return (0);
+}
+
+/* __Userspace__ */
+struct mbuf *
+m_get(int how, short type)
+{
+ struct mbuf *mret;
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ struct mb_args mbuf_mb_args;
+
+	/* These assignments are not yet enclosed within
+	 * #if USING_MBUF_CONSTRUCTOR - #endif until mb_dtor_mbuf has been
+	 * thoroughly tested. See the comment there.
+	 */
+ mbuf_mb_args.flags = 0;
+ mbuf_mb_args.type = type;
+#endif
+ /* Mbuf master zone, zone_mbuf, has already been
+ * created in mbuf_initialize() */
+ mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
+#endif
+ /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
+
+	/* There are cases when an object is available in the current CPU's
+	 * loaded magazine, and in those cases the object's constructor is not
+	 * applied. We therefore duplicate the constructor initialization here,
+	 * so that the mbuf is properly constructed before it is returned.
+	 */
+ if (mret) {
+#if USING_MBUF_CONSTRUCTOR
+		if (mret->m_type != type) {
+ mbuf_constructor_dup(mret, 0, type);
+ }
+#else
+ mbuf_constructor_dup(mret, 0, type);
+#endif
+
+ }
+ return mret;
+}
+
+
+/* __Userspace__ */
+struct mbuf *
+m_gethdr(int how, short type)
+{
+ struct mbuf *mret;
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ struct mb_args mbuf_mb_args;
+
+	/* These assignments are not yet enclosed within
+	 * #if USING_MBUF_CONSTRUCTOR - #endif until mb_dtor_mbuf has been
+	 * thoroughly tested. See the comment there.
+	 */
+ mbuf_mb_args.flags = M_PKTHDR;
+ mbuf_mb_args.type = type;
+#endif
+ mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
+#endif
+ /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
+	/* There are cases when an object is available in the current CPU's
+	 * loaded magazine, and in those cases the object's constructor is not
+	 * applied. We therefore duplicate the constructor initialization here,
+	 * so that the mbuf is properly constructed before it is returned.
+	 */
+ if (mret) {
+#if USING_MBUF_CONSTRUCTOR
+		if (!((mret->m_flags & M_PKTHDR) && (mret->m_type == type))) {
+ mbuf_constructor_dup(mret, M_PKTHDR, type);
+ }
+#else
+ mbuf_constructor_dup(mret, M_PKTHDR, type);
+#endif
+ }
+ return mret;
+}
+
+/* __Userspace__ */
+struct mbuf *
+m_free(struct mbuf *m)
+{
+
+ struct mbuf *n = m->m_next;
+
+ if (m->m_flags & M_EXT)
+ mb_free_ext(m);
+ else if ((m->m_flags & M_NOFREE) == 0) {
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ mb_dtor_mbuf(m, NULL);
+#endif
+ SCTP_ZONE_FREE(zone_mbuf, m);
+ }
+ /*umem_cache_free(zone_mbuf, m);*/
+ return (n);
+}
+
+
+static void
+clust_constructor_dup(caddr_t m_clust, struct mbuf* m)
+{
+ u_int *refcnt;
+ int type, size;
+
+ if (m == NULL) {
+ return;
+ }
+ /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
+ type = EXT_CLUSTER;
+ size = MCLBYTES;
+
+ refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
+ /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
+#if !defined(SCTP_SIMPLE_ALLOCATOR)
+ if (refcnt == NULL) {
+ umem_reap();
+ refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
+ /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
+ }
+#endif
+ *refcnt = 1;
+ m->m_ext.ext_buf = (caddr_t)m_clust;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_args = NULL;
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_type = type;
+ m->m_ext.ref_cnt = refcnt;
+ return;
+}
+
+
+/* __Userspace__ */
+void
+m_clget(struct mbuf *m, int how)
+{
+ caddr_t mclust_ret;
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ struct clust_args clust_mb_args_l;
+#endif
+ if (m->m_flags & M_EXT) {
+ SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
+ }
+ m->m_ext.ext_buf = (char *)NULL;
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ clust_mb_args_l.parent_mbuf = m;
+#endif
+ mclust_ret = SCTP_ZONE_GET(zone_clust, char);
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0);
+#endif
+ /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
+	/*
+	 * On a cluster allocation failure, call umem_reap() and retry once;
+	 * log the failure only if the retry did not help either.
+	 */
+	if (mclust_ret == NULL) {
+#if !defined(SCTP_SIMPLE_ALLOCATOR)
+		umem_reap();
+		mclust_ret = SCTP_ZONE_GET(zone_clust, char);
+#endif
+		if (mclust_ret == NULL) {
+			SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
+		}
+	}
+
+#if USING_MBUF_CONSTRUCTOR
+	if (m->m_ext.ext_buf == NULL) {
+ clust_constructor_dup(mclust_ret, m);
+ }
+#else
+ clust_constructor_dup(mclust_ret, m);
+#endif
+}
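+
+/* __Userspace__
+ * Hypothetical usage sketch, not part of the import: allocate a
+ * packet-header mbuf, attach an MCLBYTES cluster with m_clget(), and
+ * release the chain. m_freem() returns the cluster to zone_clust once
+ * its external ref_cnt drops to zero.
+ */
+#if 0
+static void
+example_mbuf_roundtrip(void)
+{
+	struct mbuf *m;
+
+	m = m_gethdr(M_NOWAIT, MT_DATA);
+	if (m == NULL)
+		return;
+	m_clget(m, M_NOWAIT);
+	m_freem(m);
+}
+#endif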
+
+struct mbuf *
+m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf)
+{
+ struct mbuf *mb, *nm = NULL, *mtail = NULL;
+ int size, mbuf_threshold, space_needed = len;
+
+ KASSERT(len >= 0, ("%s: len is < 0", __func__));
+
+ /* Validate flags. */
+ flags &= (M_PKTHDR | M_EOR);
+
+ /* Packet header mbuf must be first in chain. */
+ if ((flags & M_PKTHDR) && m != NULL) {
+ flags &= ~M_PKTHDR;
+ }
+
+ if (allonebuf == 0)
+ mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
+ else
+ mbuf_threshold = 1;
+
+ /* Loop and append maximum sized mbufs to the chain tail. */
+ while (len > 0) {
+ if ((!allonebuf && len >= MCLBYTES) || (len > (int)(((mbuf_threshold - 1) * MLEN) + MHLEN))) {
+ mb = m_gethdr(how, type);
+ MCLGET(mb, how);
+ size = MCLBYTES;
+ /* SCTP_BUF_LEN(mb) = MCLBYTES; */
+ } else if (flags & M_PKTHDR) {
+ mb = m_gethdr(how, type);
+ if (len < MHLEN) {
+ size = len;
+ } else {
+ size = MHLEN;
+ }
+ } else {
+ mb = m_get(how, type);
+ if (len < MLEN) {
+ size = len;
+ } else {
+ size = MLEN;
+ }
+ }
+
+ /* Fail the whole operation if one mbuf can't be allocated. */
+ if (mb == NULL) {
+ if (nm != NULL)
+ m_freem(nm);
+ return (NULL);
+ }
+
+ if (allonebuf != 0 && size < space_needed) {
+ m_freem(mb);
+ return (NULL);
+ }
+
+ /* Book keeping. */
+ len -= size;
+ if (mtail != NULL)
+ mtail->m_next = mb;
+ else
+ nm = mb;
+ mtail = mb;
+ flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */
+ }
+ if (flags & M_EOR) {
+ mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */
+ }
+
+ /* If mbuf was supplied, append new chain to the end of it. */
+ if (m != NULL) {
+ for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
+ mtail->m_next = nm;
+ mtail->m_flags &= ~M_EOR;
+ } else {
+ m = nm;
+ }
+
+ return (m);
+}
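+
+/* __Userspace__
+ * Hypothetical sketch, not part of the import, of the allonebuf knob:
+ * with allonebuf != 0 the data must fit into one mbuf or cluster, so a
+ * request larger than MCLBYTES fails instead of producing a chain.
+ */
+#if 0
+static struct mbuf *
+example_single_buffer(int len)
+{
+	/* No parent chain, packet header wanted, single buffer only. */
+	return (m_getm2(NULL, len, M_NOWAIT, MT_DATA, M_PKTHDR, 1));
+}
+#endif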
+
+/*
+ * Copy the contents of uio into a properly sized mbuf chain.
+ */
+struct mbuf *
+m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
+{
+ struct mbuf *m, *mb;
+ int error, length;
+ ssize_t total;
+ int progress = 0;
+
+ /*
+	 * len can be zero or an arbitrarily large value bounded by
+ * the total data supplied by the uio.
+ */
+ if (len > 0)
+ total = min(uio->uio_resid, len);
+ else
+ total = uio->uio_resid;
+ /*
+ * The smallest unit returned by m_getm2() is a single mbuf
+ * with pkthdr. We can't align past it.
+ */
+ if (align >= MHLEN)
+ return (NULL);
+ /*
+ * Give us the full allocation or nothing.
+ * If len is zero return the smallest empty mbuf.
+ */
+ m = m_getm2(NULL, (int)max(total + align, 1), how, MT_DATA, flags, 0);
+ if (m == NULL)
+ return (NULL);
+ m->m_data += align;
+
+ /* Fill all mbufs with uio data and update header information. */
+ for (mb = m; mb != NULL; mb = mb->m_next) {
+ length = (int)min(M_TRAILINGSPACE(mb), total - progress);
+ error = uiomove(mtod(mb, void *), length, uio);
+ if (error) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ mb->m_len = length;
+ progress += length;
+ if (flags & M_PKTHDR)
+ m->m_pkthdr.len += length;
+ }
+ KASSERT(progress == total, ("%s: progress != total", __func__));
+
+ return (m);
+}
+
+u_int
+m_length(struct mbuf *m0, struct mbuf **last)
+{
+ struct mbuf *m;
+ u_int len;
+
+ len = 0;
+ for (m = m0; m != NULL; m = m->m_next) {
+ len += m->m_len;
+ if (m->m_next == NULL)
+ break;
+ }
+ if (last != NULL)
+ *last = m;
+ return (len);
+}
+
+struct mbuf *
+m_last(struct mbuf *m)
+{
+ while (m->m_next) {
+ m = m->m_next;
+ }
+ return (m);
+}
+
+/*
+ * Unlink a tag from the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_unlink(struct mbuf *m, struct m_tag *t)
+{
+
+ SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
+}
+
+/*
+ * Reclaim resources associated with a tag.
+ */
+static __inline void
+m_tag_free(struct m_tag *t)
+{
+
+ (*t->m_tag_free)(t);
+}
+
+/*
+ * Set up the contents of a tag. Note that this does not fill in the free
+ * method; the caller is expected to do that.
+ *
+ * XXX probably should be called m_tag_init, but that was already taken.
+ */
+static __inline void
+m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len)
+{
+
+ t->m_tag_id = type;
+ t->m_tag_len = len;
+ t->m_tag_cookie = cookie;
+}
+
+/************ End functions from user_mbuf.h ******************/
+
+
+
+/************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
+
+void
+mbuf_initialize(void *dummy)
+{
+
+ /*
+	 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
+ * (TODO: m_getcl() - using packet secondary zone).
+ * There is no provision for trash_init and trash_fini in umem.
+ *
+ */
+ /* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
+ mb_ctor_mbuf, mb_dtor_mbuf, NULL,
+ &mbuf_mb_args,
+ NULL, 0);
+ zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
+#else
+	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
+	                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,
+	                              NULL,
+	                              NULL, 0);
+#endif
+ /*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
+ NULL, NULL, NULL,
+ NULL,
+ NULL, 0);*/
+ SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);
+
+ /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
+ mb_ctor_clust, mb_dtor_clust, NULL,
+ &clust_mb_args,
+ NULL, 0);
+ zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
+#else
+ zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
+ mb_ctor_clust, mb_dtor_clust, NULL,
+ &clust_mb_args,
+ NULL, 0);
+#endif
+
+ /* uma_prealloc() goes here... */
+
+ /* __Userspace__ Add umem_reap here for low memory situation?
+ *
+ */
+
+}
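+
+/* __Userspace__
+ * Hypothetical bring-up sketch, not part of the import (assumes the
+ * stack calls this once): the zones must exist before the first
+ * allocation, so mbuf_initialize() is expected to run exactly once,
+ * single-threaded, during stack initialization.
+ */
+#if 0
+static void
+example_stack_bringup(void)
+{
+	mbuf_initialize(NULL);
+	/* m_get(), m_gethdr() and m_clget() are now safe to call. */
+}
+#endif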
+
+
+
+/*
+ * __Userspace__
+ *
+ * Constructor for Mbuf master zone. We have a different constructor
+ * for allocating the cluster.
+ *
+ * The 'arg' pointer points to a mb_args structure which
+ * contains call-specific information required to support the
+ * mbuf allocation API. See user_mbuf.h.
+ *
+ * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
+ * was passed when umem_cache_alloc was called.
+ * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
+ * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines
+ * The advantage of using UMEM_NOFAIL is that we don't have to check if umem_cache_alloc
+ * was successful or not. The failure handler would take care of it, if we use the UMEM_NOFAIL
+ * flag.
+ *
+ * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
+ * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
+ * It also mentions that umem_nofail_callback is Evolving.
+ *
+ */
+static int
+mb_ctor_mbuf(void *mem, void *arg, int flgs)
+{
+#if USING_MBUF_CONSTRUCTOR
+ struct mbuf *m;
+ struct mb_args *args;
+
+ int flags;
+ short type;
+
+ m = (struct mbuf *)mem;
+ args = (struct mb_args *)arg;
+ flags = args->flags;
+ type = args->type;
+
+ /*
+ * The mbuf is initialized later.
+ *
+ */
+ if (type == MT_NOINIT)
+ return (0);
+
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_len = 0;
+ m->m_flags = flags;
+ m->m_type = type;
+ if (flags & M_PKTHDR) {
+ m->m_data = m->m_pktdat;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+ SLIST_INIT(&m->m_pkthdr.tags);
+ } else
+ m->m_data = m->m_dat;
+#endif
+ return (0);
+}
+
+
+/*
+ * __Userspace__
+ * The Mbuf master zone destructor.
+ * This would be called in response to umem_cache_destroy().
+ * TODO: Recheck if this is what we want to do in this destructor.
+ * (Note: the number of times mb_dtor_mbuf is called is equal to the
+ * number of individual mbufs allocated from zone_mbuf.)
+ */
+static void
+mb_dtor_mbuf(void *mem, void *arg)
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem;
+ if ((m->m_flags & M_PKTHDR) != 0) {
+ m_tag_delete_chain(m, NULL);
+ }
+}
+
+
+/* __Userspace__
+ * The Cluster zone constructor.
+ *
+ * Here the 'arg' pointer points to the mbuf for which we
+ * are configuring cluster storage. If 'arg' is NULL we
+ * allocate just the cluster without attaching it to an
+ * mbuf. See mbuf.h.
+ */
+static int
+mb_ctor_clust(void *mem, void *arg, int flgs)
+{
+
+#if USING_MBUF_CONSTRUCTOR
+ struct mbuf *m;
+ struct clust_args * cla;
+ u_int *refcnt;
+ int type, size;
+ sctp_zone_t zone;
+
+ /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
+ type = EXT_CLUSTER;
+ zone = zone_clust;
+ size = MCLBYTES;
+
+ cla = (struct clust_args *)arg;
+ m = cla->parent_mbuf;
+
+ refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
+ /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
+ *refcnt = 1;
+
+ if (m != NULL) {
+ m->m_ext.ext_buf = (caddr_t)mem;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_args = NULL;
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_type = type;
+ m->m_ext.ref_cnt = refcnt;
+ }
+#endif
+ return (0);
+}
+
+/* __Userspace__ */
+static void
+mb_dtor_clust(void *mem, void *arg)
+{
+
+	/* mem is of type caddr_t (typedef char * caddr_t; in sys/types.h). */
+	/* mb_dtor_clust is called at umem_cache_destroy() time; the number of
+	 * times mb_dtor_clust is called equals the number of individual clusters
+	 * allocated from zone_clust (and similarly for mb_dtor_mbuf).
+	 * At this point, asserting
+	 *	struct mbuf *m = (struct mbuf *)arg;
+	 *	assert(*(m->m_ext.ref_cnt) == 0);
+	 * is not meaningful, since mb_free_ext() has already set
+	 * m->m_ext.ref_cnt = NULL.
+	 */
+
+}
+
+
+
+
+/* Unlink and free a packet tag. */
+void
+m_tag_delete(struct mbuf *m, struct m_tag *t)
+{
+ KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
+ m_tag_unlink(m, t);
+ m_tag_free(t);
+}
+
+
+/* Unlink and free a packet tag chain, starting from given tag. */
+void
+m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
+{
+
+ struct m_tag *p, *q;
+
+ KASSERT(m, ("m_tag_delete_chain: null mbuf"));
+ if (t != NULL)
+ p = t;
+ else
+ p = SLIST_FIRST(&m->m_pkthdr.tags);
+ if (p == NULL)
+ return;
+ while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
+ m_tag_delete(m, q);
+ m_tag_delete(m, p);
+}
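+
+/* __Userspace__
+ * Hypothetical sketch, not part of the import: purging all tags from a
+ * packet-header mbuf. Passing t == NULL starts deletion at the head of
+ * the m_pkthdr.tags list.
+ */
+#if 0
+static void
+example_purge_tags(struct mbuf *m)
+{
+	if (m->m_flags & M_PKTHDR)
+		m_tag_delete_chain(m, NULL);
+}
+#endif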
+
+#if 0
+static void
+sctp_print_mbuf_chain(struct mbuf *m)
+{
+	SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
+	for (; m; m = m->m_next) {
+		SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
+		if (m->m_flags & M_EXT)
+			SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
+	}
+}
+#endif
+
+/*
+ * Free an entire chain of mbufs and associated external buffers, if
+ * applicable.
+ */
+void
+m_freem(struct mbuf *mb)
+{
+ while (mb != NULL)
+ mb = m_free(mb);
+}
+
+/*
+ * __Userspace__
+ * clean mbufs with M_EXT storage attached to them
+ * if the reference count hits 1.
+ */
+void
+mb_free_ext(struct mbuf *m)
+{
+
+ int skipmbuf;
+
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+
+ /*
+ * check if the header is embedded in the cluster
+ */
+ skipmbuf = (m->m_flags & M_NOFREE);
+
+ /* Free the external attached storage if this
+ * mbuf is the only reference to it.
+ *__Userspace__ TODO: jumbo frames
+ *
+ */
+	/* NOTE: We had the same code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
+	   reduces to here before, but the IPHONE malloc commit changed this
+	   to compare against 0 instead of 1 (see next line). Why? That
+	   change caused a huge memory leak on Linux.
+	 */
+#ifdef IPHONE
+ if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
+#else
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
+#endif
+ {
+ if (m->m_ext.ext_type == EXT_CLUSTER){
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
+#endif
+ SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
+ SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt);
+ m->m_ext.ref_cnt = NULL;
+ }
+ }
+
+ if (skipmbuf)
+ return;
+
+
+	/* __Userspace__ The ref_cnt storage itself was freed above once the
+	 * last reference went away. Free this mbuf back to the mbuf zone
+	 * with all m_ext information purged.
+	 */
+ m->m_ext.ext_buf = NULL;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_args = NULL;
+ m->m_ext.ref_cnt = NULL;
+ m->m_ext.ext_size = 0;
+ m->m_ext.ext_type = 0;
+ m->m_flags &= ~M_EXT;
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+ mb_dtor_mbuf(m, NULL);
+#endif
+ SCTP_ZONE_FREE(zone_mbuf, m);
+
+ /*umem_cache_free(zone_mbuf, m);*/
+}
+
+/*
+ * "Move" mbuf pkthdr from "from" to "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ */
+void
+m_move_pkthdr(struct mbuf *to, struct mbuf *from)
+{
+
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr; /* especially tags */
+ SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
+ from->m_flags &= ~M_PKTHDR;
+}
+
+
+/*
+ * Rearrange an mbuf chain so that len bytes are contiguous
+ * and in the data area of an mbuf (so that mtod and dtom
+ * will work for a structure of size len). Returns the resulting
+ * mbuf chain on success, frees it and returns NULL on failure.
+ * If there is room, it will add up to max_protohdr - len extra bytes to the
+ * contiguous region in an attempt to avoid being called next time.
+ */
+struct mbuf *
+m_pullup(struct mbuf *n, int len)
+{
+ struct mbuf *m;
+ int count;
+ int space;
+
+ /*
+ * If first mbuf has no cluster, and has room for len bytes
+ * without shifting current data, pullup into it,
+ * otherwise allocate a new mbuf to prepend to the chain.
+ */
+ if ((n->m_flags & M_EXT) == 0 &&
+ n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
+ if (n->m_len >= len)
+ return (n);
+ m = n;
+ n = n->m_next;
+ len -= m->m_len;
+ } else {
+ if (len > MHLEN)
+ goto bad;
+ MGET(m, M_NOWAIT, n->m_type);
+ if (m == NULL)
+ goto bad;
+ m->m_len = 0;
+ if (n->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(m, n);
+ }
+ space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len));
+ do {
+ count = min(min(max(len, max_protohdr), space), n->m_len);
+		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)count);
+ len -= count;
+ m->m_len += count;
+ n->m_len -= count;
+ space -= count;
+ if (n->m_len)
+ n->m_data += count;
+ else
+ n = m_free(n);
+ } while (len > 0 && n);
+ if (len > 0) {
+ (void) m_free(m);
+ goto bad;
+ }
+ m->m_next = n;
+ return (m);
+bad:
+ m_freem(n);
+ return (NULL);
+}
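+
+/*
+ * Example: a minimal sketch of typical m_pullup() usage, with "struct hdr"
+ * as a placeholder type (sizeof(struct hdr) <= MHLEN) and "m" an existing
+ * chain:
+ *
+ *	m = m_pullup(m, sizeof(struct hdr));
+ *	if (m == NULL)
+ *		return;		(the chain has already been freed)
+ *	h = mtod(m, struct hdr *);
+ */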
+
+
+static struct mbuf *
+m_dup1(struct mbuf *m, int off, int len, int wait)
+{
+ struct mbuf *n = NULL;
+ int copyhdr;
+
+ if (len > MCLBYTES)
+ return NULL;
+ if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
+ copyhdr = 1;
+ else
+ copyhdr = 0;
+	if (len >= MINCLSIZE) {
+		/* allocate the mbuf first: m_clget() needs a valid mbuf to attach to */
+		if (copyhdr == 1)
+			n = m_gethdr(wait, m->m_type);
+		else
+			n = m_get(wait, m->m_type);
+		if (n != NULL) {
+			m_clget(n, wait);
+			if ((n->m_flags & M_EXT) == 0) {
+				m_free(n);
+				n = NULL;
+			}
+		}
+	} else {
+		if (copyhdr == 1)
+			n = m_gethdr(wait, m->m_type);
+		else
+			n = m_get(wait, m->m_type);
+	}
+ if (!n)
+ return NULL; /* ENOBUFS */
+
+ if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
+ m_free(n);
+ return NULL;
+ }
+ m_copydata(m, off, len, mtod(n, caddr_t));
+ n->m_len = len;
+ return n;
+}
+
+
+/* Taken from sys/kern/uipc_mbuf2.c */
+struct mbuf *
+m_pulldown(struct mbuf *m, int off, int len, int *offp)
+{
+ struct mbuf *n, *o;
+ int hlen, tlen, olen;
+ int writable;
+
+ /* check invalid arguments. */
+ KASSERT(m, ("m == NULL in m_pulldown()"));
+ if (len > MCLBYTES) {
+ m_freem(m);
+ return NULL; /* impossible */
+ }
+
+#ifdef PULLDOWN_DEBUG
+ {
+ struct mbuf *t;
+ SCTP_DEBUG_USR(SCTP_DEBUG_USR, "before:");
+ for (t = m; t; t = t->m_next)
+ SCTP_DEBUG_USR(SCTP_DEBUG_USR, " %d", t->m_len);
+ SCTP_DEBUG_USR(SCTP_DEBUG_USR, "\n");
+ }
+#endif
+ n = m;
+ while (n != NULL && off > 0) {
+ if (n->m_len > off)
+ break;
+ off -= n->m_len;
+ n = n->m_next;
+ }
+	/* make sure n points at a non-empty mbuf */
+ while (n != NULL && n->m_len == 0)
+ n = n->m_next;
+ if (!n) {
+ m_freem(m);
+ return NULL; /* mbuf chain too short */
+ }
+
+ writable = 0;
+ if ((n->m_flags & M_EXT) == 0 ||
+ (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
+ writable = 1;
+
+ /*
+ * the target data is on <n, off>.
+ * if we got enough data on the mbuf "n", we're done.
+ */
+ if ((off == 0 || offp) && len <= n->m_len - off && writable)
+ goto ok;
+
+ /*
+ * when len <= n->m_len - off and off != 0, it is a special case.
+ * len bytes from <n, off> sits in single mbuf, but the caller does
+ * not like the starting position (off).
+ * chop the current mbuf into two pieces, set off to 0.
+ */
+ if (len <= n->m_len - off) {
+ o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
+ if (o == NULL) {
+ m_freem(m);
+ return NULL; /* ENOBUFS */
+ }
+ n->m_len = off;
+ o->m_next = n->m_next;
+ n->m_next = o;
+ n = n->m_next;
+ off = 0;
+ goto ok;
+ }
+ /*
+ * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
+ * and construct contiguous mbuf with m_len == len.
+ * note that hlen + tlen == len, and tlen > 0.
+ */
+ hlen = n->m_len - off;
+ tlen = len - hlen;
+
+ /*
+ * ensure that we have enough trailing data on mbuf chain.
+ * if not, we can do nothing about the chain.
+ */
+ olen = 0;
+ for (o = n->m_next; o != NULL; o = o->m_next)
+ olen += o->m_len;
+ if (hlen + olen < len) {
+ m_freem(m);
+ return NULL; /* mbuf chain too short */
+ }
+
+ /*
+ * easy cases first.
+ * we need to use m_copydata() to get data from <n->m_next, 0>.
+ */
+ if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) {
+ m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
+ n->m_len += tlen;
+ m_adj(n->m_next, tlen);
+ goto ok;
+ }
+
+ if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) {
+ n->m_next->m_data -= hlen;
+ n->m_next->m_len += hlen;
+		memcpy(mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off, hlen);
+ n->m_len -= hlen;
+ n = n->m_next;
+ off = 0;
+ goto ok;
+ }
+
+	/*
+	 * now, we need to do it the hard way. don't m_copy as there's no
+	 * room on either end.
+	 */
+	if (len > MLEN) {
+		/* allocate the mbuf before attaching a cluster to it */
+		o = m_get(M_NOWAIT, m->m_type);
+		if (o != NULL) {
+			m_clget(o, M_NOWAIT);
+			if ((o->m_flags & M_EXT) == 0) {
+				m_free(o);
+				o = NULL;
+			}
+		}
+	} else
+		o = m_get(M_NOWAIT, m->m_type);
+ if (!o) {
+ m_freem(m);
+ return NULL; /* ENOBUFS */
+ }
+ /* get hlen from <n, off> into <o, 0> */
+ o->m_len = hlen;
+ memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen);
+ n->m_len -= hlen;
+ /* get tlen from <n->m_next, 0> into <o, hlen> */
+ m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
+ o->m_len += tlen;
+ m_adj(n->m_next, tlen);
+ o->m_next = n->m_next;
+ n->m_next = o;
+ n = o;
+ off = 0;
+ok:
+#ifdef PULLDOWN_DEBUG
+ {
+ struct mbuf *t;
+ SCTP_DEBUG_USR(SCTP_DEBUG_USR, "after:");
+ for (t = m; t; t = t->m_next)
+ SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
+ SCTP_DEBUG_USR(SCTP_DEBUG_USR, " (off=%d)\n", off);
+ }
+#endif
+ if (offp)
+ *offp = off;
+ return n;
+}
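+
+/*
+ * Example: a minimal sketch of m_pulldown() usage, assuming "len" bytes at
+ * offset "off" of chain "m" are wanted contiguously; on return the data
+ * starts "newoff" bytes into the returned mbuf:
+ *
+ *	int newoff;
+ *	struct mbuf *n;
+ *
+ *	n = m_pulldown(m, off, len, &newoff);
+ *	if (n == NULL)
+ *		return;		(m has already been freed)
+ *	p = mtod(n, caddr_t) + newoff;
+ */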
+
+/*
+ * Attach the cluster from *m to *n, set up m_ext in *n
+ * and bump the refcount of the cluster.
+ */
+static void
+mb_dupcl(struct mbuf *n, struct mbuf *m)
+{
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+ KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
+
+ if (*(m->m_ext.ref_cnt) == 1)
+ *(m->m_ext.ref_cnt) += 1;
+ else
+ atomic_add_int(m->m_ext.ref_cnt, 1);
+ n->m_ext.ext_buf = m->m_ext.ext_buf;
+ n->m_ext.ext_free = m->m_ext.ext_free;
+ n->m_ext.ext_args = m->m_ext.ext_args;
+ n->m_ext.ext_size = m->m_ext.ext_size;
+ n->m_ext.ref_cnt = m->m_ext.ref_cnt;
+ n->m_ext.ext_type = m->m_ext.ext_type;
+ n->m_flags |= M_EXT;
+}
+
+
+/*
+ * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
+ * continuing for "len" bytes. If len is M_COPYALL, copy to the end of the
+ * mbuf chain. The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from
+ * the caller.
+ * Note that the copy is read-only, because clusters are not copied,
+ * only their reference counts are incremented.
+ */
+
+struct mbuf *
+m_copym(struct mbuf *m, int off0, int len, int wait)
+{
+ struct mbuf *n, **np;
+ int off = off0;
+ struct mbuf *top;
+ int copyhdr = 0;
+
+ KASSERT(off >= 0, ("m_copym, negative off %d", off));
+ KASSERT(len >= 0, ("m_copym, negative len %d", len));
+ KASSERT(m != NULL, ("m_copym, m is NULL"));
+
+#if !defined(INVARIANTS)
+ if (m == NULL) {
+ return (NULL);
+ }
+#endif
+ if (off == 0 && m->m_flags & M_PKTHDR)
+ copyhdr = 1;
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ np = &top;
+ top = 0;
+ while (len > 0) {
+ if (m == NULL) {
+ KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
+ break;
+ }
+ if (copyhdr)
+ MGETHDR(n, wait, m->m_type);
+ else
+ MGET(n, wait, m->m_type);
+ *np = n;
+ if (n == NULL)
+ goto nospace;
+ if (copyhdr) {
+ if (!m_dup_pkthdr(n, m, wait))
+ goto nospace;
+ if (len == M_COPYALL)
+ n->m_pkthdr.len -= off0;
+ else
+ n->m_pkthdr.len = len;
+ copyhdr = 0;
+ }
+ n->m_len = min(len, m->m_len - off);
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data + off;
+ mb_dupcl(n, m);
+ } else
+ memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len);
+ if (len != M_COPYALL)
+ len -= n->m_len;
+ off = 0;
+ m = m->m_next;
+ np = &n->m_next;
+ }
+
+ return (top);
+nospace:
+ m_freem(top);
+ return (NULL);
+}
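+
+/*
+ * Example: a minimal sketch of a read-only copy of a whole chain, assuming
+ * "m" is valid; cluster data is shared by reference, so the copy must not
+ * be written to (see M_WRITABLE() in user_mbuf.h):
+ *
+ *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
+ *	if (n == NULL)
+ *		return;		(allocation failed; "m" is untouched)
+ */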
+
+
+int
+m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
+{
+ struct m_tag *p, *t, *tprev = NULL;
+
+ KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
+ m_tag_delete_chain(to, NULL);
+ SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
+ t = m_tag_copy(p, how);
+ if (t == NULL) {
+ m_tag_delete_chain(to, NULL);
+ return 0;
+ }
+ if (tprev == NULL)
+ SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
+ else
+ SLIST_INSERT_AFTER(tprev, t, m_tag_link);
+ tprev = t;
+ }
+ return 1;
+}
+
+/*
+ * Duplicate "from"'s mbuf pkthdr in "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ * In particular, this does a deep copy of the packet tags.
+ */
+int
+m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
+{
+
+ KASSERT(to, ("m_dup_pkthdr: to is NULL"));
+ KASSERT(from, ("m_dup_pkthdr: from is NULL"));
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr;
+ SLIST_INIT(&to->m_pkthdr.tags);
+ return (m_tag_copy_chain(to, from, MBTOM(how)));
+}
+
+/* Copy a single tag. */
+struct m_tag *
+m_tag_copy(struct m_tag *t, int how)
+{
+ struct m_tag *p;
+
+ KASSERT(t, ("m_tag_copy: null tag"));
+ p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
+ if (p == NULL)
+ return (NULL);
+ memcpy(p + 1, t + 1, t->m_tag_len); /* Copy the data */
+ return p;
+}
+
+/* Get a packet tag structure along with specified data following. */
+struct m_tag *
+m_tag_alloc(uint32_t cookie, int type, int len, int wait)
+{
+ struct m_tag *t;
+
+ if (len < 0)
+ return NULL;
+ t = malloc(len + sizeof(struct m_tag));
+ if (t == NULL)
+ return NULL;
+ m_tag_setup(t, cookie, type, len);
+ t->m_tag_free = m_tag_free_default;
+ return t;
+}
+
+/* Free a packet tag. */
+void
+m_tag_free_default(struct m_tag *t)
+{
+ free(t);
+}
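+
+/*
+ * Example: a minimal sketch of the tag API with placeholder cookie/type
+ * values, assuming a header mbuf "m":
+ *
+ *	struct m_tag *t;
+ *
+ *	t = m_tag_alloc(0x12345678, 1, sizeof(uint32_t), M_NOWAIT);
+ *	if (t != NULL) {
+ *		*(uint32_t *)(t + 1) = 42;	(tag data follows the header)
+ *		SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
+ *	}
+ */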
+
+/*
+ * Copy data from a buffer back into the indicated mbuf chain,
+ * starting "off" bytes from the beginning, extending the mbuf
+ * chain if necessary.
+ */
+void
+m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
+{
+ int mlen;
+ struct mbuf *m = m0, *n;
+ int totlen = 0;
+
+ if (m0 == NULL)
+ return;
+ while (off > (mlen = m->m_len)) {
+ off -= mlen;
+ totlen += mlen;
+ if (m->m_next == NULL) {
+ n = m_get(M_NOWAIT, m->m_type);
+ if (n == NULL)
+ goto out;
+ memset(mtod(n, caddr_t), 0, MLEN);
+ n->m_len = min(MLEN, len + off);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ }
+ while (len > 0) {
+		mlen = min(m->m_len - off, len);
+ memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen);
+ cp += mlen;
+ len -= mlen;
+ mlen += off;
+ off = 0;
+ totlen += mlen;
+ if (len == 0)
+ break;
+ if (m->m_next == NULL) {
+ n = m_get(M_NOWAIT, m->m_type);
+ if (n == NULL)
+ break;
+ n->m_len = min(MLEN, len);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ }
+out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
+ m->m_pkthdr.len = totlen;
+}
+
+
+/*
+ * Lesser-used path for M_PREPEND:
+ * allocate new mbuf to prepend to chain,
+ * copy junk along.
+ */
+struct mbuf *
+m_prepend(struct mbuf *m, int len, int how)
+{
+ struct mbuf *mn;
+
+ if (m->m_flags & M_PKTHDR)
+ MGETHDR(mn, how, m->m_type);
+ else
+ MGET(mn, how, m->m_type);
+ if (mn == NULL) {
+ m_freem(m);
+ return (NULL);
+ }
+ if (m->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(mn, m);
+ mn->m_next = m;
+ m = mn;
+ if (m->m_flags & M_PKTHDR) {
+ if (len < MHLEN)
+ MH_ALIGN(m, len);
+ } else {
+ if (len < MLEN)
+ M_ALIGN(m, len);
+ }
+ m->m_len = len;
+ return (m);
+}
+
+/*
+ * Copy data from an mbuf chain starting "off" bytes from the beginning,
+ * continuing for "len" bytes, into the indicated buffer.
+ */
+void
+m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
+{
+ u_int count;
+
+ KASSERT(off >= 0, ("m_copydata, negative off %d", off));
+ KASSERT(len >= 0, ("m_copydata, negative len %d", len));
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ while (len > 0) {
+ KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
+ count = min(m->m_len - off, len);
+ memcpy(cp, mtod(m, caddr_t) + off, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ m = m->m_next;
+ }
+}
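+
+/*
+ * Example: a minimal sketch pairing m_copydata() with a flat buffer,
+ * assuming the chain "m" holds at least off + sizeof(buf) bytes:
+ *
+ *	char buf[64];
+ *
+ *	m_copydata(m, off, sizeof(buf), (caddr_t)buf);
+ *
+ * m_copyback() above is the inverse: it writes a flat buffer into the
+ * chain, extending it with fresh mbufs as needed.
+ */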
+
+
+/*
+ * Concatenate mbuf chain n to m.
+ * Both chains must be of the same type (e.g. MT_DATA).
+ * Any m_pkthdr is not updated.
+ */
+void
+m_cat(struct mbuf *m, struct mbuf *n)
+{
+ while (m->m_next)
+ m = m->m_next;
+ while (n) {
+ if (m->m_flags & M_EXT ||
+ m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
+ /* just join the two chains */
+ m->m_next = n;
+ return;
+ }
+ /* splat the data from one into the other */
+ memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len);
+ m->m_len += n->m_len;
+ n = m_free(n);
+ }
+}
+
+
+void
+m_adj(struct mbuf *mp, int req_len)
+{
+ int len = req_len;
+ struct mbuf *m;
+ int count;
+
+ if ((m = mp) == NULL)
+ return;
+ if (len >= 0) {
+ /*
+ * Trim from head.
+ */
+ while (m != NULL && len > 0) {
+ if (m->m_len <= len) {
+ len -= m->m_len;
+ m->m_len = 0;
+ m = m->m_next;
+ } else {
+ m->m_len -= len;
+ m->m_data += len;
+ len = 0;
+ }
+ }
+ m = mp;
+ if (mp->m_flags & M_PKTHDR)
+ m->m_pkthdr.len -= (req_len - len);
+ } else {
+ /*
+ * Trim from tail. Scan the mbuf chain,
+ * calculating its length and finding the last mbuf.
+ * If the adjustment only affects this mbuf, then just
+ * adjust and return. Otherwise, rescan and truncate
+ * after the remaining size.
+ */
+ len = -len;
+ count = 0;
+ for (;;) {
+ count += m->m_len;
+ if (m->m_next == (struct mbuf *)0)
+ break;
+ m = m->m_next;
+ }
+ if (m->m_len >= len) {
+ m->m_len -= len;
+ if (mp->m_flags & M_PKTHDR)
+ mp->m_pkthdr.len -= len;
+ return;
+ }
+ count -= len;
+ if (count < 0)
+ count = 0;
+ /*
+ * Correct length for chain is "count".
+ * Find the mbuf with last data, adjust its length,
+ * and toss data from remaining mbufs on chain.
+ */
+ m = mp;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len = count;
+ for (; m; m = m->m_next) {
+ if (m->m_len >= count) {
+ m->m_len = count;
+ if (m->m_next != NULL) {
+ m_freem(m->m_next);
+ m->m_next = NULL;
+ }
+ break;
+ }
+ count -= m->m_len;
+ }
+ }
+}
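+
+/*
+ * Example: a minimal sketch of m_adj(), assuming a chain "m" of at least
+ * hdrlen + 4 bytes; a positive count trims from the head, a negative one
+ * from the tail:
+ *
+ *	m_adj(m, hdrlen);	(strip hdrlen bytes of header)
+ *	m_adj(m, -4);		(strip a 4-byte trailer)
+ */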
+
+
+/* m_split is used within sctp_handle_cookie_echo. */
+
+/*
+ * Partition an mbuf chain in two pieces, returning the tail --
+ * all but the first len0 bytes. In case of failure, it returns NULL and
+ * attempts to restore the chain to its original state.
+ *
+ * Note that the resulting mbufs might be read-only, because the new
+ * mbuf can end up sharing an mbuf cluster with the original mbuf if
+ * the "breaking point" happens to lie within a cluster mbuf. Use the
+ * M_WRITABLE() macro to check for this case.
+ */
+struct mbuf *
+m_split(struct mbuf *m0, int len0, int wait)
+{
+ struct mbuf *m, *n;
+ u_int len = len0, remain;
+
+ /* MBUF_CHECKSLEEP(wait); */
+ for (m = m0; m && (int)len > m->m_len; m = m->m_next)
+ len -= m->m_len;
+ if (m == NULL)
+ return (NULL);
+ remain = m->m_len - len;
+ if (m0->m_flags & M_PKTHDR) {
+ MGETHDR(n, wait, m0->m_type);
+ if (n == NULL)
+ return (NULL);
+ n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
+ n->m_pkthdr.len = m0->m_pkthdr.len - len0;
+ m0->m_pkthdr.len = len0;
+ if (m->m_flags & M_EXT)
+ goto extpacket;
+ if (remain > MHLEN) {
+ /* m can't be the lead packet */
+ MH_ALIGN(n, 0);
+ n->m_next = m_split(m, len, wait);
+ if (n->m_next == NULL) {
+ (void) m_free(n);
+ return (NULL);
+ } else {
+ n->m_len = 0;
+ return (n);
+ }
+ } else
+ MH_ALIGN(n, remain);
+ } else if (remain == 0) {
+ n = m->m_next;
+ m->m_next = NULL;
+ return (n);
+ } else {
+ MGET(n, wait, m->m_type);
+ if (n == NULL)
+ return (NULL);
+ M_ALIGN(n, remain);
+ }
+extpacket:
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data + len;
+ mb_dupcl(n, m);
+ } else {
+ memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
+ }
+ n->m_len = remain;
+ m->m_len = len;
+ n->m_next = m->m_next;
+ m->m_next = NULL;
+ return (n);
+}
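+
+/*
+ * Example: a minimal sketch of m_split(), assuming the chain "m" holds at
+ * least len0 bytes; afterwards "m" keeps the first len0 bytes and "tail"
+ * the remainder, possibly sharing a cluster with "m":
+ *
+ *	tail = m_split(m, len0, M_NOWAIT);
+ *	if (tail == NULL)
+ *		return;		("m" is left in its original state)
+ */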
+
+
+
+
+int
+pack_send_buffer(caddr_t buffer, struct mbuf *mb)
+{
+
+ int count_to_copy;
+ int total_count_copied = 0;
+ int offset = 0;
+
+ do {
+ count_to_copy = mb->m_len;
+ memcpy(buffer+offset, mtod(mb, caddr_t), count_to_copy);
+ offset += count_to_copy;
+ total_count_copied += count_to_copy;
+ mb = mb->m_next;
+ } while(mb);
+
+ return (total_count_copied);
+}
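+
+/*
+ * Note: pack_send_buffer() does no bounds checking and assumes a non-empty
+ * chain; the caller must supply a buffer at least as large as the chain,
+ * e.g. (a sketch):
+ *
+ *	len = m_length(mb, NULL);
+ *	buffer = malloc(len);
+ *	if (buffer != NULL)
+ *		(void)pack_send_buffer(buffer, mb);
+ */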
diff --git a/ext/sctp/usrsctp/usrsctplib/user_mbuf.h b/ext/sctp/usrsctp/usrsctplib/user_mbuf.h
new file mode 100644
index 000000000..c11b38c9c
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_mbuf.h
@@ -0,0 +1,413 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _USER_MBUF_H_
+#define _USER_MBUF_H_
+
+/* __Userspace__ header file for mbufs */
+#include <stdio.h>
+#if !defined(SCTP_SIMPLE_ALLOCATOR)
+#include "umem.h"
+#endif
+#include "user_malloc.h"
+#include "netinet/sctp_os_userspace.h"
+
+#define USING_MBUF_CONSTRUCTOR 0
+
+/* For Linux */
+#ifndef MSIZE
+#define MSIZE 256
+/* #define MSIZE 1024 */
+#endif
+#ifndef MCLBYTES
+#define MCLBYTES 2048
+#endif
+
+struct mbuf * m_gethdr(int how, short type);
+struct mbuf * m_get(int how, short type);
+struct mbuf * m_free(struct mbuf *m);
+void m_clget(struct mbuf *m, int how);
+struct mbuf * m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf);
+struct mbuf *m_uiotombuf(struct uio *uio, int how, int len, int align, int flags);
+u_int m_length(struct mbuf *m0, struct mbuf **last);
+struct mbuf *m_last(struct mbuf *m);
+
+/* mbuf initialization function */
+void mbuf_initialize(void *);
+
+#define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
+#define MGET(m, how, type) ((m) = m_get((how), (type)))
+#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type)))
+#define MCLGET(m, how) m_clget((m), (how))
+
+
+#define M_HDR_PAD ((sizeof(intptr_t)==4) ? 2 : 6) /* modified for __Userspace__ */
+
+/* Length to m_copy to copy all. */
+#define M_COPYALL 1000000000
+
+/* umem_cache_t is defined in user_include/umem.h as
+ * typedef struct umem_cache umem_cache_t;
+ * Note: sctp_zone_t is then a pointer type.
+ */
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+typedef size_t sctp_zone_t;
+#else
+typedef umem_cache_t *sctp_zone_t;
+#endif
+
+extern sctp_zone_t zone_mbuf;
+extern sctp_zone_t zone_clust;
+extern sctp_zone_t zone_ext_refcnt;
+
+/*-
+ * Macros for type conversion:
+ * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type.
+ * dtom(x) -- Convert data pointer within mbuf to mbuf pointer (XXX).
+ */
+#define mtod(m, t) ((t)((m)->m_data))
+#define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
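+
+/*
+ * Example: a minimal sketch of mtod(), assuming "m" holds a contiguous
+ * placeholder header "struct hdr" at the front:
+ *
+ *	struct hdr *h;
+ *
+ *	h = mtod(m, struct hdr *);
+ *
+ * Note that dtom() only works for data stored inside the mbuf itself
+ * (it relies on MSIZE alignment), not for data in an attached cluster.
+ */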
+
+struct mb_args {
+ int flags; /* Flags for mbuf being allocated */
+ short type; /* Type of mbuf being allocated */
+};
+
+struct clust_args {
+ struct mbuf * parent_mbuf;
+};
+
+struct mbuf * m_split(struct mbuf *, int, int);
+void m_cat(struct mbuf *m, struct mbuf *n);
+void m_adj(struct mbuf *, int);
+void mb_free_ext(struct mbuf *);
+void m_freem(struct mbuf *);
+struct m_tag *m_tag_alloc(uint32_t, int, int, int);
+struct mbuf *m_copym(struct mbuf *, int, int, int);
+void m_copyback(struct mbuf *, int, int, caddr_t);
+struct mbuf *m_pullup(struct mbuf *, int);
+struct mbuf *m_pulldown(struct mbuf *, int off, int len, int *offp);
+int m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
+struct m_tag *m_tag_copy(struct m_tag *, int);
+int m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
+struct mbuf *m_prepend(struct mbuf *, int, int);
+void m_copydata(const struct mbuf *, int, int, caddr_t);
+
+#define MBUF_MEM_NAME "mbuf"
+#define MBUF_CLUSTER_MEM_NAME "mbuf_cluster"
+#define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
+
+
+/*
+ * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
+ * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
+ * sys/param.h), which has no additional overhead and is used instead of the
+ * internal data area; this is done when at least MINCLSIZE of data must be
+ * stored. Additionally, it is possible to allocate a separate buffer
+ * externally and attach it to the mbuf in a way similar to that of mbuf
+ * clusters.
+ */
+#define MLEN ((int)(MSIZE - sizeof(struct m_hdr))) /* normal data len */
+#define MHLEN ((int)(MLEN - sizeof(struct pkthdr))) /* data len w/pkthdr */
+#define MINCLSIZE ((int)(MHLEN + 1)) /* smallest amount to put in cluster */
+#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
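+
+/*
+ * Worked example (LP64, MSIZE 256; a sketch assuming no extra structure
+ * padding): sizeof(struct m_hdr) == 40 and sizeof(struct pkthdr) == 40,
+ * giving MLEN == 216, MHLEN == 176 and MINCLSIZE == 177. A payload of
+ * MINCLSIZE bytes or more therefore goes into an MCLBYTES (2048) cluster
+ * instead of the internal data area.
+ */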
+
+
+/*
+ * Header present at the beginning of every mbuf.
+ */
+struct m_hdr {
+ struct mbuf *mh_next; /* next buffer in chain */
+ struct mbuf *mh_nextpkt; /* next chain in queue/record */
+ caddr_t mh_data; /* location of data */
+ int mh_len; /* amount of data in this mbuf */
+ int mh_flags; /* flags; see below */
+ short mh_type; /* type of data in this mbuf */
+ uint8_t pad[M_HDR_PAD];/* word align */
+};
+
+/*
+ * Packet tag structure (see below for details).
+ */
+struct m_tag {
+ SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
+ uint16_t m_tag_id; /* Tag ID */
+ uint16_t m_tag_len; /* Length of data */
+ uint32_t m_tag_cookie; /* ABI/Module ID */
+ void (*m_tag_free)(struct m_tag *);
+};
+
+/*
+ * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
+ */
+struct pkthdr {
+ struct ifnet *rcvif; /* rcv interface */
+ /* variables for ip and tcp reassembly */
+ void *header; /* pointer to packet header */
+ int len; /* total packet length */
+ /* variables for hardware checksum */
+ int csum_flags; /* flags regarding checksum */
+ int csum_data; /* data field used by csum routines */
+ uint16_t tso_segsz; /* TSO segment size */
+ uint16_t ether_vtag; /* Ethernet 802.1p+q vlan tag */
+ SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
+};
+
+/*
+ * Description of external storage mapped into mbuf; valid only if M_EXT is
+ * set.
+ */
+struct m_ext {
+ caddr_t ext_buf; /* start of buffer */
+ void (*ext_free) /* free routine if not the usual */
+ (void *, void *);
+ void *ext_args; /* optional argument pointer */
+ u_int ext_size; /* size of buffer, for ext_free */
+ volatile u_int *ref_cnt; /* pointer to ref count info */
+ int ext_type; /* type of external storage */
+};
+
+
+/*
+ * The core of the mbuf object along with some shortcuts defined for
+ * practical purposes.
+ */
+struct mbuf {
+ struct m_hdr m_hdr;
+ union {
+ struct {
+ struct pkthdr MH_pkthdr; /* M_PKTHDR set */
+ union {
+ struct m_ext MH_ext; /* M_EXT set */
+ char MH_databuf[MHLEN];
+ } MH_dat;
+ } MH;
+ char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
+ } M_dat;
+};
+
+#define m_next m_hdr.mh_next
+#define m_len m_hdr.mh_len
+#define m_data m_hdr.mh_data
+#define m_type m_hdr.mh_type
+#define m_flags m_hdr.mh_flags
+#define m_nextpkt m_hdr.mh_nextpkt
+#define m_act m_nextpkt
+#define m_pkthdr M_dat.MH.MH_pkthdr
+#define m_ext M_dat.MH.MH_dat.MH_ext
+#define m_pktdat M_dat.MH.MH_dat.MH_databuf
+#define m_dat M_dat.M_databuf
+
+
+/*
+ * mbuf flags.
+ */
+#define M_EXT 0x0001 /* has associated external storage */
+#define M_PKTHDR 0x0002 /* start of record */
+#define M_EOR 0x0004 /* end of record */
+#define M_RDONLY 0x0008 /* associated data is marked read-only */
+#define M_PROTO1 0x0010 /* protocol-specific */
+#define M_PROTO2 0x0020 /* protocol-specific */
+#define M_PROTO3 0x0040 /* protocol-specific */
+#define M_PROTO4 0x0080 /* protocol-specific */
+#define M_PROTO5 0x0100 /* protocol-specific */
+#define M_FREELIST 0x8000 /* mbuf is on the free list */
+
+
+/*
+ * Flags copied when copying m_pkthdr.
+ */
+#define M_COPYFLAGS (M_PKTHDR|M_EOR|M_RDONLY|M_PROTO1|M_PROTO2|\
+ M_PROTO3|M_PROTO4|M_PROTO5|\
+ M_BCAST|M_MCAST|M_FRAG|M_FIRSTFRAG|M_LASTFRAG|\
+ M_VLANTAG|M_PROMISC)
+
+
+/*
+ * mbuf pkthdr flags (also stored in m_flags).
+ */
+#define M_BCAST 0x0200 /* send/received as link-level broadcast */
+#define M_MCAST 0x0400 /* send/received as link-level multicast */
+#define M_FRAG 0x0800 /* packet is a fragment of a larger packet */
+#define M_FIRSTFRAG 0x1000 /* packet is first fragment */
+#define M_LASTFRAG 0x2000 /* packet is last fragment */
+#define M_VLANTAG 0x10000 /* ether_vtag is valid */
+#define M_PROMISC 0x20000 /* packet was not for us */
+#define M_NOFREE 0x40000 /* do not free mbuf - it is embedded in the cluster */
+
+
+/*
+ * External buffer types: identify ext_buf type.
+ */
+#define EXT_CLUSTER 1 /* mbuf cluster */
+#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */
+#define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */
+#define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */
+#define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */
+#define EXT_PACKET 6 /* mbuf+cluster from packet zone */
+#define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */
+#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */
+#define EXT_MOD_TYPE 200 /* custom module's ext_buf type */
+#define EXT_DISPOSABLE 300 /* can throw this buffer away w/page flipping */
+#define EXT_EXTREF 400 /* has externally maintained ref_cnt ptr */
+
+
+/*
+ * mbuf types.
+ */
+#define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */
+#define MT_DATA 1 /* dynamic (data) allocation */
+#define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */
+#define MT_SONAME 8 /* socket name */
+#define MT_CONTROL 14 /* extra-data protocol message */
+#define MT_OOBDATA 15 /* expedited data */
+#define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */
+
+#define MT_NOINIT 255 /* Not a type but a flag to allocate
+ a non-initialized mbuf */
+
+/*
+ * __Userspace__ Flags like M_NOWAIT are defined in malloc.h. Flags like
+ * these are used in functions like uma_zalloc(), but they have no
+ * equivalent in userland umem. They specify how an allocation should be
+ * made.
+ *
+ * The flag to use is as follows:
+ * - M_DONTWAIT or M_NOWAIT from an interrupt handler to not block allocation.
+ * - M_WAIT or M_WAITOK or M_TRYWAIT from wherever it is safe to block.
+ *
+ * M_DONTWAIT/M_NOWAIT means that we will not block the thread explicitly and
+ * if we cannot allocate immediately we may return NULL, whereas
+ * M_WAIT/M_WAITOK/M_TRYWAIT means that if we cannot allocate resources we
+ * will block until they are available, and thus never return NULL.
+ *
+ * XXX Eventually just phase this out to use M_WAITOK/M_NOWAIT.
+ */
+#define MBTOM(how) (how)
+
+void m_tag_delete(struct mbuf *, struct m_tag *);
+void m_tag_delete_chain(struct mbuf *, struct m_tag *);
+void m_move_pkthdr(struct mbuf *, struct mbuf *);
+void m_tag_free_default(struct m_tag *);
+
+extern int max_linkhdr; /* Largest link-level header */
+extern int max_protohdr; /* Size of largest protocol layer header. See user_mbuf.c */
+
+/*
+ * Evaluates to TRUE if it is safe to write to the mbuf m's data region (this
+ * can be either the local data payload or an external buffer area, depending
+ * on whether M_EXT is set).
+ */
+#define M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) &&			\
+			 (!(((m)->m_flags & M_EXT)) ||			\
+			 (*((m)->m_ext.ref_cnt) == 1)))
+
+
+/*
+ * Compute the amount of space available before the current start of data in
+ * an mbuf.
+ *
+ * The M_WRITABLE() is a temporary, conservative safety measure: the burden
+ * of checking writability of the mbuf data area rests solely with the caller.
+ */
+#define M_LEADINGSPACE(m) \
+ (((m)->m_flags & M_EXT) ? \
+ (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \
+ ((m)->m_flags & M_PKTHDR)? (m)->m_data - (m)->m_pktdat : \
+ (m)->m_data - (m)->m_dat)
+
+/*
+ * Compute the amount of space available after the end of data in an mbuf.
+ *
+ * The M_WRITABLE() is a temporary, conservative safety measure: the burden
+ * of checking writability of the mbuf data area rests solely with the caller.
+ */
+#define M_TRAILINGSPACE(m) \
+ (((m)->m_flags & M_EXT) ? \
+ (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size \
+ - ((m)->m_data + (m)->m_len) : 0) : \
+ &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
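+
+/*
+ * Example: for a fresh mbuf from m_get() (a sketch assuming m_data == m_dat
+ * and m_len == 0), M_LEADINGSPACE(m) is 0 and M_TRAILINGSPACE(m) is MLEN;
+ * M_ALIGN(m, len) below shifts that space to the front by advancing m_data.
+ */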
+
+
+
+/*
+ * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be
+ * allocated, how specifies whether to wait. If the allocation fails, the
+ * original mbuf chain is freed and m is set to NULL.
+ */
+#define M_PREPEND(m, plen, how) do { \
+ struct mbuf **_mmp = &(m); \
+ struct mbuf *_mm = *_mmp; \
+ int _mplen = (plen); \
+ int __mhow = (how); \
+ \
+ if (M_LEADINGSPACE(_mm) >= _mplen) { \
+ _mm->m_data -= _mplen; \
+ _mm->m_len += _mplen; \
+ } else \
+ _mm = m_prepend(_mm, _mplen, __mhow); \
+ if (_mm != NULL && _mm->m_flags & M_PKTHDR) \
+ _mm->m_pkthdr.len += _mplen; \
+ *_mmp = _mm; \
+} while (0)
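+
+/*
+ * Example: a minimal sketch of M_PREPEND() with a placeholder header type
+ * "struct hdr", assuming "m" is a valid chain:
+ *
+ *	M_PREPEND(m, sizeof(struct hdr), M_NOWAIT);
+ *	if (m == NULL)
+ *		return;		(the original chain was freed on failure)
+ *	h = mtod(m, struct hdr *);
+ */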
+
+/*
+ * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place an
+ * object of the specified size at the end of the mbuf, longword aligned.
+ */
+#define M_ALIGN(m, len) do { \
+ KASSERT(!((m)->m_flags & (M_PKTHDR|M_EXT)), \
+ ("%s: M_ALIGN not normal mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_dat, \
+ ("%s: M_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \
+} while (0)
+
+/*
+ * As above, for mbufs allocated with m_gethdr/MGETHDR or initialized by
+ * M_DUP/MOVE_PKTHDR.
+ */
+#define MH_ALIGN(m, len) do { \
+ KASSERT((m)->m_flags & M_PKTHDR && !((m)->m_flags & M_EXT), \
+ ("%s: MH_ALIGN not PKTHDR mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_pktdat, \
+ ("%s: MH_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \
+} while (0)
+
+#define M_SIZE(m) \
+ (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \
+ ((m)->m_flags & M_PKTHDR) ? MHLEN : \
+ MLEN)
+
+#endif /* !_USER_MBUF_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/user_queue.h b/ext/sctp/usrsctp/usrsctplib/user_queue.h
new file mode 100644
index 000000000..cde1ccecf
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_queue.h
@@ -0,0 +1,639 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _USER_QUEUE_H_
+#define _USER_QUEUE_H_
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ * SLIST LIST STAILQ TAILQ
+ * _HEAD + + + +
+ * _HEAD_INITIALIZER + + + +
+ * _ENTRY + + + +
+ * _INIT + + + +
+ * _EMPTY + + + +
+ * _FIRST + + + +
+ * _NEXT + + + +
+ * _PREV - - - +
+ * _LAST - - + +
+ * _FOREACH + + + +
+ * _FOREACH_SAFE + + + +
+ * _FOREACH_REVERSE - - - +
+ * _FOREACH_REVERSE_SAFE - - - +
+ * _INSERT_HEAD + + + +
+ * _INSERT_BEFORE - + - +
+ * _INSERT_AFTER + + + +
+ * _INSERT_TAIL - - + +
+ * _CONCAT - - + +
+ * _REMOVE_AFTER + - + -
+ * _REMOVE_HEAD + - + -
+ * _REMOVE + + + +
+ * _SWAP + + + +
+ *
+ */
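+
+/*
+ * Example: a minimal TAILQ sketch with placeholder names, following the
+ * queue(3) pattern:
+ *
+ *	struct entry {
+ *		int value;
+ *		TAILQ_ENTRY(entry) entries;
+ *	};
+ *	TAILQ_HEAD(entryhead, entry) head = TAILQ_HEAD_INITIALIZER(head);
+ *	struct entry *e, *tmp;
+ *
+ *	e = malloc(sizeof(*e));
+ *	if (e != NULL) {
+ *		e->value = 1;
+ *		TAILQ_INSERT_TAIL(&head, e, entries);
+ *	}
+ *	TAILQ_FOREACH_SAFE(e, &head, entries, tmp) {
+ *		TAILQ_REMOVE(&head, e, entries);
+ *		free(e);
+ *	}
+ */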
+#ifdef QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+ char * lastfile;
+ int lastline;
+ char * prevfile;
+ int prevline;
+};
+
+#define TRACEBUF struct qm_trace trace;
+#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
+#define QMD_SAVELINK(name, link) void **name = (void *)&(link)
+
+#define QMD_TRACE_HEAD(head) do { \
+ (head)->trace.prevline = (head)->trace.lastline; \
+ (head)->trace.prevfile = (head)->trace.lastfile; \
+ (head)->trace.lastline = __LINE__; \
+ (head)->trace.lastfile = __FILE__; \
+} while (0)
+
+#define QMD_TRACE_ELEM(elem) do { \
+ (elem)->trace.prevline = (elem)->trace.lastline; \
+ (elem)->trace.prevfile = (elem)->trace.lastfile; \
+ (elem)->trace.lastline = __LINE__; \
+ (elem)->trace.lastfile = __FILE__; \
+} while (0)
+
+#else
+#define QMD_TRACE_ELEM(elem)
+#define QMD_TRACE_HEAD(head)
+#define QMD_SAVELINK(name, link)
+#define TRACEBUF
+#define TRASHIT(x)
+#endif /* QUEUE_MACRO_DEBUG */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#if defined(_WIN32)
+#if defined(SLIST_ENTRY)
+#undef SLIST_ENTRY
+#endif
+#endif
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+
+#define SLIST_FIRST(head) ((head)->slh_first)
+
+#define SLIST_FOREACH(var, head, field) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &SLIST_FIRST((head)); \
+ ((var) = *(varp)) != NULL; \
+ (varp) = &SLIST_NEXT((var), field))
+
+#define SLIST_INIT(head) do { \
+ SLIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
+ SLIST_NEXT((slistelm), field) = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
+ SLIST_FIRST((head)) = (elm); \
+} while (0)
+
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.sle_next); \
+ if (SLIST_FIRST((head)) == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = SLIST_FIRST((head)); \
+ while (SLIST_NEXT(curelm, field) != (elm)) \
+ curelm = SLIST_NEXT(curelm, field); \
+ SLIST_REMOVE_AFTER(curelm, field); \
+ } \
+ TRASHIT(*oldnext); \
+} while (0)
+
+#define SLIST_REMOVE_AFTER(elm, field) do { \
+ SLIST_NEXT(elm, field) = \
+ SLIST_NEXT(SLIST_NEXT(elm, field), field); \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
+} while (0)
+
+#define SLIST_SWAP(head1, head2, type) do { \
+ struct type *swap_first = SLIST_FIRST(head1); \
+ SLIST_FIRST(head1) = SLIST_FIRST(head2); \
+ SLIST_FIRST(head2) = swap_first; \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (0)
+
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for((var) = STAILQ_FIRST((head)); \
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+
+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_INIT(head) do { \
+ STAILQ_FIRST((head)) = NULL; \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_NEXT((tqelm), field) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ STAILQ_NEXT((elm), field) = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \
+ if (STAILQ_FIRST((head)) == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = STAILQ_FIRST((head)); \
+ while (STAILQ_NEXT(curelm, field) != (elm)) \
+ curelm = STAILQ_NEXT(curelm, field); \
+ STAILQ_REMOVE_AFTER(head, curelm, field); \
+ } \
+ TRASHIT(*oldnext); \
+} while (0)
+
+#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
+ if ((STAILQ_NEXT(elm, field) = \
+ STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if ((STAILQ_FIRST((head)) = \
+ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_SWAP(head1, head2, type) do { \
+ struct type *swap_first = STAILQ_FIRST(head1); \
+ struct type **swap_last = (head1)->stqh_last; \
+ STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_FIRST(head2) = swap_first; \
+ (head2)->stqh_last = swap_last; \
+ if (STAILQ_EMPTY(head1)) \
+ (head1)->stqh_last = &STAILQ_FIRST(head1); \
+ if (STAILQ_EMPTY(head2)) \
+ (head2)->stqh_last = &STAILQ_FIRST(head2); \
+} while (0)
+
+
+/*
+ * List declarations.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+
+#if defined(INVARIANTS)
+#define QMD_LIST_CHECK_HEAD(head, field) do { \
+ if (LIST_FIRST((head)) != NULL && \
+ LIST_FIRST((head))->field.le_prev != \
+ &LIST_FIRST((head))) \
+ panic("Bad list head %p first->prev != head", (void *)(head)); \
+} while (0)
+
+#define QMD_LIST_CHECK_NEXT(elm, field) do { \
+ if (LIST_NEXT((elm), field) != NULL && \
+ LIST_NEXT((elm), field)->field.le_prev != \
+ &((elm)->field.le_next)) \
+ panic("Bad link elm %p next->prev != elm", (void *)(elm)); \
+} while (0)
+
+#define QMD_LIST_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.le_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (void *)(elm)); \
+} while (0)
+#else
+#define QMD_LIST_CHECK_HEAD(head, field)
+#define QMD_LIST_CHECK_NEXT(elm, field)
+#define QMD_LIST_CHECK_PREV(elm, field)
+#endif /* (INVARIANTS) */
+
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = LIST_FIRST((head)); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST((head)); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define LIST_INIT(head) do { \
+ LIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ QMD_LIST_CHECK_NEXT(listelm, field); \
+ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+ LIST_NEXT((listelm), field)->field.le_prev = \
+ &LIST_NEXT((elm), field); \
+ LIST_NEXT((listelm), field) = (elm); \
+ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_LIST_CHECK_PREV(listelm, field); \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ LIST_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ QMD_LIST_CHECK_HEAD((head), field); \
+ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+ LIST_FIRST((head)) = (elm); \
+ (elm)->field.le_prev = &LIST_FIRST((head)); \
+} while (0)
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_REMOVE(elm, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.le_next); \
+ QMD_SAVELINK(oldprev, (elm)->field.le_prev); \
+ QMD_LIST_CHECK_NEXT(elm, field); \
+ QMD_LIST_CHECK_PREV(elm, field); \
+ if (LIST_NEXT((elm), field) != NULL) \
+ LIST_NEXT((elm), field)->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = LIST_NEXT((elm), field); \
+ TRASHIT(*oldnext); \
+ TRASHIT(*oldprev); \
+} while (0)
+
+#define LIST_SWAP(head1, head2, type, field) do { \
+ struct type *swap_tmp = LIST_FIRST((head1)); \
+ LIST_FIRST((head1)) = LIST_FIRST((head2)); \
+ LIST_FIRST((head2)) = swap_tmp; \
+ if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
+ swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
+ if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
+ swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
+} while (0)
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+ TRACEBUF \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+ TRACEBUF \
+}
+
+/*
+ * Tail queue functions.
+ */
+#if defined(INVARIANTS)
+#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
+ if (!TAILQ_EMPTY(head) && \
+ TAILQ_FIRST((head))->field.tqe_prev != \
+ &TAILQ_FIRST((head))) \
+ panic("Bad tailq head %p first->prev != head", (void *)(head)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
+ if (*(head)->tqh_last != NULL) \
+ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (void *)(head)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
+ if (TAILQ_NEXT((elm), field) != NULL && \
+ TAILQ_NEXT((elm), field)->field.tqe_prev != \
+ &((elm)->field.tqe_next)) \
+ panic("Bad link elm %p next->prev != elm", (void *)(elm)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.tqe_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (void *)(elm)); \
+} while (0)
+#else
+#define QMD_TAILQ_CHECK_HEAD(head, field)
+#define QMD_TAILQ_CHECK_TAIL(head, field)
+#define QMD_TAILQ_CHECK_NEXT(elm, field)
+#define QMD_TAILQ_CHECK_PREV(elm, field)
+#endif /* (INVARIANTS) */
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+ if (!TAILQ_EMPTY(head2)) { \
+ *(head1)->tqh_last = (head2)->tqh_first; \
+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ TAILQ_INIT((head2)); \
+ QMD_TRACE_HEAD(head1); \
+ QMD_TRACE_HEAD(head2); \
+ } \
+} while (0)
+
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_INIT(head) do { \
+ TAILQ_FIRST((head)) = NULL; \
+ (head)->tqh_last = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_NEXT(listelm, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else { \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ } \
+ TAILQ_NEXT((listelm), field) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&listelm->field); \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_PREV(listelm, field); \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ TAILQ_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&listelm->field); \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ QMD_TAILQ_CHECK_HEAD(head, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
+ TAILQ_FIRST((head))->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_FIRST((head)) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ QMD_TAILQ_CHECK_TAIL(head, field); \
+ TAILQ_NEXT((elm), field) = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \
+ QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \
+ QMD_TAILQ_CHECK_NEXT(elm, field); \
+ QMD_TAILQ_CHECK_PREV(elm, field); \
+ if ((TAILQ_NEXT((elm), field)) != NULL) \
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else { \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ QMD_TRACE_HEAD(head); \
+ } \
+ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
+ TRASHIT(*oldnext); \
+ TRASHIT(*oldprev); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_SWAP(head1, head2, type, field) do { \
+ struct type *swap_first = (head1)->tqh_first; \
+ struct type **swap_last = (head1)->tqh_last; \
+ (head1)->tqh_first = (head2)->tqh_first; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ (head2)->tqh_first = swap_first; \
+ (head2)->tqh_last = swap_last; \
+ if ((swap_first = (head1)->tqh_first) != NULL) \
+ swap_first->field.tqe_prev = &(head1)->tqh_first; \
+ else \
+ (head1)->tqh_last = &(head1)->tqh_first; \
+ if ((swap_first = (head2)->tqh_first) != NULL) \
+ swap_first->field.tqe_prev = &(head2)->tqh_first; \
+ else \
+ (head2)->tqh_last = &(head2)->tqh_first; \
+} while (0)
+
+#endif /* !_USER_QUEUE_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/user_recv_thread.c b/ext/sctp/usrsctp/usrsctplib/user_recv_thread.c
new file mode 100644
index 000000000..3c1657b4a
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_recv_thread.c
@@ -0,0 +1,1521 @@
+/*-
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#if defined(INET) || defined(INET6)
+#include <sys/types.h>
+#if !defined(_WIN32)
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <unistd.h>
+#include <pthread.h>
+#if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
+#include <sys/uio.h>
+#else
+#include <user_ip6_var.h>
+#endif
+#endif
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_input.h>
+#if 0
+#if defined(__linux__)
+#include <linux/netlink.h>
+#ifdef HAVE_LINUX_IF_ADDR_H
+#include <linux/if_addr.h>
+#endif
+#ifdef HAVE_LINUX_RTNETLINK_H
+#include <linux/rtnetlink.h>
+#endif
+#endif
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
+#include <net/route.h>
+#endif
+/* local macros and datatypes used to get IP addresses in a system-independent way */
+#if !defined(IP_PKTINFO) && !defined(IP_RECVDSTADDR)
+# error "Can't determine socket option to use to get UDP IP"
+#endif
+
+void recv_thread_destroy(void);
+
+#define MAXLEN_MBUF_CHAIN 128
+
+#define ROUNDUP(a, size) (((a) & ((size)-1)) ? (1 + ((a) | ((size)-1))) : (a))
+
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
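+/* Step to the next sockaddr in a routing message; entries are padded to
+   uint32_t boundaries, and an sa_len of 0 occupies a single uint32_t. */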
+#define NEXT_SA(ap) ap = (struct sockaddr *) \
+ ((caddr_t) ap + (ap->sa_len ? ROUNDUP(ap->sa_len, sizeof (uint32_t)) : sizeof(uint32_t)))
+#endif
+
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
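+/* Scatter the sockaddrs trailing an ifa_msghdr into rti_info[]; the addrs
+   bitmask says which RTAX_* slots are actually present. */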
+static void
+sctp_get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
+{
+ int i;
+
+ for (i = 0; i < RTAX_MAX; i++) {
+ if (addrs & (1 << i)) {
+ rti_info[i] = sa;
+ NEXT_SA(sa);
+ } else {
+ rti_info[i] = NULL;
+ }
+ }
+}
+
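+/* Translate an RTM_NEWADDR/RTM_DELADDR message for a known interface into
+   the corresponding address add/delete on the default SCTP VRF. */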
+static void
+sctp_handle_ifamsg(unsigned char type, unsigned short index, struct sockaddr *sa)
+{
+ int rc;
+ struct ifaddrs *ifa, *ifas;
+
+ /* handle only the types we want */
+ if ((type != RTM_NEWADDR) && (type != RTM_DELADDR)) {
+ return;
+ }
+
+ rc = getifaddrs(&ifas);
+ if (rc != 0) {
+ return;
+ }
+ for (ifa = ifas; ifa; ifa = ifa->ifa_next) {
+ if (index == if_nametoindex(ifa->ifa_name)) {
+ break;
+ }
+ }
+ if (ifa == NULL) {
+ freeifaddrs(ifas);
+ return;
+ }
+
+ /* relay the appropriate address change to the base code */
+ if (type == RTM_NEWADDR) {
+ (void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID,
+ NULL,
+ if_nametoindex(ifa->ifa_name),
+ 0,
+ ifa->ifa_name,
+ NULL,
+ sa,
+ 0,
+ 1);
+ } else {
+ sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
+ if_nametoindex(ifa->ifa_name),
+ ifa->ifa_name);
+ }
+ freeifaddrs(ifas);
+}
+
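+/* Receive thread: blocks on the AF_ROUTE socket and feeds address
+   additions and removals to sctp_handle_ifamsg(). */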
+static void *
+recv_function_route(void *arg)
+{
+ ssize_t ret;
+ struct ifa_msghdr *ifa;
+ char rt_buffer[1024];
+ struct sockaddr *sa, *rti_info[RTAX_MAX];
+
+ sctp_userspace_set_threadname("SCTP addr mon");
+
+ while (1) {
+ memset(rt_buffer, 0, sizeof(rt_buffer));
+ ret = recv(SCTP_BASE_VAR(userspace_route), rt_buffer, sizeof(rt_buffer), 0);
+
+ if (ret > 0) {
+ ifa = (struct ifa_msghdr *) rt_buffer;
+ if (ifa->ifam_type != RTM_DELADDR && ifa->ifam_type != RTM_NEWADDR) {
+ continue;
+ }
+ sa = (struct sockaddr *) (ifa + 1);
+ sctp_get_rtaddrs(ifa->ifam_addrs, sa, rti_info);
+ switch (ifa->ifam_type) {
+ case RTM_DELADDR:
+ case RTM_NEWADDR:
+ sctp_handle_ifamsg(ifa->ifam_type, ifa->ifam_index, rti_info[RTAX_IFA]);
+ break;
+ default:
+ /* ignore this routing event */
+ break;
+ }
+ }
+ if (ret < 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+ return (NULL);
+}
+#endif
+
+#if 0
+/* This does not yet work on Linux */
+static void *
+recv_function_route(void *arg)
+{
+ int len;
+ char buf[4096];
+ struct iovec iov = { buf, sizeof(buf) };
+ struct msghdr msg;
+ struct nlmsghdr *nh;
+ struct ifaddrmsg *rtmsg;
+ struct rtattr *rtatp;
+ struct in_addr *inp;
+ struct sockaddr_nl sanl;
+#ifdef INET
+ struct sockaddr_in *sa;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sa6;
+#endif
+
+ for (;;) {
+ memset(&sanl, 0, sizeof(sanl));
+ sanl.nl_family = AF_NETLINK;
+ sanl.nl_groups = RTMGRP_IPV6_IFADDR | RTMGRP_IPV4_IFADDR;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_name = (void *)&sanl;
+ msg.msg_namelen = sizeof(sanl);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+
+ len = recvmsg(SCTP_BASE_VAR(userspace_route), &msg, 0);
+
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ for (nh = (struct nlmsghdr *) buf; NLMSG_OK (nh, len);
+ nh = NLMSG_NEXT (nh, len)) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ break;
+
+ if (nh->nlmsg_type == RTM_NEWADDR || nh->nlmsg_type == RTM_DELADDR) {
+ rtmsg = (struct ifaddrmsg *)NLMSG_DATA(nh);
+ rtatp = (struct rtattr *)IFA_RTA(rtmsg);
+ if (rtatp->rta_type == IFA_ADDRESS) {
+ inp = (struct in_addr *)RTA_DATA(rtatp);
+ switch (rtmsg->ifa_family) {
+#ifdef INET
+ case AF_INET:
+ sa = (struct sockaddr_in *)malloc(sizeof(struct sockaddr_in));
+ sa->sin_family = rtmsg->ifa_family;
+ sa->sin_port = 0;
+ memcpy(&sa->sin_addr, inp, sizeof(struct in_addr));
+ sctp_handle_ifamsg(nh->nlmsg_type, rtmsg->ifa_index, (struct sockaddr *)sa);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)malloc(sizeof(struct sockaddr_in6));
+ sa6->sin6_family = rtmsg->ifa_family;
+ sa6->sin6_port = 0;
+ memcpy(&sa6->sin6_addr, inp, sizeof(struct in6_addr));
+ sctp_handle_ifamsg(nh->nlmsg_type, rtmsg->ifa_index, (struct sockaddr *)sa6);
+ break;
+#endif
+ default:
+ SCTPDBG(SCTP_DEBUG_USR, "Address family %d not supported.\n", rtmsg->ifa_family);
+ break;
+ }
+ }
+ }
+ }
+ }
+ return (NULL);
+}
+#endif
+
+#ifdef INET
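+/* Receive thread for SCTP over raw IPv4: reads whole IP datagrams (a raw
+   IPv4 socket delivers the IP header) into an mbuf chain and hands them to
+   sctp_common_input_processing(). */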
+static void *
+recv_function_raw(void *arg)
+{
+ struct mbuf **recvmbuf;
+ struct ip *iphdr;
+ struct sctphdr *sh;
+ uint16_t port;
+ int offset, ecn = 0;
+ int compute_crc = 1;
+ struct sctp_chunkhdr *ch;
+ struct sockaddr_in src, dst;
+#if !defined(_WIN32)
+ unsigned int ncounter;
+ struct msghdr msg;
+ struct iovec recv_iovec[MAXLEN_MBUF_CHAIN];
+#else
+ WSABUF recv_iovec[MAXLEN_MBUF_CHAIN];
+ int nResult, m_ErrorCode;
+ DWORD flags;
+ DWORD ncounter;
+ struct sockaddr_in from;
+ int fromlen;
+#endif
+	/* Initially the entire set of mbufs has to be allocated;
+	   to_fill tracks how many must be replenished each iteration. */
+ int to_fill = MAXLEN_MBUF_CHAIN;
+ /* iovlen is the size of each mbuf in the chain */
+ int i, n;
+ unsigned int iovlen = MCLBYTES;
+ int want_ext = (iovlen > MLEN)? 1 : 0;
+ int want_header = 0;
+
+ sctp_userspace_set_threadname("SCTP/IP4 rcv");
+
+ memset(&src, 0, sizeof(struct sockaddr_in));
+ memset(&dst, 0, sizeof(struct sockaddr_in));
+
+ recvmbuf = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+
+ while (1) {
+ for (i = 0; i < to_fill; i++) {
+			/* The packet header is not requested here. Tests with a
+			   chain of one mbuf run as usual without the packet header,
+			   for both sending and receiving. */
+ recvmbuf[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA);
+#if !defined(_WIN32)
+ recv_iovec[i].iov_base = (caddr_t)recvmbuf[i]->m_data;
+ recv_iovec[i].iov_len = iovlen;
+#else
+ recv_iovec[i].buf = (caddr_t)recvmbuf[i]->m_data;
+ recv_iovec[i].len = iovlen;
+#endif
+ }
+ to_fill = 0;
+#if defined(_WIN32)
+ flags = 0;
+ ncounter = 0;
+ fromlen = sizeof(struct sockaddr_in);
+ memset(&from, 0, sizeof(struct sockaddr_in));
+
+ nResult = WSARecvFrom(SCTP_BASE_VAR(userspace_rawsctp), recv_iovec, MAXLEN_MBUF_CHAIN, &ncounter, &flags, (struct sockaddr *)&from, &fromlen, NULL, NULL);
+ if (nResult != 0) {
+ m_ErrorCode = WSAGetLastError();
+ if ((m_ErrorCode == WSAENOTSOCK) || (m_ErrorCode == WSAEINTR)) {
+ break;
+ }
+ continue;
+ }
+ n = ncounter;
+#else
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = recv_iovec;
+ msg.msg_iovlen = MAXLEN_MBUF_CHAIN;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_rawsctp), &msg, 0);
+ if (n < 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ continue;
+ } else {
+ break;
+ }
+ }
+#endif
+ SCTP_HEADER_LEN(recvmbuf[0]) = n; /* length of total packet */
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+
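+		/* A datagram that fits into the first mbuf consumed only that
+		   one; otherwise chain the remaining mbufs together. Every
+		   consumed mbuf is counted in to_fill so it gets reallocated
+		   on the next loop iteration. */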
+ if ((unsigned int)n <= iovlen) {
+ SCTP_BUF_LEN(recvmbuf[0]) = n;
+ (to_fill)++;
+ } else {
+ i = 0;
+ SCTP_BUF_LEN(recvmbuf[0]) = iovlen;
+
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ do {
+ recvmbuf[i]->m_next = recvmbuf[i+1];
+ SCTP_BUF_LEN(recvmbuf[i]->m_next) = min(ncounter, iovlen);
+ i++;
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ } while (ncounter > 0);
+ }
+
+ iphdr = mtod(recvmbuf[0], struct ip *);
+ sh = (struct sctphdr *)((caddr_t)iphdr + sizeof(struct ip));
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ offset = sizeof(struct ip) + sizeof(struct sctphdr);
+
+ if (iphdr->ip_tos != 0) {
+ ecn = iphdr->ip_tos & 0x02;
+ }
+
+ dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ dst.sin_addr = iphdr->ip_dst;
+ dst.sin_port = sh->dest_port;
+
+ src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ src.sin_len = sizeof(struct sockaddr_in);
+#endif
+ src.sin_addr = iphdr->ip_src;
+ src.sin_port = sh->src_port;
+
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
+ m_freem(recvmbuf[0]);
+ continue;
+ }
+ if (SCTP_IS_IT_BROADCAST(dst.sin_addr, recvmbuf[0])) {
+ m_freem(recvmbuf[0]);
+ continue;
+ }
+
+ port = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ ((IN4_ISLOOPBACK_ADDRESS(&src.sin_addr) &&
+ IN4_ISLOOPBACK_ADDRESS(&dst.sin_addr)) ||
+ (src.sin_addr.s_addr == dst.sin_addr.s_addr))) {
+ compute_crc = 0;
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ }
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Received %d bytes.", __func__, n);
+ SCTPDBG(SCTP_DEBUG_USR, " - calling sctp_common_input_processing with off=%d\n", offset);
+ sctp_common_input_processing(&recvmbuf[0], sizeof(struct ip), offset, n,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ compute_crc,
+ ecn,
+ SCTP_DEFAULT_VRFID, port);
+ if (recvmbuf[0]) {
+ m_freem(recvmbuf[0]);
+ }
+ }
+ for (i = 0; i < MAXLEN_MBUF_CHAIN; i++) {
+ m_free(recvmbuf[i]);
+ }
+ /* free the array itself */
+ free(recvmbuf);
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/IP4 rcv\n", __func__);
+ return (NULL);
+}
+#endif
+
+#if defined(INET6)
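+/* Receive thread for SCTP over raw IPv6. Unlike the IPv4 case no IP header
+   is delivered; the destination address is recovered from the IPV6_PKTINFO
+   control message. */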
+static void *
+recv_function_raw6(void *arg)
+{
+ struct mbuf **recvmbuf6;
+#if !defined(_WIN32)
+ unsigned int ncounter = 0;
+ struct iovec recv_iovec[MAXLEN_MBUF_CHAIN];
+ struct msghdr msg;
+ struct cmsghdr *cmsgptr;
+ char cmsgbuf[CMSG_SPACE(sizeof (struct in6_pktinfo))];
+#else
+ WSABUF recv_iovec[MAXLEN_MBUF_CHAIN];
+ int nResult, m_ErrorCode;
+ DWORD ncounter = 0;
+ struct sockaddr_in6 from;
+ GUID WSARecvMsg_GUID = WSAID_WSARECVMSG;
+ LPFN_WSARECVMSG WSARecvMsg;
+ WSACMSGHDR *cmsgptr;
+ WSAMSG msg;
+ char ControlBuffer[1024];
+#endif
+ struct sockaddr_in6 src, dst;
+ struct sctphdr *sh;
+ int offset;
+ struct sctp_chunkhdr *ch;
+	/* Initially the entire set of mbufs has to be allocated;
+	   to_fill tracks how many must be replenished each iteration. */
+ int to_fill = MAXLEN_MBUF_CHAIN;
+ /* iovlen is the size of each mbuf in the chain */
+ int i, n;
+ int compute_crc = 1;
+ unsigned int iovlen = MCLBYTES;
+ int want_ext = (iovlen > MLEN)? 1 : 0;
+ int want_header = 0;
+
+ sctp_userspace_set_threadname("SCTP/IP6 rcv");
+
+ recvmbuf6 = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+
+ for (;;) {
+ for (i = 0; i < to_fill; i++) {
+			/* The packet header is not requested here. Tests with a
+			   chain of one mbuf run as usual without the packet header,
+			   for both sending and receiving. */
+ recvmbuf6[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA);
+#if !defined(_WIN32)
+ recv_iovec[i].iov_base = (caddr_t)recvmbuf6[i]->m_data;
+ recv_iovec[i].iov_len = iovlen;
+#else
+ recv_iovec[i].buf = (caddr_t)recvmbuf6[i]->m_data;
+ recv_iovec[i].len = iovlen;
+#endif
+ }
+ to_fill = 0;
+#if defined(_WIN32)
+ ncounter = 0;
+ memset(&from, 0, sizeof(struct sockaddr_in6));
+ nResult = WSAIoctl(SCTP_BASE_VAR(userspace_rawsctp6), SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &WSARecvMsg_GUID, sizeof WSARecvMsg_GUID,
+ &WSARecvMsg, sizeof WSARecvMsg,
+ &ncounter, NULL, NULL);
+ if (nResult == 0) {
+ msg.name = (void *)&src;
+ msg.namelen = sizeof(struct sockaddr_in6);
+ msg.lpBuffers = recv_iovec;
+ msg.dwBufferCount = MAXLEN_MBUF_CHAIN;
+ msg.Control.len = sizeof ControlBuffer;
+ msg.Control.buf = ControlBuffer;
+ msg.dwFlags = 0;
+ nResult = WSARecvMsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg, &ncounter, NULL, NULL);
+ }
+ if (nResult != 0) {
+ m_ErrorCode = WSAGetLastError();
+ if ((m_ErrorCode == WSAENOTSOCK) || (m_ErrorCode == WSAEINTR)) {
+ break;
+ }
+ continue;
+ }
+ n = ncounter;
+#else
+ memset(&msg, 0, sizeof(struct msghdr));
+ memset(&src, 0, sizeof(struct sockaddr_in6));
+ memset(&dst, 0, sizeof(struct sockaddr_in6));
+ memset(cmsgbuf, 0, CMSG_SPACE(sizeof (struct in6_pktinfo)));
+ msg.msg_name = (void *)&src;
+ msg.msg_namelen = sizeof(struct sockaddr_in6);
+ msg.msg_iov = recv_iovec;
+ msg.msg_iovlen = MAXLEN_MBUF_CHAIN;
+ msg.msg_control = (void *)cmsgbuf;
+ msg.msg_controllen = (socklen_t)CMSG_SPACE(sizeof (struct in6_pktinfo));
+ msg.msg_flags = 0;
+
+ ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg, 0);
+ if (n < 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ continue;
+ } else {
+ break;
+ }
+ }
+#endif
+ SCTP_HEADER_LEN(recvmbuf6[0]) = n; /* length of total packet */
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+
+ if ((unsigned int)n <= iovlen) {
+ SCTP_BUF_LEN(recvmbuf6[0]) = n;
+ (to_fill)++;
+ } else {
+ i = 0;
+ SCTP_BUF_LEN(recvmbuf6[0]) = iovlen;
+
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ do {
+ recvmbuf6[i]->m_next = recvmbuf6[i+1];
+ SCTP_BUF_LEN(recvmbuf6[i]->m_next) = min(ncounter, iovlen);
+ i++;
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ } while (ncounter > 0);
+ }
+
+ for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) {
+ if ((cmsgptr->cmsg_level == IPPROTO_IPV6) && (cmsgptr->cmsg_type == IPV6_PKTINFO)) {
+ struct in6_pktinfo * info;
+
+ info = (struct in6_pktinfo *)CMSG_DATA(cmsgptr);
+ memcpy((void *)&dst.sin6_addr, (const void *) &(info->ipi6_addr), sizeof(struct in6_addr));
+ break;
+ }
+ }
+
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN6_IS_ADDR_MULTICAST(&dst.sin6_addr)) {
+ m_freem(recvmbuf6[0]);
+ continue;
+ }
+
+ sh = mtod(recvmbuf6[0], struct sctphdr *);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ offset = sizeof(struct sctphdr);
+
+ dst.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ dst.sin6_port = sh->dest_port;
+
+ src.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ src.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ src.sin6_port = sh->src_port;
+ if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (memcmp(&src.sin6_addr, &dst.sin6_addr, sizeof(struct in6_addr)) == 0)) {
+ compute_crc = 0;
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ }
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Received %d bytes.", __func__, n);
+ SCTPDBG(SCTP_DEBUG_USR, " - calling sctp_common_input_processing with off=%d\n", offset);
+ sctp_common_input_processing(&recvmbuf6[0], 0, offset, n,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ compute_crc,
+ 0,
+ SCTP_DEFAULT_VRFID, 0);
+ if (recvmbuf6[0]) {
+ m_freem(recvmbuf6[0]);
+ }
+ }
+ for (i = 0; i < MAXLEN_MBUF_CHAIN; i++) {
+ m_free(recvmbuf6[i]);
+ }
+ /* free the array itself */
+ free(recvmbuf6);
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/IP6 rcv\n", __func__);
+ return (NULL);
+}
+#endif
+
+#ifdef INET
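+/* Receive thread for SCTP over UDP/IPv4 encapsulation. The destination
+   address comes from the IP_PKTINFO or IP_RECVDSTADDR control message, and
+   the encapsulating UDP source port is passed on to the input path. */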
+static void *
+recv_function_udp(void *arg)
+{
+ struct mbuf **udprecvmbuf;
+	/* Initially the entire set of mbufs has to be allocated;
+	   to_fill tracks how many must be replenished each iteration. */
+ int to_fill = MAXLEN_MBUF_CHAIN;
+ /* iovlen is the size of each mbuf in the chain */
+ int i, n, offset;
+ unsigned int iovlen = MCLBYTES;
+ int want_ext = (iovlen > MLEN)? 1 : 0;
+ int want_header = 0;
+ struct sctphdr *sh;
+ uint16_t port;
+ struct sctp_chunkhdr *ch;
+ struct sockaddr_in src, dst;
+#if defined(IP_PKTINFO)
+ char cmsgbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
+#else
+ char cmsgbuf[CMSG_SPACE(sizeof(struct in_addr))];
+#endif
+ int compute_crc = 1;
+#if !defined(_WIN32)
+ unsigned int ncounter;
+ struct iovec iov[MAXLEN_MBUF_CHAIN];
+ struct msghdr msg;
+ struct cmsghdr *cmsgptr;
+#else
+ GUID WSARecvMsg_GUID = WSAID_WSARECVMSG;
+ LPFN_WSARECVMSG WSARecvMsg;
+ char ControlBuffer[1024];
+ WSABUF iov[MAXLEN_MBUF_CHAIN];
+ WSAMSG msg;
+ int nResult, m_ErrorCode;
+ WSACMSGHDR *cmsgptr;
+ DWORD ncounter;
+#endif
+
+ sctp_userspace_set_threadname("SCTP/UDP/IP4 rcv");
+
+ udprecvmbuf = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+
+ while (1) {
+ for (i = 0; i < to_fill; i++) {
+			/* The packet header is not requested here. Tests with a
+			   chain of one mbuf run as usual without the packet header,
+			   for both sending and receiving. */
+ udprecvmbuf[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA);
+#if !defined(_WIN32)
+ iov[i].iov_base = (caddr_t)udprecvmbuf[i]->m_data;
+ iov[i].iov_len = iovlen;
+#else
+ iov[i].buf = (caddr_t)udprecvmbuf[i]->m_data;
+ iov[i].len = iovlen;
+#endif
+ }
+ to_fill = 0;
+#if !defined(_WIN32)
+ memset(&msg, 0, sizeof(struct msghdr));
+#else
+ memset(&msg, 0, sizeof(WSAMSG));
+#endif
+ memset(&src, 0, sizeof(struct sockaddr_in));
+ memset(&dst, 0, sizeof(struct sockaddr_in));
+ memset(cmsgbuf, 0, sizeof(cmsgbuf));
+
+#if !defined(_WIN32)
+ msg.msg_name = (void *)&src;
+ msg.msg_namelen = sizeof(struct sockaddr_in);
+ msg.msg_iov = iov;
+ msg.msg_iovlen = MAXLEN_MBUF_CHAIN;
+ msg.msg_control = (void *)cmsgbuf;
+ msg.msg_controllen = sizeof(cmsgbuf);
+ msg.msg_flags = 0;
+
+ ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_udpsctp), &msg, 0);
+ if (n < 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ continue;
+ } else {
+ break;
+ }
+ }
+#else
+ nResult = WSAIoctl(SCTP_BASE_VAR(userspace_udpsctp), SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &WSARecvMsg_GUID, sizeof WSARecvMsg_GUID,
+ &WSARecvMsg, sizeof WSARecvMsg,
+ &ncounter, NULL, NULL);
+ if (nResult == 0) {
+ msg.name = (void *)&src;
+ msg.namelen = sizeof(struct sockaddr_in);
+ msg.lpBuffers = iov;
+ msg.dwBufferCount = MAXLEN_MBUF_CHAIN;
+ msg.Control.len = sizeof ControlBuffer;
+ msg.Control.buf = ControlBuffer;
+ msg.dwFlags = 0;
+ nResult = WSARecvMsg(SCTP_BASE_VAR(userspace_udpsctp), &msg, &ncounter, NULL, NULL);
+ }
+ if (nResult != 0) {
+ m_ErrorCode = WSAGetLastError();
+ if ((m_ErrorCode == WSAENOTSOCK) || (m_ErrorCode == WSAEINTR)) {
+ break;
+ }
+ continue;
+ }
+ n = ncounter;
+#endif
+ SCTP_HEADER_LEN(udprecvmbuf[0]) = n; /* length of total packet */
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+
+ if ((unsigned int)n <= iovlen) {
+ SCTP_BUF_LEN(udprecvmbuf[0]) = n;
+ (to_fill)++;
+ } else {
+ i = 0;
+ SCTP_BUF_LEN(udprecvmbuf[0]) = iovlen;
+
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ do {
+ udprecvmbuf[i]->m_next = udprecvmbuf[i+1];
+ SCTP_BUF_LEN(udprecvmbuf[i]->m_next) = min(ncounter, iovlen);
+ i++;
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ } while (ncounter > 0);
+ }
+
+ for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) {
+#if defined(IP_PKTINFO)
+ if ((cmsgptr->cmsg_level == IPPROTO_IP) && (cmsgptr->cmsg_type == IP_PKTINFO)) {
+ struct in_pktinfo *info;
+
+ dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ info = (struct in_pktinfo *)CMSG_DATA(cmsgptr);
+ memcpy((void *)&dst.sin_addr, (const void *)&(info->ipi_addr), sizeof(struct in_addr));
+ break;
+ }
+#else
+ if ((cmsgptr->cmsg_level == IPPROTO_IP) && (cmsgptr->cmsg_type == IP_RECVDSTADDR)) {
+ struct in_addr *addr;
+
+ dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ addr = (struct in_addr *)CMSG_DATA(cmsgptr);
+ memcpy((void *)&dst.sin_addr, (const void *)addr, sizeof(struct in_addr));
+ break;
+ }
+#endif
+ }
+
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
+ m_freem(udprecvmbuf[0]);
+ continue;
+ }
+ if (SCTP_IS_IT_BROADCAST(dst.sin_addr, udprecvmbuf[0])) {
+ m_freem(udprecvmbuf[0]);
+ continue;
+ }
+
+ /*offset = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);*/
+ sh = mtod(udprecvmbuf[0], struct sctphdr *);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ offset = sizeof(struct sctphdr);
+ port = src.sin_port;
+ src.sin_port = sh->src_port;
+ dst.sin_port = sh->dest_port;
+ if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (src.sin_addr.s_addr == dst.sin_addr.s_addr)) {
+ compute_crc = 0;
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ }
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Received %d bytes.", __func__, n);
+ SCTPDBG(SCTP_DEBUG_USR, " - calling sctp_common_input_processing with off=%d\n", offset);
+ sctp_common_input_processing(&udprecvmbuf[0], 0, offset, n,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ compute_crc,
+ 0,
+ SCTP_DEFAULT_VRFID, port);
+ if (udprecvmbuf[0]) {
+ m_freem(udprecvmbuf[0]);
+ }
+ }
+ for (i = 0; i < MAXLEN_MBUF_CHAIN; i++) {
+ m_free(udprecvmbuf[i]);
+ }
+ /* free the array itself */
+ free(udprecvmbuf);
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/UDP/IP4 rcv\n", __func__);
+ return (NULL);
+}
+#endif
+
+#if defined(INET6)
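+/* Receive thread for SCTP over UDP/IPv6 encapsulation, the IPv6 counterpart
+   of recv_function_udp(). */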
+static void *
+recv_function_udp6(void *arg)
+{
+ struct mbuf **udprecvmbuf6;
+	/* Initially the entire set of mbufs has to be allocated;
+	   to_fill tracks how many must be replenished each iteration. */
+ int to_fill = MAXLEN_MBUF_CHAIN;
+ /* iovlen is the size of each mbuf in the chain */
+ int i, n, offset;
+ unsigned int iovlen = MCLBYTES;
+ int want_ext = (iovlen > MLEN)? 1 : 0;
+ int want_header = 0;
+ struct sockaddr_in6 src, dst;
+ struct sctphdr *sh;
+ uint16_t port;
+ struct sctp_chunkhdr *ch;
+ char cmsgbuf[CMSG_SPACE(sizeof (struct in6_pktinfo))];
+ int compute_crc = 1;
+#if !defined(_WIN32)
+ struct iovec iov[MAXLEN_MBUF_CHAIN];
+ struct msghdr msg;
+ struct cmsghdr *cmsgptr;
+ unsigned int ncounter;
+#else
+ GUID WSARecvMsg_GUID = WSAID_WSARECVMSG;
+ LPFN_WSARECVMSG WSARecvMsg;
+ char ControlBuffer[1024];
+ WSABUF iov[MAXLEN_MBUF_CHAIN];
+ WSAMSG msg;
+ int nResult, m_ErrorCode;
+ WSACMSGHDR *cmsgptr;
+ DWORD ncounter;
+#endif
+
+ sctp_userspace_set_threadname("SCTP/UDP/IP6 rcv");
+
+ udprecvmbuf6 = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+ while (1) {
+ for (i = 0; i < to_fill; i++) {
+			/* The packet header is not requested here. Tests with a
+			   chain of one mbuf run as usual without the packet header,
+			   for both sending and receiving. */
+ udprecvmbuf6[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA);
+#if !defined(_WIN32)
+ iov[i].iov_base = (caddr_t)udprecvmbuf6[i]->m_data;
+ iov[i].iov_len = iovlen;
+#else
+ iov[i].buf = (caddr_t)udprecvmbuf6[i]->m_data;
+ iov[i].len = iovlen;
+#endif
+ }
+ to_fill = 0;
+
+#if !defined(_WIN32)
+ memset(&msg, 0, sizeof(struct msghdr));
+#else
+ memset(&msg, 0, sizeof(WSAMSG));
+#endif
+ memset(&src, 0, sizeof(struct sockaddr_in6));
+ memset(&dst, 0, sizeof(struct sockaddr_in6));
+ memset(cmsgbuf, 0, CMSG_SPACE(sizeof (struct in6_pktinfo)));
+
+#if !defined(_WIN32)
+ msg.msg_name = (void *)&src;
+ msg.msg_namelen = sizeof(struct sockaddr_in6);
+ msg.msg_iov = iov;
+ msg.msg_iovlen = MAXLEN_MBUF_CHAIN;
+ msg.msg_control = (void *)cmsgbuf;
+ msg.msg_controllen = (socklen_t)CMSG_SPACE(sizeof (struct in6_pktinfo));
+ msg.msg_flags = 0;
+
+ ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg, 0);
+ if (n < 0) {
+ if (errno == EAGAIN || errno == EINTR) {
+ continue;
+ } else {
+ break;
+ }
+ }
+#else
+ nResult = WSAIoctl(SCTP_BASE_VAR(userspace_udpsctp6), SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &WSARecvMsg_GUID, sizeof WSARecvMsg_GUID,
+ &WSARecvMsg, sizeof WSARecvMsg,
+ &ncounter, NULL, NULL);
+ if (nResult == SOCKET_ERROR) {
+ m_ErrorCode = WSAGetLastError();
+ WSARecvMsg = NULL;
+ }
+ if (nResult == 0) {
+ msg.name = (void *)&src;
+ msg.namelen = sizeof(struct sockaddr_in6);
+ msg.lpBuffers = iov;
+ msg.dwBufferCount = MAXLEN_MBUF_CHAIN;
+ msg.Control.len = sizeof ControlBuffer;
+ msg.Control.buf = ControlBuffer;
+ msg.dwFlags = 0;
+ nResult = WSARecvMsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg, &ncounter, NULL, NULL);
+ }
+ if (nResult != 0) {
+ m_ErrorCode = WSAGetLastError();
+ if ((m_ErrorCode == WSAENOTSOCK) || (m_ErrorCode == WSAEINTR)) {
+ break;
+ }
+ continue;
+ }
+ n = ncounter;
+#endif
+ SCTP_HEADER_LEN(udprecvmbuf6[0]) = n; /* length of total packet */
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+
+ if ((unsigned int)n <= iovlen) {
+ SCTP_BUF_LEN(udprecvmbuf6[0]) = n;
+ (to_fill)++;
+ } else {
+ i = 0;
+ SCTP_BUF_LEN(udprecvmbuf6[0]) = iovlen;
+
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ do {
+ udprecvmbuf6[i]->m_next = udprecvmbuf6[i+1];
+ SCTP_BUF_LEN(udprecvmbuf6[i]->m_next) = min(ncounter, iovlen);
+ i++;
+ ncounter -= min(ncounter, iovlen);
+ (to_fill)++;
+ } while (ncounter > 0);
+ }
+
+ for (cmsgptr = CMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) {
+ if ((cmsgptr->cmsg_level == IPPROTO_IPV6) && (cmsgptr->cmsg_type == IPV6_PKTINFO)) {
+ struct in6_pktinfo *info;
+
+ dst.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+ dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ info = (struct in6_pktinfo *)CMSG_DATA(cmsgptr);
+ /*dst.sin6_port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));*/
+ memcpy((void *)&dst.sin6_addr, (const void *)&(info->ipi6_addr), sizeof(struct in6_addr));
+ }
+ }
+
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN6_IS_ADDR_MULTICAST(&dst.sin6_addr)) {
+ m_freem(udprecvmbuf6[0]);
+ continue;
+ }
+
+ sh = mtod(udprecvmbuf6[0], struct sctphdr *);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ offset = sizeof(struct sctphdr);
+
+ port = src.sin6_port;
+ src.sin6_port = sh->src_port;
+ dst.sin6_port = sh->dest_port;
+ if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (memcmp(&src.sin6_addr, &dst.sin6_addr, sizeof(struct in6_addr)) == 0)) {
+ compute_crc = 0;
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ }
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Received %d bytes.", __func__, n);
+ SCTPDBG(SCTP_DEBUG_USR, " - calling sctp_common_input_processing with off=%d\n", (int)sizeof(struct sctphdr));
+ sctp_common_input_processing(&udprecvmbuf6[0], 0, offset, n,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ compute_crc,
+ 0,
+ SCTP_DEFAULT_VRFID, port);
+ if (udprecvmbuf6[0]) {
+ m_freem(udprecvmbuf6[0]);
+ }
+ }
+ for (i = 0; i < MAXLEN_MBUF_CHAIN; i++) {
+ m_free(udprecvmbuf6[i]);
+ }
+ /* free the array itself */
+ free(udprecvmbuf6);
+ SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/UDP/IP6 rcv\n", __func__);
+ return (NULL);
+}
+#endif
+
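+/* Best-effort helpers: adjust SO_RCVBUF/SO_SNDBUF and only log on failure. */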
+#if defined(_WIN32)
+static void
+setReceiveBufferSize(SOCKET sfd, int new_size)
+#else
+static void
+setReceiveBufferSize(int sfd, int new_size)
+#endif
+{
+ int ch = new_size;
+
+ if (setsockopt (sfd, SOL_SOCKET, SO_RCVBUF, (void*)&ch, sizeof(ch)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set recv-buffers size (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set recv-buffers size (errno = %d).\n", errno);
+#endif
+ }
+ return;
+}
+
+#if defined(_WIN32)
+static void
+setSendBufferSize(SOCKET sfd, int new_size)
+#else
+static void
+setSendBufferSize(int sfd, int new_size)
+#endif
+{
+ int ch = new_size;
+
+ if (setsockopt (sfd, SOL_SOCKET, SO_SNDBUF, (void*)&ch, sizeof(ch)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set send-buffers size (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set send-buffers size (errno = %d).\n", errno);
+#endif
+ }
+ return;
+}
+
+#define SOCKET_TIMEOUT 100 /* in ms */
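+/* Create the raw and UDP sockets (as enabled by INET/INET6), apply a
+   SOCKET_TIMEOUT receive timeout so the threads can be shut down, and spawn
+   one receive thread per successfully opened socket. */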
+void
+recv_thread_init(void)
+{
+#if defined(INET)
+ struct sockaddr_in addr_ipv4;
+ const int hdrincl = 1;
+#endif
+#if defined(INET6)
+ struct sockaddr_in6 addr_ipv6;
+#endif
+#if defined(INET) || defined(INET6)
+ const int on = 1;
+#endif
+#if !defined(_WIN32)
+ struct timeval timeout;
+
+ memset(&timeout, 0, sizeof(struct timeval));
+ timeout.tv_sec = (SOCKET_TIMEOUT / 1000);
+ timeout.tv_usec = (SOCKET_TIMEOUT % 1000) * 1000;
+#else
+ unsigned int timeout = SOCKET_TIMEOUT; /* Timeout in milliseconds */
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
+ if (SCTP_BASE_VAR(userspace_route) == -1) {
+ if ((SCTP_BASE_VAR(userspace_route) = socket(AF_ROUTE, SOCK_RAW, 0)) == -1) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create routing socket (errno = %d).\n", errno);
+ }
+#if 0
+ struct sockaddr_nl sanl;
+
+ if ((SCTP_BASE_VAR(userspace_route) = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)) < 0) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create routing socket (errno = %d.\n", errno);
+ }
+ memset(&sanl, 0, sizeof(sanl));
+ sanl.nl_family = AF_NETLINK;
+ sanl.nl_groups = 0;
+#ifdef INET
+ sanl.nl_groups |= RTMGRP_IPV4_IFADDR;
+#endif
+#ifdef INET6
+ sanl.nl_groups |= RTMGRP_IPV6_IFADDR;
+#endif
+ if (bind(SCTP_BASE_VAR(userspace_route), (struct sockaddr *) &sanl, sizeof(sanl)) < 0) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind routing socket (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_route));
+ SCTP_BASE_VAR(userspace_route) = -1;
+ }
+#endif
+ if (SCTP_BASE_VAR(userspace_route) != -1) {
+ if (setsockopt(SCTP_BASE_VAR(userspace_route), SOL_SOCKET, SO_RCVTIMEO,(const void*)&timeout, sizeof(struct timeval)) < 0) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on routing socket (errno = %d).\n", errno);
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_route));
+#else
+ close(SCTP_BASE_VAR(userspace_route));
+#endif
+ SCTP_BASE_VAR(userspace_route) = -1;
+ }
+ }
+ }
+#endif
+#if defined(INET)
+ if (SCTP_BASE_VAR(userspace_rawsctp) == -1) {
+ if ((SCTP_BASE_VAR(userspace_rawsctp) = socket(AF_INET, SOCK_RAW, IPPROTO_SCTP)) == -1) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create raw socket for IPv4 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create raw socket for IPv4 (errno = %d).\n", errno);
+#endif
+ } else {
+ /* complete setting up the raw SCTP socket */
+ if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp), IPPROTO_IP, IP_HDRINCL,(const void*)&hdrincl, sizeof(int)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_HDRINCL (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_HDRINCL (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ } else if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/IPv4 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/IPv4 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ } else {
+ memset((void *)&addr_ipv4, 0, sizeof(struct sockaddr_in));
+#ifdef HAVE_SIN_LEN
+ addr_ipv4.sin_len = sizeof(struct sockaddr_in);
+#endif
+ addr_ipv4.sin_family = AF_INET;
+ addr_ipv4.sin_port = htons(0);
+ addr_ipv4.sin_addr.s_addr = htonl(INADDR_ANY);
+ if (bind(SCTP_BASE_VAR(userspace_rawsctp), (const struct sockaddr *)&addr_ipv4, sizeof(struct sockaddr_in)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/IPv4 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/IPv4 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ } else {
+ setReceiveBufferSize(SCTP_BASE_VAR(userspace_rawsctp), SB_RAW); /* 128K */
+ setSendBufferSize(SCTP_BASE_VAR(userspace_rawsctp), SB_RAW); /* 128K Is this setting net.inet.raw.maxdgram value? Should it be set to 64K? */
+ }
+ }
+ }
+ }
+ if (SCTP_BASE_VAR(userspace_udpsctp) == -1) {
+ if ((SCTP_BASE_VAR(userspace_udpsctp) = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) == -1) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv4 (errno = %d).\n", errno);
+#endif
+ } else {
+#if defined(IP_PKTINFO)
+ if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp), IPPROTO_IP, IP_PKTINFO, (const void *)&on, (int)sizeof(int)) < 0) {
+#else
+ if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp), IPPROTO_IP, IP_RECVDSTADDR, (const void *)&on, (int)sizeof(int)) < 0) {
+#endif
+#if defined(_WIN32)
+#if defined(IP_PKTINFO)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_PKTINFO on socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_RECVDSTADDR on socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError());
+#endif
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp));
+#else
+#if defined(IP_PKTINFO)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_PKTINFO on socket for SCTP/UDP/IPv4 (errno = %d).\n", errno);
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_RECVDSTADDR on socket for SCTP/UDP/IPv4 (errno = %d).\n", errno);
+#endif
+ close(SCTP_BASE_VAR(userspace_udpsctp));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+ } else if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/UDP/IPv4 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_udpsctp));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+ } else {
+ memset((void *)&addr_ipv4, 0, sizeof(struct sockaddr_in));
+#ifdef HAVE_SIN_LEN
+ addr_ipv4.sin_len = sizeof(struct sockaddr_in);
+#endif
+ addr_ipv4.sin_family = AF_INET;
+ addr_ipv4.sin_port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ addr_ipv4.sin_addr.s_addr = htonl(INADDR_ANY);
+ if (bind(SCTP_BASE_VAR(userspace_udpsctp), (const struct sockaddr *)&addr_ipv4, sizeof(struct sockaddr_in)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/UDP/IPv4 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_udpsctp));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+ } else {
+ setReceiveBufferSize(SCTP_BASE_VAR(userspace_udpsctp), SB_RAW); /* 128K */
+ setSendBufferSize(SCTP_BASE_VAR(userspace_udpsctp), SB_RAW); /* 128K Is this setting net.inet.raw.maxdgram value? Should it be set to 64K? */
+ }
+ }
+ }
+ }
+#endif
+#if defined(INET6)
+ if (SCTP_BASE_VAR(userspace_rawsctp6) == -1) {
+ if ((SCTP_BASE_VAR(userspace_rawsctp6) = socket(AF_INET6, SOCK_RAW, IPPROTO_SCTP)) == -1) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/IPv6 (errno = %d).\n", errno);
+#endif
+ } else {
+ /* complete setting up the raw SCTP socket */
+#if defined(IPV6_RECVPKTINFO)
+ if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), IPPROTO_IPV6, IPV6_RECVPKTINFO, (const void *)&on, sizeof(on)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_RECVPKTINFO on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_RECVPKTINFO on socket for SCTP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ } else {
+#else
+ if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), IPPROTO_IPV6, IPV6_PKTINFO,(const void*)&on, sizeof(on)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_PKTINFO on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_PKTINFO on socket for SCTP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ } else {
+#endif
+ if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), IPPROTO_IPV6, IPV6_V6ONLY, (const void*)&on, (socklen_t)sizeof(on)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/IPv6 (errno = %d).\n", errno);
+#endif
+ }
+ if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ } else {
+ memset((void *)&addr_ipv6, 0, sizeof(struct sockaddr_in6));
+#ifdef HAVE_SIN6_LEN
+ addr_ipv6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ addr_ipv6.sin6_family = AF_INET6;
+ addr_ipv6.sin6_port = htons(0);
+ addr_ipv6.sin6_addr = in6addr_any;
+ if (bind(SCTP_BASE_VAR(userspace_rawsctp6), (const struct sockaddr *)&addr_ipv6, sizeof(struct sockaddr_in6)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_rawsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ } else {
+ setReceiveBufferSize(SCTP_BASE_VAR(userspace_rawsctp6), SB_RAW); /* 128K */
+ setSendBufferSize(SCTP_BASE_VAR(userspace_rawsctp6), SB_RAW); /* 128K Is this setting net.inet.raw.maxdgram value? Should it be set to 64K? */
+ }
+ }
+ }
+ }
+ }
+ if (SCTP_BASE_VAR(userspace_udpsctp6) == -1) {
+ if ((SCTP_BASE_VAR(userspace_udpsctp6) = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP)) == -1) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv6 (errno = %d).\n", errno);
+#endif
+ }
+#if defined(IPV6_RECVPKTINFO)
+ if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), IPPROTO_IPV6, IPV6_RECVPKTINFO, (const void *)&on, (int)sizeof(int)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_RECVPKTINFO on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_RECVPKTINFO on socket for SCTP/UDP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_udpsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ } else {
+#else
+ if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), IPPROTO_IPV6, IPV6_PKTINFO, (const void *)&on, (int)sizeof(int)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_PKTINFO on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_PKTINFO on socket for SCTP/UDP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_udpsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ } else {
+#endif
+ if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), IPPROTO_IPV6, IPV6_V6ONLY, (const void *)&on, (socklen_t)sizeof(on)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError());
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/UDP/IPv6 (errno = %d).\n", errno);
+#endif
+ }
+ if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/UDP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_udpsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ } else {
+ memset((void *)&addr_ipv6, 0, sizeof(struct sockaddr_in6));
+#ifdef HAVE_SIN6_LEN
+ addr_ipv6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+ addr_ipv6.sin6_family = AF_INET6;
+ addr_ipv6.sin6_port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ addr_ipv6.sin6_addr = in6addr_any;
+ if (bind(SCTP_BASE_VAR(userspace_udpsctp6), (const struct sockaddr *)&addr_ipv6, sizeof(struct sockaddr_in6)) < 0) {
+#if defined(_WIN32)
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError());
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp6));
+#else
+ SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/UDP/IPv6 (errno = %d).\n", errno);
+ close(SCTP_BASE_VAR(userspace_udpsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ } else {
+ setReceiveBufferSize(SCTP_BASE_VAR(userspace_udpsctp6), SB_RAW); /* 128K */
+ setSendBufferSize(SCTP_BASE_VAR(userspace_udpsctp6), SB_RAW); /* 128K Is this setting net.inet.raw.maxdgram value? Should it be set to 64K? */
+ }
+ }
+ }
+ }
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
+#if defined(INET) || defined(INET6)
+ if (SCTP_BASE_VAR(userspace_route) != -1) {
+ int rc;
+
+ if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadroute), &recv_function_route))) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't start routing thread (%d).\n", rc);
+ close(SCTP_BASE_VAR(userspace_route));
+ SCTP_BASE_VAR(userspace_route) = -1;
+ }
+ }
+#endif
+#endif
+#if defined(INET)
+ if (SCTP_BASE_VAR(userspace_rawsctp) != -1) {
+ int rc;
+
+ if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadraw), &recv_function_raw))) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/IPv4 recv thread (%d).\n", rc);
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp));
+#else
+ close(SCTP_BASE_VAR(userspace_rawsctp));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ }
+ }
+ if (SCTP_BASE_VAR(userspace_udpsctp) != -1) {
+ int rc;
+
+ if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadudp), &recv_function_udp))) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/UDP/IPv4 recv thread (%d).\n", rc);
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp));
+#else
+ close(SCTP_BASE_VAR(userspace_udpsctp));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+ }
+ }
+#endif
+#if defined(INET6)
+ if (SCTP_BASE_VAR(userspace_rawsctp6) != -1) {
+ int rc;
+
+ if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadraw6), &recv_function_raw6))) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/IPv6 recv thread (%d).\n", rc);
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp6));
+#else
+ close(SCTP_BASE_VAR(userspace_rawsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ }
+ }
+ if (SCTP_BASE_VAR(userspace_udpsctp6) != -1) {
+ int rc;
+
+ if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadudp6), &recv_function_udp6))) {
+ SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/UDP/IPv6 recv thread (%d).\n", rc);
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp6));
+#else
+ close(SCTP_BASE_VAR(userspace_udpsctp6));
+#endif
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ }
+ }
+#endif
+}
+
+void
+recv_thread_destroy(void)
+{
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
+#if defined(INET) || defined(INET6)
+ if (SCTP_BASE_VAR(userspace_route) != -1) {
+ close(SCTP_BASE_VAR(userspace_route));
+ pthread_join(SCTP_BASE_VAR(recvthreadroute), NULL);
+ }
+#endif
+#endif
+#if defined(INET)
+ if (SCTP_BASE_VAR(userspace_rawsctp) != -1) {
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp));
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw), INFINITE);
+ CloseHandle(SCTP_BASE_VAR(recvthreadraw));
+#else
+ close(SCTP_BASE_VAR(userspace_rawsctp));
+ SCTP_BASE_VAR(userspace_rawsctp) = -1;
+ pthread_join(SCTP_BASE_VAR(recvthreadraw), NULL);
+#endif
+ }
+ if (SCTP_BASE_VAR(userspace_udpsctp) != -1) {
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp));
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+ WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp), INFINITE);
+ CloseHandle(SCTP_BASE_VAR(recvthreadudp));
+#else
+ close(SCTP_BASE_VAR(userspace_udpsctp));
+ SCTP_BASE_VAR(userspace_udpsctp) = -1;
+ pthread_join(SCTP_BASE_VAR(recvthreadudp), NULL);
+#endif
+ }
+#endif
+#if defined(INET6)
+ if (SCTP_BASE_VAR(userspace_rawsctp6) != -1) {
+#if defined(_WIN32)
+ closesocket(SCTP_BASE_VAR(userspace_rawsctp6));
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw6), INFINITE);
+ CloseHandle(SCTP_BASE_VAR(recvthreadraw6));
+#else
+ close(SCTP_BASE_VAR(userspace_rawsctp6));
+ SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+ pthread_join(SCTP_BASE_VAR(recvthreadraw6), NULL);
+#endif
+ }
+ if (SCTP_BASE_VAR(userspace_udpsctp6) != -1) {
+#if defined(_WIN32)
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ closesocket(SCTP_BASE_VAR(userspace_udpsctp6));
+ WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp6), INFINITE);
+ CloseHandle(SCTP_BASE_VAR(recvthreadudp6));
+#else
+ close(SCTP_BASE_VAR(userspace_udpsctp6));
+ SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+ pthread_join(SCTP_BASE_VAR(recvthreadudp6), NULL);
+#endif
+ }
+#endif
+}
+#else
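+/* Avoid an empty translation unit when neither INET nor INET6 is defined. */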
+int foo;
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/user_recv_thread.h b/ext/sctp/usrsctp/usrsctplib/user_recv_thread.h
new file mode 100644
index 000000000..61cdf1721
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_recv_thread.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2012 Michael Tuexen
+ * Copyright (c) 2012 Irene Ruengeler
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USER_RECV_THREAD_H_
+#define _USER_RECV_THREAD_H_
+
+void recv_thread_init(void);
+void recv_thread_destroy(void);
+
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/user_route.h b/ext/sctp/usrsctp/usrsctplib/user_route.h
new file mode 100644
index 000000000..82b07d769
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_route.h
@@ -0,0 +1,130 @@
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _USER_ROUTE_H_
+#define _USER_ROUTE_H_
+
+/*
+ * Kernel resident routing tables.
+ *
+ * The routing tables are initialized when interface addresses
+ * are set by making entries for all directly connected interfaces.
+ */
+
+/*
+ * A route consists of a destination address and a reference
+ * to a routing entry. These are often held by protocols
+ * in their control blocks, e.g. inpcb.
+ */
+
+struct sctp_route {
+ struct sctp_rtentry *ro_rt;
+ struct sockaddr ro_dst;
+};
+
+/*
+ * These numbers are used by reliable protocols for determining
+ * retransmission behavior and are included in the routing structure.
+ */
+struct sctp_rt_metrics_lite {
+ uint32_t rmx_mtu; /* MTU for this path */
+#if 0
+ u_long rmx_expire; /* lifetime for route, e.g. redirect */
+ u_long rmx_pksent; /* packets sent using this route */
+#endif
+};
+
+/*
+ * We distinguish between routes to hosts and routes to networks,
+ * preferring the former if available. For each route we infer
+ * the interface to use from the gateway address supplied when
+ * the route was entered. Routes that forward packets through
+ * gateways are marked so that the output routines know to address the
+ * gateway rather than the ultimate destination.
+ */
+struct sctp_rtentry {
+#if 0
+ struct radix_node rt_nodes[2]; /* tree glue, and other values */
+ /*
+ * XXX struct rtentry must begin with a struct radix_node (or two!)
+ * because the code does some casts of a 'struct radix_node *'
+ * to a 'struct rtentry *'
+ */
+#define rt_key(r) (*((struct sockaddr **)(&(r)->rt_nodes->rn_key)))
+#define rt_mask(r) (*((struct sockaddr **)(&(r)->rt_nodes->rn_mask)))
+ struct sockaddr *rt_gateway; /* value */
+ u_long rt_flags; /* up/down?, host/net */
+#endif
+ struct ifnet *rt_ifp; /* the answer: interface to use */
+ struct ifaddr *rt_ifa; /* the answer: interface address to use */
+ struct sctp_rt_metrics_lite rt_rmx; /* metrics used by rx'ing protocols */
+ long rt_refcnt; /* # held references */
+#if 0
+ struct sockaddr *rt_genmask; /* for generation of cloned routes */
+ caddr_t rt_llinfo; /* pointer to link level info cache */
+ struct rtentry *rt_gwroute; /* implied entry for gatewayed routes */
+ struct rtentry *rt_parent; /* cloning parent of this route */
+#endif
+ struct mtx rt_mtx; /* mutex for routing entry */
+};
+
+#define RT_LOCK_INIT(_rt) mtx_init(&(_rt)->rt_mtx, "rtentry", NULL, MTX_DEF | MTX_DUPOK)
+#define RT_LOCK(_rt) mtx_lock(&(_rt)->rt_mtx)
+#define RT_UNLOCK(_rt) mtx_unlock(&(_rt)->rt_mtx)
+#define RT_LOCK_DESTROY(_rt) mtx_destroy(&(_rt)->rt_mtx)
+#define RT_LOCK_ASSERT(_rt) mtx_assert(&(_rt)->rt_mtx, MA_OWNED)
+
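+/* Reference counting for routing entries; callers must hold the entry lock.
+   RTFREE_LOCKED() frees the entry on the last reference and always NULLs the
+   caller's pointer. */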
+#define RT_ADDREF(_rt) do { \
+ RT_LOCK_ASSERT(_rt); \
+ KASSERT((_rt)->rt_refcnt >= 0, \
+ ("negative refcnt %ld", (_rt)->rt_refcnt)); \
+ (_rt)->rt_refcnt++; \
+} while (0)
+#define RT_REMREF(_rt) do { \
+ RT_LOCK_ASSERT(_rt); \
+ KASSERT((_rt)->rt_refcnt > 0, \
+ ("bogus refcnt %ld", (_rt)->rt_refcnt)); \
+ (_rt)->rt_refcnt--; \
+} while (0)
+#define RTFREE_LOCKED(_rt) do { \
+ if ((_rt)->rt_refcnt <= 1) { \
+ rtfree(_rt); \
+ } else { \
+ RT_REMREF(_rt); \
+ RT_UNLOCK(_rt); \
+ } \
+ /* guard against invalid refs */ \
+ _rt = NULL; \
+ } while (0)
+#define RTFREE(_rt) do { \
+ RT_LOCK(_rt); \
+ RTFREE_LOCKED(_rt); \
+} while (0)
+#endif
diff --git a/ext/sctp/usrsctp/usrsctplib/user_socket.c b/ext/sctp/usrsctp/usrsctplib/user_socket.c
new file mode 100644
index 000000000..b72b05ebc
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_socket.c
@@ -0,0 +1,3590 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2004 The FreeBSD Foundation
+ * Copyright (c) 2004-2008 Robert N. M. Watson
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_peeloff.h>
+#include <netinet/sctp_callout.h>
+#include <netinet/sctp_crc32.h>
+#ifdef INET6
+#include <netinet6/sctp6_var.h>
+#endif
+#if defined(__FreeBSD__)
+#include <sys/param.h>
+#endif
+#if defined(__linux__)
+#define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
+#endif
+#if !defined(_WIN32)
+#if defined INET || defined INET6
+#include <netinet/udp.h>
+#endif
+#include <arpa/inet.h>
+#else
+#include <user_socketvar.h>
+#endif
+userland_mutex_t accept_mtx;
+userland_cond_t accept_cond;
+#ifdef _WIN32
+#include <time.h>
+#include <sys/timeb.h>
+#endif
+
+MALLOC_DEFINE(M_PCB, "sctp_pcb", "sctp pcb");
+MALLOC_DEFINE(M_SONAME, "sctp_soname", "sctp soname");
+#define MAXLEN_MBUF_CHAIN 32
+
+/* Prototypes */
+extern int sctp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
+ struct mbuf *top, struct mbuf *control, int flags,
+ /* proc is a dummy in __Userspace__ and will not be passed to sctp_lower_sosend */
+ struct proc *p);
+
+extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id);
+extern int sctpconn_attach(struct socket *so, int proto, uint32_t vrf_id);
+
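+/* One-time setup of the global accept mutex and condition variable (plus
+   WSAStartup on Windows); called from both init variants below. */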
+static void init_sync(void) {
+#if defined(_WIN32)
+#if defined(INET) || defined(INET6)
+ WSADATA wsaData;
+
+ if (WSAStartup(MAKEWORD(2,2), &wsaData) != 0) {
+ SCTP_PRINTF("WSAStartup failed\n");
+ exit (-1);
+ }
+#endif
+ InitializeConditionVariable(&accept_cond);
+ InitializeCriticalSection(&accept_mtx);
+#else
+ pthread_mutexattr_t mutex_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+#ifdef INVARIANTS
+ pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
+#endif
+ pthread_mutex_init(&accept_mtx, &mutex_attr);
+ pthread_mutexattr_destroy(&mutex_attr);
+ pthread_cond_init(&accept_cond, NULL);
+#endif
+}
+
+void
+usrsctp_init(uint16_t port,
+ int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+ void (*debug_printf)(const char *format, ...))
+{
+ init_sync();
+ sctp_init(port, conn_output, debug_printf, 1);
+}
+
+
+void
+usrsctp_init_nothreads(uint16_t port,
+ int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+ void (*debug_printf)(const char *format, ...))
+{
+ init_sync();
+ sctp_init(port, conn_output, debug_printf, 0);
+}
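+
+/*
+ * Illustrative usage sketch (not part of the library): initialize the stack
+ * once before creating any sockets. The port selects SCTP-over-UDP
+ * encapsulation (0 keeps plain IP); the value 9899 (the registered SCTP
+ * tunneling port) and the NULL callbacks below are assumptions made for the
+ * example only:
+ *
+ *   usrsctp_init(9899, NULL, NULL);
+ */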
+
+
+/* Taken from usr/src/sys/kern/uipc_sockbuf.c and modified for __Userspace__*/
+/*
+ * socantsendmore indicates that no more data will be sent on the socket; it
+ * is normally applied to a socket by the protocol code when the user informs
+ * the system that no more data is to be sent (the PRU_SHUTDOWN case).
+ * socantrcvmore indicates that no more data will be received, and is
+ * normally applied to a socket by a protocol when it detects that the peer
+ * will send no more data. Data queued for reading in the socket may yet be
+ * read.
+ */
+
+void socantrcvmore_locked(struct socket *so)
+{
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ so->so_rcv.sb_state |= SBS_CANTRCVMORE;
+ sorwakeup_locked(so);
+}
+
+void socantrcvmore(struct socket *so)
+{
+ SOCKBUF_LOCK(&so->so_rcv);
+ socantrcvmore_locked(so);
+}
+
+void
+socantsendmore_locked(struct socket *so)
+{
+ SOCKBUF_LOCK_ASSERT(&so->so_snd);
+ so->so_snd.sb_state |= SBS_CANTSENDMORE;
+ sowwakeup_locked(so);
+}
+
+void
+socantsendmore(struct socket *so)
+{
+ SOCKBUF_LOCK(&so->so_snd);
+ socantsendmore_locked(so);
+}
+
+
+
+/* Taken from usr/src/sys/kern/uipc_sockbuf.c and called within sctp_lower_sosend.
+ */
+int
+sbwait(struct sockbuf *sb)
+{
+ SOCKBUF_LOCK_ASSERT(sb);
+
+ sb->sb_flags |= SB_WAIT;
+#if defined(_WIN32)
+ if (SleepConditionVariableCS(&(sb->sb_cond), &(sb->sb_mtx), INFINITE))
+ return (0);
+ else
+ return (-1);
+#else
+ return (pthread_cond_wait(&(sb->sb_cond), &(sb->sb_mtx)));
+#endif
+}
+
+
+
+
+/* Taken from /src/sys/kern/uipc_socket.c
+ * and modified for __Userspace__
+ */
+static struct socket *
+soalloc(void)
+{
+ struct socket *so;
+
+ /*
+	 * soalloc() sets up socket layer state for a socket,
+	 * called only by socreate() and sonewconn().
+	 *
+	 * sodealloc() tears down socket layer state for a socket,
+	 * called only by sofree() and sonewconn().
+	 * __Userspace__ TODO: Make sure so is properly deallocated
+ * when tearing down the connection.
+ */
+
+ so = (struct socket *)malloc(sizeof(struct socket));
+
+ if (so == NULL) {
+ return (NULL);
+ }
+ memset(so, 0, sizeof(struct socket));
+
+ /* __Userspace__ Initializing the socket locks here */
+ SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
+ SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
+ SOCKBUF_COND_INIT(&so->so_snd);
+ SOCKBUF_COND_INIT(&so->so_rcv);
+ SOCK_COND_INIT(so); /* timeo_cond */
+
+	/* __Userspace__ Any ref counting required here? Will we have any use
+	 * for aiojobq? What about gencnt and numopensockets? */
+ TAILQ_INIT(&so->so_aiojobq);
+ return (so);
+}
+
+static void
+sodealloc(struct socket *so)
+{
+
+ KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
+ KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
+
+ SOCKBUF_COND_DESTROY(&so->so_snd);
+ SOCKBUF_COND_DESTROY(&so->so_rcv);
+
+ SOCK_COND_DESTROY(so);
+
+ SOCKBUF_LOCK_DESTROY(&so->so_snd);
+ SOCKBUF_LOCK_DESTROY(&so->so_rcv);
+
+ free(so);
+}
+
+/* Taken from /src/sys/kern/uipc_socket.c
+ * and modified for __Userspace__
+ */
+void
+sofree(struct socket *so)
+{
+ struct socket *head;
+
+ ACCEPT_LOCK_ASSERT();
+ SOCK_LOCK_ASSERT(so);
+	/* SS_NOFDREF is unset in the accept call; this condition seems
+	 * irrelevant for __Userspace__...
+	 */
+ if (so->so_count != 0 ||
+ (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ return;
+ }
+ head = so->so_head;
+ if (head != NULL) {
+ KASSERT((so->so_qstate & SQ_COMP) != 0 ||
+ (so->so_qstate & SQ_INCOMP) != 0,
+ ("sofree: so_head != NULL, but neither SQ_COMP nor "
+ "SQ_INCOMP"));
+ KASSERT((so->so_qstate & SQ_COMP) == 0 ||
+ (so->so_qstate & SQ_INCOMP) == 0,
+ ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ so->so_qstate &= ~SQ_INCOMP;
+ so->so_head = NULL;
+ }
+ KASSERT((so->so_qstate & SQ_COMP) == 0 &&
+ (so->so_qstate & SQ_INCOMP) == 0,
+ ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
+ so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
+ if (so->so_options & SCTP_SO_ACCEPTCONN) {
+ KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
+		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
+ }
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ sctp_close(so); /* was... sctp_detach(so); */
+ /*
+ * From this point on, we assume that no other references to this
+ * socket exist anywhere else in the stack. Therefore, no locks need
+ * to be acquired or held.
+ *
+ * We used to do a lot of socket buffer and socket locking here, as
+ * well as invoke sorflush() and perform wakeups. The direct call to
+ * dom_dispose() and sbrelease_internal() are an inlining of what was
+ * necessary from sorflush().
+ *
+	 * Notice that the socket buffer and kqueue state are torn down
+	 * before calling pru_detach. This means that protocols should not
+	 * assume they can perform socket wakeups, etc., in their detach code.
+	 */
+ sodealloc(so);
+}
+
+
+
+/* Taken from /src/sys/kern/uipc_socket.c */
+void
+soabort(struct socket *so)
+{
+#if defined(INET6)
+ struct sctp_inpcb *inp;
+#endif
+
+#if defined(INET6)
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ sctp6_abort(so);
+ } else {
+#if defined(INET)
+ sctp_abort(so);
+#endif
+ }
+#elif defined(INET)
+ sctp_abort(so);
+#endif
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ sofree(so);
+}
+
+
+/* Taken from usr/src/sys/kern/uipc_socket.c and called within sctp_connect (sctp_usrreq.c).
+ * We use sctp_connect for send_one_init_real in ms1.
+ */
+void
+soisconnecting(struct socket *so)
+{
+
+ SOCK_LOCK(so);
+ so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
+ so->so_state |= SS_ISCONNECTING;
+ SOCK_UNLOCK(so);
+}
+
+/* Taken from usr/src/sys/kern/uipc_socket.c and called within sctp_disconnect (sctp_usrreq.c).
+ * TODO Do we use sctp_disconnect?
+ */
+void
+soisdisconnecting(struct socket *so)
+{
+
+ /*
+ * Note: This code assumes that SOCK_LOCK(so) and
+ * SOCKBUF_LOCK(&so->so_rcv) are the same.
+ */
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_state &= ~SS_ISCONNECTING;
+ so->so_state |= SS_ISDISCONNECTING;
+ so->so_rcv.sb_state |= SBS_CANTRCVMORE;
+ sorwakeup_locked(so);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_state |= SBS_CANTSENDMORE;
+ sowwakeup_locked(so);
+ wakeup("dummy",so);
+ /* requires 2 args but this was in orig */
+ /* wakeup(&so->so_timeo); */
+}
+
+
+/* Taken from sys/kern/kern_synch.c and
+ modified for __Userspace__
+*/
+
+/*
+ * Make all threads sleeping on the specified identifier runnable.
+ * Associating wakeup with so_timeo identifier and timeo_cond
+ * condition variable. TODO. If we use iterator thread then we need to
+ * modify wakeup so it can distinguish between iterator identifier and
+ * timeo identifier.
+ */
+void
+wakeup(void *ident, struct socket *so)
+{
+ SOCK_LOCK(so);
+#if defined(_WIN32)
+ WakeAllConditionVariable(&(so)->timeo_cond);
+#else
+ pthread_cond_broadcast(&(so)->timeo_cond);
+#endif
+ SOCK_UNLOCK(so);
+}
+
+
+/*
+ * Make a thread sleeping on the specified identifier runnable.
+ * May wake more than one thread if a target thread is currently
+ * swapped out.
+ */
+void
+wakeup_one(void *ident)
+{
+	/* __Userspace__ Check: we use accept_cond for wakeup_one.
+	 * wakeup_one() appears to be called only from soisconnected() and
+	 * sonewconn(), with ident == &head->so_timeo, where head is
+	 * so->so_head, the back pointer to the listening socket. Since
+	 * accepts occur on so_head for all subsidiary sockets, using
+	 * accept_cond here seems correct.
+	 */
+ ACCEPT_LOCK();
+#if defined(_WIN32)
+ WakeAllConditionVariable(&accept_cond);
+#else
+ pthread_cond_broadcast(&accept_cond);
+#endif
+ ACCEPT_UNLOCK();
+}
+
+
+/* Called within sctp_process_cookie_[existing/new] */
+void
+soisconnected(struct socket *so)
+{
+ struct socket *head;
+
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
+ so->so_state |= SS_ISCONNECTED;
+ head = so->so_head;
+ if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
+ SOCK_UNLOCK(so);
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ so->so_qstate &= ~SQ_INCOMP;
+ TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
+ head->so_qlen++;
+ so->so_qstate |= SQ_COMP;
+ ACCEPT_UNLOCK();
+ sorwakeup(head);
+ wakeup_one(&head->so_timeo);
+ return;
+ }
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ wakeup(&so->so_timeo, so);
+ sorwakeup(so);
+ sowwakeup(so);
+}
+
+/* called within sctp_handle_cookie_echo */
+
+struct socket *
+sonewconn(struct socket *head, int connstatus)
+{
+ struct socket *so;
+ int over;
+
+ ACCEPT_LOCK();
+ over = (head->so_qlen > 3 * head->so_qlimit / 2);
+ ACCEPT_UNLOCK();
+#ifdef REGRESSION
+ if (regression_sonewconn_earlytest && over)
+#else
+ if (over)
+#endif
+ return (NULL);
+ so = soalloc();
+ if (so == NULL)
+ return (NULL);
+ so->so_head = head;
+ so->so_type = head->so_type;
+ so->so_options = head->so_options &~ SCTP_SO_ACCEPTCONN;
+ so->so_linger = head->so_linger;
+ so->so_state = head->so_state | SS_NOFDREF;
+ so->so_dom = head->so_dom;
+#ifdef MAC
+ SOCK_LOCK(head);
+ mac_create_socket_from_socket(head, so);
+ SOCK_UNLOCK(head);
+#endif
+ if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
+ sodealloc(so);
+ return (NULL);
+ }
+ switch (head->so_dom) {
+#ifdef INET
+ case AF_INET:
+ if (sctp_attach(so, IPPROTO_SCTP, SCTP_DEFAULT_VRFID)) {
+ sodealloc(so);
+ return (NULL);
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (sctp6_attach(so, IPPROTO_SCTP, SCTP_DEFAULT_VRFID)) {
+ sodealloc(so);
+ return (NULL);
+ }
+ break;
+#endif
+ case AF_CONN:
+ if (sctpconn_attach(so, IPPROTO_SCTP, SCTP_DEFAULT_VRFID)) {
+ sodealloc(so);
+ return (NULL);
+ }
+ break;
+ default:
+ sodealloc(so);
+ return (NULL);
+ }
+ so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
+ so->so_snd.sb_lowat = head->so_snd.sb_lowat;
+ so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
+ so->so_snd.sb_timeo = head->so_snd.sb_timeo;
+ so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
+ so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
+ so->so_state |= connstatus;
+ ACCEPT_LOCK();
+ if (connstatus) {
+ TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
+ so->so_qstate |= SQ_COMP;
+ head->so_qlen++;
+ } else {
+ /*
+ * Keep removing sockets from the head until there's room for
+ * us to insert on the tail. In pre-locking revisions, this
+ * was a simple if (), but as we could be racing with other
+ * threads and soabort() requires dropping locks, we must
+ * loop waiting for the condition to be true.
+ */
+ while (head->so_incqlen > head->so_qlimit) {
+ struct socket *sp;
+ sp = TAILQ_FIRST(&head->so_incomp);
+ TAILQ_REMOVE(&head->so_incomp, sp, so_list);
+ head->so_incqlen--;
+ sp->so_qstate &= ~SQ_INCOMP;
+ sp->so_head = NULL;
+ ACCEPT_UNLOCK();
+ soabort(sp);
+ ACCEPT_LOCK();
+ }
+ TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
+ so->so_qstate |= SQ_INCOMP;
+ head->so_incqlen++;
+ }
+ ACCEPT_UNLOCK();
+ if (connstatus) {
+ sorwakeup(head);
+ wakeup_one(&head->so_timeo);
+ }
+ return (so);
+}
+
+/* Source: /src/sys/gnu/fs/xfs/FreeBSD/xfs_ioctl.c */
+static __inline__ int
+copy_to_user(void *dst, void *src, size_t len) {
+ memcpy(dst, src, len);
+ return 0;
+}
+
+static __inline__ int
+copy_from_user(void *dst, void *src, size_t len) {
+ memcpy(dst, src, len);
+ return 0;
+}
+
+/*
+ References:
+ src/sys/dev/lmc/if_lmc.h:
+ src/sys/powerpc/powerpc/copyinout.c
+ src/sys/sys/systm.h
+*/
+# define copyin(u, k, len) copy_from_user(k, u, len)
+
+/* References:
+ src/sys/powerpc/powerpc/copyinout.c
+ src/sys/sys/systm.h
+*/
+# define copyout(k, u, len) copy_to_user(u, k, len)
+
+
+/* copyiniov definition copied/modified from src/sys/kern/kern_subr.c */
+int
+copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
+{
+ u_int iovlen;
+
+ *iov = NULL;
+ if (iovcnt > UIO_MAXIOV)
+ return (error);
+ iovlen = iovcnt * sizeof (struct iovec);
+ *iov = malloc(iovlen); /*, M_IOV, M_WAITOK); */
+ error = copyin(iovp, *iov, iovlen);
+ if (error) {
+ free(*iov); /*, M_IOV); */
+ *iov = NULL;
+ }
+ return (error);
+}
+
+/* (__Userspace__) version of uiomove */
+int
+uiomove(void *cp, int n, struct uio *uio)
+{
+ struct iovec *iov;
+ size_t cnt;
+ int error = 0;
+
+ if ((uio->uio_rw != UIO_READ) &&
+ (uio->uio_rw != UIO_WRITE)) {
+ return (EINVAL);
+ }
+
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > (size_t)n)
+ cnt = n;
+
+ switch (uio->uio_segflg) {
+
+ case UIO_USERSPACE:
+ if (uio->uio_rw == UIO_READ)
+ error = copyout(cp, iov->iov_base, cnt);
+ else
+ error = copyin(iov->iov_base, cp, cnt);
+ if (error)
+ goto out;
+ break;
+
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ memcpy(iov->iov_base, cp, cnt);
+ else
+ memcpy(cp, iov->iov_base, cnt);
+ break;
+ }
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += (off_t)cnt;
+ cp = (char *)cp + cnt;
+ n -= (int)cnt;
+ }
+out:
+ return (error);
+}
+
+
+/* Source: src/sys/kern/uipc_syscalls.c */
+int
+getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
+{
+ struct sockaddr *sa;
+ int error;
+
+ if (len > SOCK_MAXADDRLEN)
+ return (ENAMETOOLONG);
+ if (len < offsetof(struct sockaddr, sa_data))
+ return (EINVAL);
+ MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
+ error = copyin(uaddr, sa, len);
+ if (error) {
+ FREE(sa, M_SONAME);
+ } else {
+#ifdef HAVE_SA_LEN
+ sa->sa_len = len;
+#endif
+ *namp = sa;
+ }
+ return (error);
+}
+
+int
+usrsctp_getsockopt(struct socket *so, int level, int option_name,
+ void *option_value, socklen_t *option_len);
+
+sctp_assoc_t
+usrsctp_getassocid(struct socket *sock, struct sockaddr *sa)
+{
+ struct sctp_paddrinfo sp;
+ socklen_t siz;
+#ifndef HAVE_SA_LEN
+ size_t sa_len;
+#endif
+
+ /* First get the assoc id */
+ siz = sizeof(sp);
+ memset(&sp, 0, sizeof(sp));
+#ifdef HAVE_SA_LEN
+ memcpy((caddr_t)&sp.spinfo_address, sa, sa->sa_len);
+#else
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa_len = sizeof(struct sockaddr_in);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ case AF_CONN:
+ sa_len = sizeof(struct sockaddr_conn);
+ break;
+ default:
+ sa_len = 0;
+ break;
+ }
+ memcpy((caddr_t)&sp.spinfo_address, sa, sa_len);
+#endif
+ if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO, &sp, &siz) != 0) {
+ /* We depend on the fact that 0 can never be returned */
+ return ((sctp_assoc_t) 0);
+ }
+ return (sp.spinfo_assoc_id);
+}
+
+
+/* Taken from /src/lib/libc/net/sctp_sys_calls.c
+ * and modified for __Userspace__
+ * calling sctp_generic_sendmsg from this function
+ */
+ssize_t
+userspace_sctp_sendmsg(struct socket *so,
+ const void *data,
+ size_t len,
+ struct sockaddr *to,
+ socklen_t tolen,
+ uint32_t ppid,
+ uint32_t flags,
+ uint16_t stream_no,
+ uint32_t timetolive,
+ uint32_t context)
+{
+ struct sctp_sndrcvinfo sndrcvinfo, *sinfo = &sndrcvinfo;
+ struct uio auio;
+ struct iovec iov[1];
+
+ memset(sinfo, 0, sizeof(struct sctp_sndrcvinfo));
+ sinfo->sinfo_ppid = ppid;
+ sinfo->sinfo_flags = flags;
+ sinfo->sinfo_stream = stream_no;
+ sinfo->sinfo_timetolive = timetolive;
+ sinfo->sinfo_context = context;
+ sinfo->sinfo_assoc_id = 0;
+
+
+ /* Perform error checks on destination (to) */
+ if (tolen > SOCK_MAXADDRLEN) {
+ errno = ENAMETOOLONG;
+ return (-1);
+ }
+ if ((tolen > 0) &&
+ ((to == NULL) || (tolen < (socklen_t)sizeof(struct sockaddr)))) {
+ errno = EINVAL;
+ return (-1);
+ }
+ if (data == NULL) {
+ errno = EFAULT;
+ return (-1);
+ }
+	/* Defensive programming: set sa_len ourselves, in case the application
+	 * did not do it when preparing the destination address. */
+#ifdef HAVE_SA_LEN
+ if (to != NULL) {
+ to->sa_len = tolen;
+ }
+#endif
+
+ iov[0].iov_base = (caddr_t)data;
+ iov[0].iov_len = len;
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = 1;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = len;
+ errno = sctp_lower_sosend(so, to, &auio, NULL, NULL, 0, sinfo);
+ if (errno == 0) {
+ return (len - auio.uio_resid);
+ } else {
+ return (-1);
+ }
+}
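+
+/*
+ * Usage sketch (illustration only): sending one message on a connected
+ * socket `so`, with PPID 42 on stream 0; on failure, the error is reported
+ * through errno as in the code above:
+ *
+ *   const char msg[] = "hello";
+ *
+ *   if (userspace_sctp_sendmsg(so, msg, sizeof(msg), NULL, 0,
+ *                              htonl(42), 0, 0, 0, 0) < 0) {
+ *       perror("userspace_sctp_sendmsg");
+ *   }
+ */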
+
+
+ssize_t
+usrsctp_sendv(struct socket *so,
+ const void *data,
+ size_t len,
+ struct sockaddr *to,
+ int addrcnt,
+ void *info,
+ socklen_t infolen,
+ unsigned int infotype,
+ int flags)
+{
+ struct sctp_sndrcvinfo sinfo;
+ struct uio auio;
+ struct iovec iov[1];
+ int use_sinfo;
+ sctp_assoc_t *assoc_id;
+
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ if (data == NULL) {
+ errno = EFAULT;
+ return (-1);
+ }
+ memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
+ assoc_id = NULL;
+ use_sinfo = 0;
+ switch (infotype) {
+ case SCTP_SENDV_NOINFO:
+ if ((infolen != 0) || (info != NULL)) {
+ errno = EINVAL;
+ return (-1);
+ }
+ break;
+ case SCTP_SENDV_SNDINFO:
+ if ((info == NULL) || (infolen != sizeof(struct sctp_sndinfo))) {
+ errno = EINVAL;
+ return (-1);
+ }
+ sinfo.sinfo_stream = ((struct sctp_sndinfo *)info)->snd_sid;
+ sinfo.sinfo_flags = ((struct sctp_sndinfo *)info)->snd_flags;
+ sinfo.sinfo_ppid = ((struct sctp_sndinfo *)info)->snd_ppid;
+ sinfo.sinfo_context = ((struct sctp_sndinfo *)info)->snd_context;
+ sinfo.sinfo_assoc_id = ((struct sctp_sndinfo *)info)->snd_assoc_id;
+ assoc_id = &(((struct sctp_sndinfo *)info)->snd_assoc_id);
+ use_sinfo = 1;
+ break;
+ case SCTP_SENDV_PRINFO:
+ if ((info == NULL) || (infolen != sizeof(struct sctp_prinfo))) {
+ errno = EINVAL;
+ return (-1);
+ }
+ sinfo.sinfo_stream = 0;
+ sinfo.sinfo_flags = PR_SCTP_POLICY(((struct sctp_prinfo *)info)->pr_policy);
+ sinfo.sinfo_timetolive = ((struct sctp_prinfo *)info)->pr_value;
+ use_sinfo = 1;
+ break;
+ case SCTP_SENDV_AUTHINFO:
+ errno = EINVAL;
+ return (-1);
+ case SCTP_SENDV_SPA:
+ if ((info == NULL) || (infolen != sizeof(struct sctp_sendv_spa))) {
+ errno = EINVAL;
+ return (-1);
+ }
+ if (((struct sctp_sendv_spa *)info)->sendv_flags & SCTP_SEND_SNDINFO_VALID) {
+ sinfo.sinfo_stream = ((struct sctp_sendv_spa *)info)->sendv_sndinfo.snd_sid;
+ sinfo.sinfo_flags = ((struct sctp_sendv_spa *)info)->sendv_sndinfo.snd_flags;
+ sinfo.sinfo_ppid = ((struct sctp_sendv_spa *)info)->sendv_sndinfo.snd_ppid;
+ sinfo.sinfo_context = ((struct sctp_sendv_spa *)info)->sendv_sndinfo.snd_context;
+ sinfo.sinfo_assoc_id = ((struct sctp_sendv_spa *)info)->sendv_sndinfo.snd_assoc_id;
+ assoc_id = &(((struct sctp_sendv_spa *)info)->sendv_sndinfo.snd_assoc_id);
+ } else {
+ sinfo.sinfo_flags = 0;
+ sinfo.sinfo_stream = 0;
+ }
+ if (((struct sctp_sendv_spa *)info)->sendv_flags & SCTP_SEND_PRINFO_VALID) {
+ sinfo.sinfo_flags |= PR_SCTP_POLICY(((struct sctp_sendv_spa *)info)->sendv_prinfo.pr_policy);
+ sinfo.sinfo_timetolive = ((struct sctp_sendv_spa *)info)->sendv_prinfo.pr_value;
+ }
+ if (((struct sctp_sendv_spa *)info)->sendv_flags & SCTP_SEND_AUTHINFO_VALID) {
+ errno = EINVAL;
+ return (-1);
+ }
+ use_sinfo = 1;
+ break;
+ default:
+ errno = EINVAL;
+ return (-1);
+ }
+
+ /* Perform error checks on destination (to) */
+ if (addrcnt > 1) {
+ errno = EINVAL;
+ return (-1);
+ }
+
+ iov[0].iov_base = (caddr_t)data;
+ iov[0].iov_len = len;
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = 1;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = len;
+ errno = sctp_lower_sosend(so, to, &auio, NULL, NULL, flags, use_sinfo ? &sinfo : NULL);
+ if (errno == 0) {
+ if ((to != NULL) && (assoc_id != NULL)) {
+ *assoc_id = usrsctp_getassocid(so, to);
+ }
+ return (len - auio.uio_resid);
+ } else {
+ return (-1);
+ }
+}
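+
+/*
+ * Usage sketch (illustration only): sending on stream 1 with explicit send
+ * information, exercising the SCTP_SENDV_SNDINFO branch above:
+ *
+ *   struct sctp_sndinfo snd_info;
+ *   const char msg[] = "hello";
+ *
+ *   memset(&snd_info, 0, sizeof(snd_info));
+ *   snd_info.snd_sid = 1;
+ *   if (usrsctp_sendv(so, msg, sizeof(msg), NULL, 0, &snd_info,
+ *                     (socklen_t)sizeof(snd_info), SCTP_SENDV_SNDINFO,
+ *                     0) < 0) {
+ *       perror("usrsctp_sendv");
+ *   }
+ */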
+
+
+ssize_t
+userspace_sctp_sendmbuf(struct socket *so,
+ struct mbuf* mbufdata,
+ size_t len,
+ struct sockaddr *to,
+ socklen_t tolen,
+ uint32_t ppid,
+ uint32_t flags,
+ uint16_t stream_no,
+ uint32_t timetolive,
+ uint32_t context)
+{
+
+ struct sctp_sndrcvinfo sndrcvinfo, *sinfo = &sndrcvinfo;
+ /* struct uio auio;
+ struct iovec iov[1]; */
+ int error = 0;
+ int uflags = 0;
+ ssize_t retval;
+
+	/* Zero the whole structure first, as userspace_sctp_sendmsg() does,
+	 * so that the fields not set below are initialized. */
+	memset(sinfo, 0, sizeof(struct sctp_sndrcvinfo));
+	sinfo->sinfo_ppid = ppid;
+ sinfo->sinfo_flags = flags;
+ sinfo->sinfo_stream = stream_no;
+ sinfo->sinfo_timetolive = timetolive;
+ sinfo->sinfo_context = context;
+ sinfo->sinfo_assoc_id = 0;
+
+ /* Perform error checks on destination (to) */
+	if (tolen > SOCK_MAXADDRLEN) {
+		error = ENAMETOOLONG;
+		goto sendmsg_return;
+	}
+	if (tolen < (socklen_t)offsetof(struct sockaddr, sa_data)) {
+		error = EINVAL;
+		goto sendmsg_return;
+	}
+	/* Defensive programming: set sa_len ourselves, in case the application
+	 * did not do it when preparing the destination address. */
+#ifdef HAVE_SA_LEN
+ to->sa_len = tolen;
+#endif
+
+ error = sctp_lower_sosend(so, to, NULL/*uio*/,
+ (struct mbuf *)mbufdata, (struct mbuf *)NULL,
+ uflags, sinfo);
+sendmsg_return:
+ /* TODO: Needs a condition for non-blocking when error is EWOULDBLOCK */
+	if (error == 0)
+		retval = len;
+ else if (error == EWOULDBLOCK) {
+ errno = EWOULDBLOCK;
+ retval = -1;
+ } else {
+ SCTP_PRINTF("%s: error = %d\n", __func__, error);
+ errno = error;
+ retval = -1;
+ }
+ return (retval);
+}
+
+
+/* taken from usr.lib/sctp_sys_calls.c and needed here */
+#define SCTP_SMALL_IOVEC_SIZE 2
+
+/* Taken from /src/lib/libc/net/sctp_sys_calls.c
+ * and modified for __Userspace__
+ * calling sctp_generic_recvmsg from this function
+ */
+ssize_t
+userspace_sctp_recvmsg(struct socket *so,
+ void *dbuf,
+ size_t len,
+ struct sockaddr *from,
+ socklen_t *fromlenp,
+ struct sctp_sndrcvinfo *sinfo,
+ int *msg_flags)
+{
+ struct uio auio;
+ struct iovec iov[SCTP_SMALL_IOVEC_SIZE];
+ struct iovec *tiov;
+ int iovlen = 1;
+ int error = 0;
+ ssize_t ulen;
+ int i;
+ socklen_t fromlen;
+
+ iov[0].iov_base = dbuf;
+ iov[0].iov_len = len;
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ tiov = iov;
+	for (i = 0; i < iovlen; i++, tiov++) {
+ if ((auio.uio_resid += tiov->iov_len) < 0) {
+ error = EINVAL;
+ SCTP_PRINTF("%s: error = %d\n", __func__, error);
+ return (-1);
+ }
+ }
+ ulen = auio.uio_resid;
+ if (fromlenp != NULL) {
+ fromlen = *fromlenp;
+ } else {
+ fromlen = 0;
+ }
+ error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
+ from, fromlen, msg_flags,
+ (struct sctp_sndrcvinfo *)sinfo, 1);
+
+ if (error) {
+ if ((auio.uio_resid != ulen) &&
+ (error == EINTR ||
+#if !defined(__NetBSD__)
+ error == ERESTART ||
+#endif
+ error == EWOULDBLOCK)) {
+ error = 0;
+ }
+ }
+ if ((fromlenp != NULL) && (fromlen > 0) && (from != NULL)) {
+ switch (from->sa_family) {
+#if defined(INET)
+ case AF_INET:
+ *fromlenp = sizeof(struct sockaddr_in);
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ *fromlenp = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ case AF_CONN:
+ *fromlenp = sizeof(struct sockaddr_conn);
+ break;
+ default:
+ *fromlenp = 0;
+ break;
+ }
+ if (*fromlenp > fromlen) {
+ *fromlenp = fromlen;
+ }
+ }
+ if (error == 0) {
+		/* success; return the number of bytes received */
+ return (ulen - auio.uio_resid);
+ } else {
+ SCTP_PRINTF("%s: error = %d\n", __func__, error);
+ return (-1);
+ }
+}
+
+ssize_t
+usrsctp_recvv(struct socket *so,
+ void *dbuf,
+ size_t len,
+ struct sockaddr *from,
+ socklen_t *fromlenp,
+ void *info,
+ socklen_t *infolen,
+ unsigned int *infotype,
+ int *msg_flags)
+{
+ struct uio auio;
+ struct iovec iov[SCTP_SMALL_IOVEC_SIZE];
+ struct iovec *tiov;
+ int iovlen = 1;
+ ssize_t ulen;
+ int i;
+ socklen_t fromlen;
+ struct sctp_rcvinfo *rcv;
+ struct sctp_recvv_rn *rn;
+ struct sctp_extrcvinfo seinfo;
+
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ iov[0].iov_base = dbuf;
+ iov[0].iov_len = len;
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ tiov = iov;
+	for (i = 0; i < iovlen; i++, tiov++) {
+ if ((auio.uio_resid += tiov->iov_len) < 0) {
+ errno = EINVAL;
+ return (-1);
+ }
+ }
+ ulen = auio.uio_resid;
+ if (fromlenp != NULL) {
+ fromlen = *fromlenp;
+ } else {
+ fromlen = 0;
+ }
+ errno = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
+ from, fromlen, msg_flags,
+ (struct sctp_sndrcvinfo *)&seinfo, 1);
+ if (errno) {
+ if ((auio.uio_resid != ulen) &&
+ (errno == EINTR ||
+#if !defined(__NetBSD__)
+ errno == ERESTART ||
+#endif
+ errno == EWOULDBLOCK)) {
+ errno = 0;
+ }
+ }
+ if (errno != 0) {
+ goto out;
+ }
+ if ((*msg_flags & MSG_NOTIFICATION) == 0) {
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+ *infolen >= (socklen_t)sizeof(struct sctp_recvv_rn) &&
+ seinfo.sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL) {
+ rn = (struct sctp_recvv_rn *)info;
+ rn->recvv_rcvinfo.rcv_sid = seinfo.sinfo_stream;
+ rn->recvv_rcvinfo.rcv_ssn = seinfo.sinfo_ssn;
+ rn->recvv_rcvinfo.rcv_flags = seinfo.sinfo_flags;
+ rn->recvv_rcvinfo.rcv_ppid = seinfo.sinfo_ppid;
+ rn->recvv_rcvinfo.rcv_context = seinfo.sinfo_context;
+ rn->recvv_rcvinfo.rcv_tsn = seinfo.sinfo_tsn;
+ rn->recvv_rcvinfo.rcv_cumtsn = seinfo.sinfo_cumtsn;
+ rn->recvv_rcvinfo.rcv_assoc_id = seinfo.sinfo_assoc_id;
+ rn->recvv_nxtinfo.nxt_sid = seinfo.sreinfo_next_stream;
+ rn->recvv_nxtinfo.nxt_flags = 0;
+ if (seinfo.sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
+ rn->recvv_nxtinfo.nxt_flags |= SCTP_UNORDERED;
+ }
+ if (seinfo.sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
+ rn->recvv_nxtinfo.nxt_flags |= SCTP_NOTIFICATION;
+ }
+ if (seinfo.sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
+ rn->recvv_nxtinfo.nxt_flags |= SCTP_COMPLETE;
+ }
+ rn->recvv_nxtinfo.nxt_ppid = seinfo.sreinfo_next_ppid;
+ rn->recvv_nxtinfo.nxt_length = seinfo.sreinfo_next_length;
+ rn->recvv_nxtinfo.nxt_assoc_id = seinfo.sreinfo_next_aid;
+ *infolen = (socklen_t)sizeof(struct sctp_recvv_rn);
+ *infotype = SCTP_RECVV_RN;
+ } else if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+ *infolen >= (socklen_t)sizeof(struct sctp_rcvinfo)) {
+ rcv = (struct sctp_rcvinfo *)info;
+ rcv->rcv_sid = seinfo.sinfo_stream;
+ rcv->rcv_ssn = seinfo.sinfo_ssn;
+ rcv->rcv_flags = seinfo.sinfo_flags;
+ rcv->rcv_ppid = seinfo.sinfo_ppid;
+ rcv->rcv_context = seinfo.sinfo_context;
+ rcv->rcv_tsn = seinfo.sinfo_tsn;
+ rcv->rcv_cumtsn = seinfo.sinfo_cumtsn;
+ rcv->rcv_assoc_id = seinfo.sinfo_assoc_id;
+ *infolen = (socklen_t)sizeof(struct sctp_rcvinfo);
+ *infotype = SCTP_RECVV_RCVINFO;
+ } else {
+ *infotype = SCTP_RECVV_NOINFO;
+ *infolen = 0;
+ }
+ }
+ if ((fromlenp != NULL) &&
+ (fromlen > 0) &&
+ (from != NULL) &&
+ (ulen > auio.uio_resid)) {
+ switch (from->sa_family) {
+#if defined(INET)
+ case AF_INET:
+ *fromlenp = sizeof(struct sockaddr_in);
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ *fromlenp = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ case AF_CONN:
+ *fromlenp = sizeof(struct sockaddr_conn);
+ break;
+ default:
+ *fromlenp = 0;
+ break;
+ }
+ if (*fromlenp > fromlen) {
+ *fromlenp = fromlen;
+ }
+ }
+out:
+ if (errno == 0) {
+		/* success; return the number of bytes received */
+ return (ulen - auio.uio_resid);
+ } else {
+ return (-1);
+ }
+}
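+
+/*
+ * Usage sketch (illustration only): a blocking receive that also retrieves
+ * the per-message information filled in above:
+ *
+ *   char buf[2048];
+ *   struct sockaddr_in from;
+ *   socklen_t fromlen = (socklen_t)sizeof(from);
+ *   struct sctp_rcvinfo rcv_info;
+ *   socklen_t infolen = (socklen_t)sizeof(rcv_info);
+ *   unsigned int infotype = 0;
+ *   int flags = 0;
+ *   ssize_t n;
+ *
+ *   n = usrsctp_recvv(so, buf, sizeof(buf), (struct sockaddr *)&from,
+ *                     &fromlen, &rcv_info, &infolen, &infotype, &flags);
+ *   if ((n > 0) && (infotype == SCTP_RECVV_RCVINFO)) {
+ *       ... rcv_info.rcv_sid holds the stream the message arrived on ...
+ *   }
+ */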
+
+
+
+
+/* Taken from /src/sys/kern/uipc_socket.c
+ * and modified for __Userspace__
+ * socreate returns a socket. The socket should be
+ * closed with soclose().
+ */
+int
+socreate(int dom, struct socket **aso, int type, int proto)
+{
+ struct socket *so;
+ int error;
+
+ if ((dom != AF_CONN) && (dom != AF_INET) && (dom != AF_INET6)) {
+ return (EINVAL);
+ }
+ if ((type != SOCK_STREAM) && (type != SOCK_SEQPACKET)) {
+ return (EINVAL);
+ }
+ if (proto != IPPROTO_SCTP) {
+ return (EINVAL);
+ }
+
+ so = soalloc();
+ if (so == NULL) {
+ return (ENOBUFS);
+ }
+
+	/*
+	 * so_incomp is a queue of connections that must be completed at
+	 * the protocol level before being returned. so_comp heads a list
+	 * of sockets that are ready to be returned to the listening process.
+	 * __Userspace__ These queues are used in a number of places, such
+	 * as accept.
+	 */
+ TAILQ_INIT(&so->so_incomp);
+ TAILQ_INIT(&so->so_comp);
+ so->so_type = type;
+ so->so_count = 1;
+ so->so_dom = dom;
+ /*
+ * Auto-sizing of socket buffers is managed by the protocols and
+ * the appropriate flags must be set in the pru_attach function.
+ * For __Userspace__ The pru_attach function in this case is sctp_attach.
+ */
+ switch (dom) {
+#if defined(INET)
+ case AF_INET:
+ error = sctp_attach(so, proto, SCTP_DEFAULT_VRFID);
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ error = sctp6_attach(so, proto, SCTP_DEFAULT_VRFID);
+ break;
+#endif
+ case AF_CONN:
+ error = sctpconn_attach(so, proto, SCTP_DEFAULT_VRFID);
+ break;
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ if (error) {
+ KASSERT(so->so_count == 1, ("socreate: so_count %d", so->so_count));
+ so->so_count = 0;
+ sodealloc(so);
+ return (error);
+ }
+ *aso = so;
+ return (0);
+}
+
+
+/* Taken from /src/sys/kern/uipc_syscalls.c
+ * and modified for __Userspace__
+ * Removing struct thread td.
+ */
+struct socket *
+userspace_socket(int domain, int type, int protocol)
+{
+ struct socket *so = NULL;
+
+ errno = socreate(domain, &so, type, protocol);
+ if (errno) {
+ return (NULL);
+ }
+ /*
+ * The original socket call returns the file descriptor fd.
+ * td->td_retval[0] = fd.
+ * We are returning struct socket *so.
+ */
+ return (so);
+}
+
+struct socket *
+usrsctp_socket(int domain, int type, int protocol,
+ int (*receive_cb)(struct socket *sock, union sctp_sockstore addr, void *data,
+ size_t datalen, struct sctp_rcvinfo, int flags, void *ulp_info),
+ int (*send_cb)(struct socket *sock, uint32_t sb_free),
+ uint32_t sb_threshold,
+ void *ulp_info)
+{
+ struct socket *so;
+
+ if ((protocol == IPPROTO_SCTP) && (SCTP_BASE_VAR(sctp_pcb_initialized) == 0)) {
+ errno = EPROTONOSUPPORT;
+ return (NULL);
+ }
+ if ((receive_cb == NULL) &&
+ ((send_cb != NULL) || (sb_threshold != 0) || (ulp_info != NULL))) {
+ errno = EINVAL;
+ return (NULL);
+ }
+ if ((domain == AF_CONN) && (SCTP_BASE_VAR(conn_output) == NULL)) {
+ errno = EAFNOSUPPORT;
+ return (NULL);
+ }
+ errno = socreate(domain, &so, type, protocol);
+ if (errno) {
+ return (NULL);
+ }
+ /*
+ * The original socket call returns the file descriptor fd.
+ * td->td_retval[0] = fd.
+ * We are returning struct socket *so.
+ */
+ register_recv_cb(so, receive_cb);
+ register_send_cb(so, sb_threshold, send_cb);
+ register_ulp_info(so, ulp_info);
+ return (so);
+}
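+
+/*
+ * Usage sketch (illustration only): creating a one-to-one style socket with
+ * no callbacks, so that the blocking calls below are used instead:
+ *
+ *   struct socket *so;
+ *
+ *   if ((so = usrsctp_socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP,
+ *                            NULL, NULL, 0, NULL)) == NULL) {
+ *       perror("usrsctp_socket");
+ *   }
+ */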
+
+
+u_long sb_max = SB_MAX;
+u_long sb_max_adj =
+ SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
+
+static u_long sb_efficiency = 8; /* parameter for sbreserve() */
+
+/*
+ * Allot mbufs to a sockbuf. Attempt to scale mbmax so that mbcnt doesn't
+ * become limiting if buffering efficiency is near the normal case.
+ */
+int
+sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so)
+{
+ SOCKBUF_LOCK_ASSERT(sb);
+ sb->sb_mbmax = (u_int)min(cc * sb_efficiency, sb_max);
+ sb->sb_hiwat = (u_int)cc;
+ if (sb->sb_lowat > (int)sb->sb_hiwat)
+ sb->sb_lowat = (int)sb->sb_hiwat;
+ return (1);
+}
+
+static int
+sbreserve(struct sockbuf *sb, u_long cc, struct socket *so)
+{
+ int error;
+
+ SOCKBUF_LOCK(sb);
+ error = sbreserve_locked(sb, cc, so);
+ SOCKBUF_UNLOCK(sb);
+ return (error);
+}
+
+int
+soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
+{
+ SOCKBUF_LOCK(&so->so_snd);
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_snd.sb_hiwat = (uint32_t)sndcc;
+ so->so_rcv.sb_hiwat = (uint32_t)rcvcc;
+
+ if (sbreserve_locked(&so->so_snd, sndcc, so) == 0) {
+ goto bad;
+ }
+ if (sbreserve_locked(&so->so_rcv, rcvcc, so) == 0) {
+ goto bad;
+ }
+ if (so->so_rcv.sb_lowat == 0)
+ so->so_rcv.sb_lowat = 1;
+ if (so->so_snd.sb_lowat == 0)
+ so->so_snd.sb_lowat = MCLBYTES;
+ if (so->so_snd.sb_lowat > (int)so->so_snd.sb_hiwat)
+ so->so_snd.sb_lowat = (int)so->so_snd.sb_hiwat;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ return (0);
+
+ bad:
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ return (ENOBUFS);
+}
+
+
+/* Taken from /src/sys/kern/uipc_sockbuf.c
+ * and modified for __Userspace__
+ */
+
+void
+sowakeup(struct socket *so, struct sockbuf *sb)
+{
+
+ SOCKBUF_LOCK_ASSERT(sb);
+
+ sb->sb_flags &= ~SB_SEL;
+ if (sb->sb_flags & SB_WAIT) {
+ sb->sb_flags &= ~SB_WAIT;
+#if defined(_WIN32)
+ WakeAllConditionVariable(&(sb)->sb_cond);
+#else
+ pthread_cond_broadcast(&(sb)->sb_cond);
+#endif
+ }
+ SOCKBUF_UNLOCK(sb);
+}
+
+
+/* Taken from /src/sys/kern/uipc_socket.c
+ * and modified for __Userspace__
+ */
+
+int
+sobind(struct socket *so, struct sockaddr *nam)
+{
+ switch (nam->sa_family) {
+#if defined(INET)
+ case AF_INET:
+ return (sctp_bind(so, nam));
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ return (sctp6_bind(so, nam, NULL));
+#endif
+ case AF_CONN:
+ return (sctpconn_bind(so, nam));
+ default:
+ return EAFNOSUPPORT;
+ }
+}
+
+/* Taken from /src/sys/kern/uipc_syscalls.c
+ * and modified for __Userspace__
+ */
+
+int
+usrsctp_bind(struct socket *so, struct sockaddr *name, int namelen)
+{
+ struct sockaddr *sa;
+
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ if ((errno = getsockaddr(&sa, (caddr_t)name, namelen)) != 0)
+ return (-1);
+
+ errno = sobind(so, sa);
+ FREE(sa, M_SONAME);
+ if (errno) {
+ return (-1);
+ } else {
+ return (0);
+ }
+}
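+
+/*
+ * Usage sketch (illustration only): binding SCTP port 5001 on all local
+ * IPv4 addresses; getsockaddr() above fills in sa_len where needed:
+ *
+ *   struct sockaddr_in addr;
+ *
+ *   memset(&addr, 0, sizeof(addr));
+ *   addr.sin_family = AF_INET;
+ *   addr.sin_port = htons(5001);
+ *   addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ *   if (usrsctp_bind(so, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+ *       perror("usrsctp_bind");
+ *   }
+ */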
+
+int
+userspace_bind(struct socket *so, struct sockaddr *name, int namelen)
+{
+ return (usrsctp_bind(so, name, namelen));
+}
+
+/* Taken from /src/sys/kern/uipc_socket.c
+ * and modified for __Userspace__
+ */
+
+int
+solisten(struct socket *so, int backlog)
+{
+ if (so == NULL) {
+ return (EBADF);
+ } else {
+ return (sctp_listen(so, backlog, NULL));
+ }
+}
+
+
+int
+solisten_proto_check(struct socket *so)
+{
+
+ SOCK_LOCK_ASSERT(so);
+
+ if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
+ SS_ISDISCONNECTING))
+ return (EINVAL);
+ return (0);
+}
+
+static int somaxconn = SOMAXCONN;
+
+void
+solisten_proto(struct socket *so, int backlog)
+{
+
+ SOCK_LOCK_ASSERT(so);
+
+ if (backlog < 0 || backlog > somaxconn)
+ backlog = somaxconn;
+ so->so_qlimit = backlog;
+ so->so_options |= SCTP_SO_ACCEPTCONN;
+}
+
+
+
+
+/* Taken from /src/sys/kern/uipc_syscalls.c
+ * and modified for __Userspace__
+ */
+
+int
+usrsctp_listen(struct socket *so, int backlog)
+{
+ errno = solisten(so, backlog);
+ if (errno) {
+ return (-1);
+ } else {
+ return (0);
+ }
+}
+
+int
+userspace_listen(struct socket *so, int backlog)
+{
+ return (usrsctp_listen(so, backlog));
+}
+
+/* Taken from /src/sys/kern/uipc_socket.c
+ * and modified for __Userspace__
+ */
+
+int
+soaccept(struct socket *so, struct sockaddr **nam)
+{
+ int error;
+
+ SOCK_LOCK(so);
+ KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
+ so->so_state &= ~SS_NOFDREF;
+ SOCK_UNLOCK(so);
+ error = sctp_accept(so, nam);
+ return (error);
+}
+
+
+
+/* Taken from /src/sys/kern/uipc_syscalls.c
+ * kern_accept modified for __Userspace__
+ */
+int
+user_accept(struct socket *head, struct sockaddr **name, socklen_t *namelen, struct socket **ptr_accept_ret_sock)
+{
+ struct sockaddr *sa = NULL;
+ int error;
+ struct socket *so = NULL;
+
+
+ if (name) {
+ *name = NULL;
+ }
+
+ if ((head->so_options & SCTP_SO_ACCEPTCONN) == 0) {
+ error = EINVAL;
+ goto done;
+ }
+
+ ACCEPT_LOCK();
+ if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
+ ACCEPT_UNLOCK();
+ error = EWOULDBLOCK;
+ goto noconnection;
+ }
+ while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
+ if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ head->so_error = ECONNABORTED;
+ break;
+ }
+#if defined(_WIN32)
+ if (SleepConditionVariableCS(&accept_cond, &accept_mtx, INFINITE))
+ error = 0;
+ else
+ error = GetLastError();
+#else
+ error = pthread_cond_wait(&accept_cond, &accept_mtx);
+#endif
+ if (error) {
+ ACCEPT_UNLOCK();
+ goto noconnection;
+ }
+ }
+ if (head->so_error) {
+ error = head->so_error;
+ head->so_error = 0;
+ ACCEPT_UNLOCK();
+ goto noconnection;
+ }
+ so = TAILQ_FIRST(&head->so_comp);
+ KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
+ KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
+
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so); /* soref() and so_state update */
+ soref(so); /* file descriptor reference */
+
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+
+ /*
+ * The original accept returns fd value via td->td_retval[0] = fd;
+ * we will return the socket for accepted connection.
+ */
+
+ error = soaccept(so, &sa);
+ if (error) {
+ /*
+ * return a namelen of zero for older code which might
+ * ignore the return value from accept.
+ */
+ if (name)
+ *namelen = 0;
+ goto noconnection;
+ }
+ if (sa == NULL) {
+ if (name)
+ *namelen = 0;
+ goto done;
+ }
+ if (name) {
+#ifdef HAVE_SA_LEN
+ /* check sa_len before it is destroyed */
+ if (*namelen > sa->sa_len) {
+ *namelen = sa->sa_len;
+ }
+#else
+ socklen_t sa_len;
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa_len = sizeof(struct sockaddr_in);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ case AF_CONN:
+ sa_len = sizeof(struct sockaddr_conn);
+ break;
+ default:
+ sa_len = 0;
+ break;
+ }
+ if (*namelen > sa_len) {
+ *namelen = sa_len;
+ }
+#endif
+ *name = sa;
+ sa = NULL;
+ }
+noconnection:
+ if (sa) {
+ FREE(sa, M_SONAME);
+ }
+
+done:
+ *ptr_accept_ret_sock = so;
+ return (error);
+}
+
+
+
+/* Taken from /src/sys/kern/uipc_syscalls.c
+ * and modified for __Userspace__
+ */
+/*
+ * accept1()
+ */
+static int
+accept1(struct socket *so, struct sockaddr *aname, socklen_t *anamelen, struct socket **ptr_accept_ret_sock)
+{
+ struct sockaddr *name;
+ socklen_t namelen;
+ int error;
+
+ if (so == NULL) {
+ return (EBADF);
+ }
+ if (aname == NULL) {
+ return (user_accept(so, NULL, NULL, ptr_accept_ret_sock));
+ }
+
+ error = copyin(anamelen, &namelen, sizeof (namelen));
+ if (error)
+ return (error);
+
+ error = user_accept(so, &name, &namelen, ptr_accept_ret_sock);
+
+ /*
+ * return a namelen of zero for older code which might
+ * ignore the return value from accept.
+ */
+ if (error) {
+ (void) copyout(&namelen,
+ anamelen, sizeof(*anamelen));
+ return (error);
+ }
+
+ if (error == 0 && name != NULL) {
+ error = copyout(name, aname, namelen);
+ }
+ if (error == 0) {
+ error = copyout(&namelen, anamelen, sizeof(namelen));
+ }
+
+ if (name) {
+ FREE(name, M_SONAME);
+ }
+ return (error);
+}
+
+struct socket *
+usrsctp_accept(struct socket *so, struct sockaddr *aname, socklen_t *anamelen)
+{
+ struct socket *accept_return_sock = NULL;
+
+ errno = accept1(so, aname, anamelen, &accept_return_sock);
+ if (errno) {
+ return (NULL);
+ } else {
+ return (accept_return_sock);
+ }
+}
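+
+/*
+ * Usage sketch (illustration only): a minimal listen/accept loop built on
+ * usrsctp_listen() and usrsctp_accept(); passing NULL for the address
+ * arguments is allowed, as handled in accept1() above:
+ *
+ *   struct socket *conn;
+ *
+ *   if (usrsctp_listen(so, 1) < 0) {
+ *       perror("usrsctp_listen");
+ *   }
+ *   while ((conn = usrsctp_accept(so, NULL, NULL)) != NULL) {
+ *       ... handle the new association, then usrsctp_close(conn) ...
+ *   }
+ */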
+
+struct socket *
+userspace_accept(struct socket *so, struct sockaddr *aname, socklen_t *anamelen)
+{
+ return (usrsctp_accept(so, aname, anamelen));
+}
+
+struct socket *
+usrsctp_peeloff(struct socket *head, sctp_assoc_t id)
+{
+ struct socket *so;
+
+ if ((errno = sctp_can_peel_off(head, id)) != 0) {
+ return (NULL);
+ }
+ if ((so = sonewconn(head, SS_ISCONNECTED)) == NULL) {
+ return (NULL);
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ soref(so);
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ if ((errno = sctp_do_peeloff(head, so, id)) != 0) {
+ so->so_count = 0;
+ sodealloc(so);
+ return (NULL);
+ }
+ return (so);
+}
+
+int
+sodisconnect(struct socket *so)
+{
+ int error;
+
+ if ((so->so_state & SS_ISCONNECTED) == 0)
+ return (ENOTCONN);
+ if (so->so_state & SS_ISDISCONNECTING)
+ return (EALREADY);
+ error = sctp_disconnect(so);
+ return (error);
+}
+
+int
+usrsctp_set_non_blocking(struct socket *so, int onoff)
+{
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ SOCK_LOCK(so);
+ if (onoff != 0) {
+ so->so_state |= SS_NBIO;
+ } else {
+ so->so_state &= ~SS_NBIO;
+ }
+ SOCK_UNLOCK(so);
+ return (0);
+}
+
+int
+usrsctp_get_non_blocking(struct socket *so)
+{
+ int result;
+
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ SOCK_LOCK(so);
+ if (so->so_state & SS_NBIO) {
+ result = 1;
+ } else {
+ result = 0;
+ }
+ SOCK_UNLOCK(so);
+ return (result);
+}
+
+int
+soconnect(struct socket *so, struct sockaddr *nam)
+{
+ int error;
+
+ if (so->so_options & SCTP_SO_ACCEPTCONN)
+ return (EOPNOTSUPP);
+ /*
+ * If protocol is connection-based, can only connect once.
+ * Otherwise, if connected, try to disconnect first. This allows
+ * user to disconnect by connecting to, e.g., a null address.
+ */
+ if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && (error = sodisconnect(so))) {
+ error = EISCONN;
+ } else {
+ /*
+ * Prevent accumulated error from previous connection from
+ * biting us.
+ */
+ so->so_error = 0;
+ switch (nam->sa_family) {
+#if defined(INET)
+ case AF_INET:
+ error = sctp_connect(so, nam);
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ error = sctp6_connect(so, nam);
+ break;
+#endif
+ case AF_CONN:
+ error = sctpconn_connect(so, nam);
+ break;
+ default:
+ error = EAFNOSUPPORT;
+ }
+ }
+
+ return (error);
+}
+
+
+
+int user_connect(struct socket *so, struct sockaddr *sa)
+{
+ int error;
+ int interrupted = 0;
+
+ if (so == NULL) {
+ error = EBADF;
+ goto done1;
+ }
+ if (so->so_state & SS_ISCONNECTING) {
+ error = EALREADY;
+ goto done1;
+ }
+
+ error = soconnect(so, sa);
+ if (error) {
+ goto bad;
+ }
+ if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
+ error = EINPROGRESS;
+ goto done1;
+ }
+
+ SOCK_LOCK(so);
+ while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
+#if defined(_WIN32)
+ if (SleepConditionVariableCS(SOCK_COND(so), SOCK_MTX(so), INFINITE))
+ error = 0;
+ else
+ error = -1;
+#else
+ error = pthread_cond_wait(SOCK_COND(so), SOCK_MTX(so));
+#endif
+ if (error) {
+#if defined(__NetBSD__)
+ if (error == EINTR) {
+#else
+ if (error == EINTR || error == ERESTART) {
+#endif
+ interrupted = 1;
+ }
+ break;
+ }
+ }
+ if (error == 0) {
+ error = so->so_error;
+ so->so_error = 0;
+ }
+ SOCK_UNLOCK(so);
+
+bad:
+ if (!interrupted) {
+ so->so_state &= ~SS_ISCONNECTING;
+ }
+#if !defined(__NetBSD__)
+ if (error == ERESTART) {
+ error = EINTR;
+ }
+#endif
+done1:
+ return (error);
+}
+
+int usrsctp_connect(struct socket *so, struct sockaddr *name, int namelen)
+{
+ struct sockaddr *sa = NULL;
+
+ errno = getsockaddr(&sa, (caddr_t)name, namelen);
+ if (errno)
+ return (-1);
+
+ errno = user_connect(so, sa);
+ FREE(sa, M_SONAME);
+ if (errno) {
+ return (-1);
+ } else {
+ return (0);
+ }
+}
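+
+/*
+ * Usage sketch (illustration only): connecting to 127.0.0.1:5001; like the
+ * kernel call, this blocks unless non-blocking mode was enabled with
+ * usrsctp_set_non_blocking():
+ *
+ *   struct sockaddr_in addr;
+ *
+ *   memset(&addr, 0, sizeof(addr));
+ *   addr.sin_family = AF_INET;
+ *   addr.sin_port = htons(5001);
+ *   addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ *   if (usrsctp_connect(so, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+ *       perror("usrsctp_connect");
+ *   }
+ */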
+
+int userspace_connect(struct socket *so, struct sockaddr *name, int namelen)
+{
+ return (usrsctp_connect(so, name, namelen));
+}
+
+#define SCTP_STACK_BUF_SIZE 2048
+
+void
+usrsctp_close(struct socket *so) {
+ if (so != NULL) {
+ if (so->so_options & SCTP_SO_ACCEPTCONN) {
+ struct socket *sp;
+
+ ACCEPT_LOCK();
+ while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
+ TAILQ_REMOVE(&so->so_comp, sp, so_list);
+ so->so_qlen--;
+ sp->so_qstate &= ~SQ_COMP;
+ sp->so_head = NULL;
+ ACCEPT_UNLOCK();
+ soabort(sp);
+ ACCEPT_LOCK();
+ }
+ ACCEPT_UNLOCK();
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ sorele(so);
+ }
+}
+
+void
+userspace_close(struct socket *so)
+{
+ usrsctp_close(so);
+}
+
+int
+usrsctp_shutdown(struct socket *so, int how)
+{
+ if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) {
+ errno = EINVAL;
+ return (-1);
+ }
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ sctp_flush(so, how);
+ if (how != SHUT_WR)
+ socantrcvmore(so);
+ if (how != SHUT_RD) {
+ errno = sctp_shutdown(so);
+ if (errno) {
+ return (-1);
+ } else {
+ return (0);
+ }
+ }
+ return (0);
+}
+
+int
+userspace_shutdown(struct socket *so, int how)
+{
+ return (usrsctp_shutdown(so, how));
+}
+
+int
+usrsctp_finish(void)
+{
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+ return (0);
+ }
+ if (SCTP_INP_INFO_TRYLOCK()) {
+ if (!LIST_EMPTY(&SCTP_BASE_INFO(listhead))) {
+ SCTP_INP_INFO_RUNLOCK();
+ return (-1);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ } else {
+ return (-1);
+ }
+ sctp_finish();
+#if defined(_WIN32)
+ DeleteConditionVariable(&accept_cond);
+ DeleteCriticalSection(&accept_mtx);
+#if defined(INET) || defined(INET6)
+ WSACleanup();
+#endif
+#else
+ pthread_cond_destroy(&accept_cond);
+ pthread_mutex_destroy(&accept_mtx);
+#endif
+ return (0);
+}
+
+int
+userspace_finish(void)
+{
+ return (usrsctp_finish());
+}
+
+/* needed from sctp_usrreq.c */
+int
+sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, void *p);
+
+int
+usrsctp_setsockopt(struct socket *so, int level, int option_name,
+ const void *option_value, socklen_t option_len)
+{
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ switch (level) {
+ case SOL_SOCKET:
+ {
+ switch (option_name) {
+ case SO_RCVBUF:
+ if (option_len < (socklen_t)sizeof(int)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ int *buf_size;
+
+ buf_size = (int *)option_value;
+ if (*buf_size < 1) {
+ errno = EINVAL;
+ return (-1);
+ }
+ sbreserve(&so->so_rcv, (u_long)*buf_size, so);
+ return (0);
+ }
+ break;
+ case SO_SNDBUF:
+ if (option_len < (socklen_t)sizeof(int)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ int *buf_size;
+
+ buf_size = (int *)option_value;
+ if (*buf_size < 1) {
+ errno = EINVAL;
+ return (-1);
+ }
+ sbreserve(&so->so_snd, (u_long)*buf_size, so);
+ return (0);
+ }
+ break;
+ case SO_LINGER:
+ if (option_len < (socklen_t)sizeof(struct linger)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ struct linger *l;
+
+ l = (struct linger *)option_value;
+ so->so_linger = l->l_linger;
+ if (l->l_onoff) {
+ so->so_options |= SCTP_SO_LINGER;
+ } else {
+ so->so_options &= ~SCTP_SO_LINGER;
+ }
+ return (0);
+ }
+ default:
+ errno = EINVAL;
+ return (-1);
+ }
+ }
+ case IPPROTO_SCTP:
+ errno = sctp_setopt(so, option_name, (void *) option_value, (size_t)option_len, NULL);
+ if (errno) {
+ return (-1);
+ } else {
+ return (0);
+ }
+ default:
+ errno = ENOPROTOOPT;
+ return (-1);
+ }
+}
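+
+/*
+ * Usage sketch (illustration only): growing the receive buffer via the
+ * SOL_SOCKET/SO_RCVBUF path handled above:
+ *
+ *   int rcvbuf = 1 << 20;
+ *
+ *   if (usrsctp_setsockopt(so, SOL_SOCKET, SO_RCVBUF,
+ *                          &rcvbuf, (socklen_t)sizeof(rcvbuf)) < 0) {
+ *       perror("usrsctp_setsockopt");
+ *   }
+ */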
+
+int
+userspace_setsockopt(struct socket *so, int level, int option_name,
+ const void *option_value, socklen_t option_len)
+{
+ return (usrsctp_setsockopt(so, level, option_name, option_value, option_len));
+}
+
+/* needed from sctp_usrreq.c */
+int
+sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
+ void *p);
+
+int
+usrsctp_getsockopt(struct socket *so, int level, int option_name,
+ void *option_value, socklen_t *option_len)
+{
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+ if (option_len == NULL) {
+ errno = EFAULT;
+ return (-1);
+ }
+ switch (level) {
+ case SOL_SOCKET:
+ switch (option_name) {
+ case SO_RCVBUF:
+ if (*option_len < (socklen_t)sizeof(int)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ int *buf_size;
+
+ buf_size = (int *)option_value;
+				*buf_size = so->so_rcv.sb_hiwat;
+ *option_len = (socklen_t)sizeof(int);
+ return (0);
+ }
+ break;
+ case SO_SNDBUF:
+ if (*option_len < (socklen_t)sizeof(int)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ int *buf_size;
+
+ buf_size = (int *)option_value;
+ *buf_size = so->so_snd.sb_hiwat;
+ *option_len = (socklen_t)sizeof(int);
+ return (0);
+ }
+ break;
+ case SO_LINGER:
+ if (*option_len < (socklen_t)sizeof(struct linger)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ struct linger *l;
+
+ l = (struct linger *)option_value;
+ l->l_linger = so->so_linger;
+ if (so->so_options & SCTP_SO_LINGER) {
+ l->l_onoff = 1;
+ } else {
+ l->l_onoff = 0;
+ }
+ *option_len = (socklen_t)sizeof(struct linger);
+ return (0);
+ }
+ break;
+ case SO_ERROR:
+ if (*option_len < (socklen_t)sizeof(int)) {
+ errno = EINVAL;
+ return (-1);
+ } else {
+ int *intval;
+
+ intval = (int *)option_value;
+ *intval = so->so_error;
+ *option_len = (socklen_t)sizeof(int);
+ return (0);
+ }
+ break;
+ default:
+ errno = EINVAL;
+ return (-1);
+ }
+ case IPPROTO_SCTP:
+ {
+ size_t len;
+
+ len = (size_t)*option_len;
+ errno = sctp_getopt(so, option_name, option_value, &len, NULL);
+ *option_len = (socklen_t)len;
+ if (errno) {
+ return (-1);
+ } else {
+ return (0);
+ }
+ }
+ default:
+ errno = ENOPROTOOPT;
+ return (-1);
+ }
+}
+
+int
+userspace_getsockopt(struct socket *so, int level, int option_name,
+ void *option_value, socklen_t *option_len)
+{
+ return (usrsctp_getsockopt(so, level, option_name, option_value, option_len));
+}
+
+int
+usrsctp_opt_info(struct socket *so, sctp_assoc_t id, int opt, void *arg, socklen_t *size)
+{
+ if (arg == NULL) {
+ errno = EINVAL;
+ return (-1);
+ }
+ if ((id == SCTP_CURRENT_ASSOC) ||
+ (id == SCTP_ALL_ASSOC)) {
+ errno = EINVAL;
+ return (-1);
+ }
+ switch (opt) {
+ case SCTP_RTOINFO:
+ ((struct sctp_rtoinfo *)arg)->srto_assoc_id = id;
+ break;
+ case SCTP_ASSOCINFO:
+ ((struct sctp_assocparams *)arg)->sasoc_assoc_id = id;
+ break;
+ case SCTP_DEFAULT_SEND_PARAM:
+ ((struct sctp_assocparams *)arg)->sasoc_assoc_id = id;
+ break;
+ case SCTP_PRIMARY_ADDR:
+ ((struct sctp_setprim *)arg)->ssp_assoc_id = id;
+ break;
+ case SCTP_PEER_ADDR_PARAMS:
+ ((struct sctp_paddrparams *)arg)->spp_assoc_id = id;
+ break;
+ case SCTP_MAXSEG:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_AUTH_KEY:
+ ((struct sctp_authkey *)arg)->sca_assoc_id = id;
+ break;
+ case SCTP_AUTH_ACTIVE_KEY:
+ ((struct sctp_authkeyid *)arg)->scact_assoc_id = id;
+ break;
+ case SCTP_DELAYED_SACK:
+ ((struct sctp_sack_info *)arg)->sack_assoc_id = id;
+ break;
+ case SCTP_CONTEXT:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_STATUS:
+ ((struct sctp_status *)arg)->sstat_assoc_id = id;
+ break;
+ case SCTP_GET_PEER_ADDR_INFO:
+ ((struct sctp_paddrinfo *)arg)->spinfo_assoc_id = id;
+ break;
+ case SCTP_PEER_AUTH_CHUNKS:
+ ((struct sctp_authchunks *)arg)->gauth_assoc_id = id;
+ break;
+ case SCTP_LOCAL_AUTH_CHUNKS:
+ ((struct sctp_authchunks *)arg)->gauth_assoc_id = id;
+ break;
+ case SCTP_TIMEOUTS:
+ ((struct sctp_timeouts *)arg)->stimo_assoc_id = id;
+ break;
+ case SCTP_EVENT:
+ ((struct sctp_event *)arg)->se_assoc_id = id;
+ break;
+ case SCTP_DEFAULT_SNDINFO:
+ ((struct sctp_sndinfo *)arg)->snd_assoc_id = id;
+ break;
+ case SCTP_DEFAULT_PRINFO:
+ ((struct sctp_default_prinfo *)arg)->pr_assoc_id = id;
+ break;
+ case SCTP_PEER_ADDR_THLDS:
+ ((struct sctp_paddrthlds *)arg)->spt_assoc_id = id;
+ break;
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ ((struct sctp_udpencaps *)arg)->sue_assoc_id = id;
+ break;
+ case SCTP_ECN_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_PR_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_AUTH_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_ASCONF_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_RECONFIG_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_NRSACK_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_PKTDROP_SUPPORTED:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_MAX_BURST:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_ENABLE_STREAM_RESET:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ case SCTP_PR_STREAM_STATUS:
+ ((struct sctp_prstatus *)arg)->sprstat_assoc_id = id;
+ break;
+ case SCTP_PR_ASSOC_STATUS:
+ ((struct sctp_prstatus *)arg)->sprstat_assoc_id = id;
+ break;
+ case SCTP_MAX_CWND:
+ ((struct sctp_assoc_value *)arg)->assoc_id = id;
+ break;
+ default:
+ break;
+ }
+ return (usrsctp_getsockopt(so, IPPROTO_SCTP, opt, arg, size));
+}
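+
+/*
+ * Usage sketch (illustration only): querying the status of one association,
+ * given an id obtained earlier (e.g. from usrsctp_getassocid());
+ * usrsctp_opt_info() stamps the id into the argument before forwarding to
+ * usrsctp_getsockopt(), as shown above:
+ *
+ *   struct sctp_status status;
+ *   socklen_t len = (socklen_t)sizeof(status);
+ *
+ *   memset(&status, 0, sizeof(status));
+ *   if (usrsctp_opt_info(so, id, SCTP_STATUS, &status, &len) == 0) {
+ *       ... status.sstat_state now holds the association state ...
+ *   }
+ */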
+
+int
+usrsctp_set_ulpinfo(struct socket *so, void *ulp_info)
+{
+ return (register_ulp_info(so, ulp_info));
+}
+
+
+int
+usrsctp_get_ulpinfo(struct socket *so, void **pulp_info)
+{
+ return (retrieve_ulp_info(so, pulp_info));
+}
+
+int
+usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags)
+{
+ struct sockaddr *sa;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ int i;
+#if defined(INET) || defined(INET6)
+ uint16_t sport;
+ bool fix_port;
+#endif
+
+ /* validate the flags */
+ if ((flags != SCTP_BINDX_ADD_ADDR) &&
+ (flags != SCTP_BINDX_REM_ADDR)) {
+ errno = EFAULT;
+ return (-1);
+ }
+ /* validate the address count and list */
+ if ((addrcnt <= 0) || (addrs == NULL)) {
+ errno = EINVAL;
+ return (-1);
+ }
+#if defined(INET) || defined(INET6)
+ sport = 0;
+ fix_port = false;
+#endif
+ /* First pre-screen the addresses */
+ sa = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != sizeof(struct sockaddr_in)) {
+ errno = EINVAL;
+ return (-1);
+ }
+#endif
+ sin = (struct sockaddr_in *)sa;
+ if (sin->sin_port) {
+ /* non-zero port, check or save */
+ if (sport) {
+ /* Check against our port */
+ if (sport != sin->sin_port) {
+ errno = EINVAL;
+ return (-1);
+ }
+ } else {
+ /* save off the port */
+ sport = sin->sin_port;
+ fix_port = (i > 0);
+ }
+ }
+#ifndef HAVE_SA_LEN
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+#endif
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+ errno = EINVAL;
+ return (-1);
+ }
+#endif
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (sin6->sin6_port) {
+ /* non-zero port, check or save */
+ if (sport) {
+ /* Check against our port */
+ if (sport != sin6->sin6_port) {
+ errno = EINVAL;
+ return (-1);
+ }
+ } else {
+ /* save off the port */
+ sport = sin6->sin6_port;
+ fix_port = (i > 0);
+ }
+ }
+#ifndef HAVE_SA_LEN
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+#endif
+ break;
+#endif
+ default:
+ /* Invalid address family specified. */
+ errno = EAFNOSUPPORT;
+ return (-1);
+ }
+#ifdef HAVE_SA_LEN
+ sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len);
+#endif
+ }
+ sa = addrs;
+ for (i = 0; i < addrcnt; i++) {
+#ifndef HAVE_SA_LEN
+		size_t sa_len;
+#endif
+#ifdef HAVE_SA_LEN
+#if defined(INET) || defined(INET6)
+ if (fix_port) {
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct sockaddr_in *)sa)->sin_port = sport;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sa)->sin6_port = sport;
+ break;
+#endif
+ }
+ }
+#endif
+ if (usrsctp_setsockopt(so, IPPROTO_SCTP, flags, sa, sa->sa_len) != 0) {
+ return (-1);
+ }
+ sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len);
+#else
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa_len = sizeof(struct sockaddr_in);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ default:
+ sa_len = 0;
+ break;
+ }
+		/*
+		 * If a port was mentioned on one of the addresses, make sure
+		 * every address carries that port, so the operation fails or
+		 * succeeds consistently.
+		 */
+#if defined(INET) || defined(INET6)
+ if (fix_port) {
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct sockaddr_in *)sa)->sin_port = sport;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sa)->sin6_port = sport;
+ break;
+#endif
+ }
+ }
+#endif
+ if (usrsctp_setsockopt(so, IPPROTO_SCTP, flags, sa, (socklen_t)sa_len) != 0) {
+ return (-1);
+ }
+ sa = (struct sockaddr *)((caddr_t)sa + sa_len);
+#endif
+ }
+ return (0);
+}
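+
+/*
+ * Usage sketch (illustration only): adding one more local IPv4 address to a
+ * bound socket; multiple addresses would be packed back to back, as the
+ * pre-screening loop above expects:
+ *
+ *   struct sockaddr_in addr;
+ *
+ *   memset(&addr, 0, sizeof(addr));
+ *   addr.sin_family = AF_INET;
+ *   addr.sin_port = htons(5001);
+ *   addr.sin_addr.s_addr = inet_addr("192.0.2.1");
+ *   if (usrsctp_bindx(so, (struct sockaddr *)&addr, 1,
+ *                     SCTP_BINDX_ADD_ADDR) < 0) {
+ *       perror("usrsctp_bindx");
+ *   }
+ */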
+
+int
+usrsctp_connectx(struct socket *so,
+ const struct sockaddr *addrs, int addrcnt,
+ sctp_assoc_t *id)
+{
+#if defined(INET) || defined(INET6)
+ char buf[SCTP_STACK_BUF_SIZE];
+ int i, ret, cnt, *aa;
+ char *cpto;
+ const struct sockaddr *at;
+ sctp_assoc_t *p_id;
+ size_t len = sizeof(int);
+
+ /* validate the address count and list */
+ if ((addrs == NULL) || (addrcnt <= 0)) {
+ errno = EINVAL;
+ return (-1);
+ }
+ at = addrs;
+ cnt = 0;
+ cpto = ((caddr_t)buf + sizeof(int));
+ /* validate all the addresses and get the size */
+ for (i = 0; i < addrcnt; i++) {
+ switch (at->sa_family) {
+#ifdef INET
+ case AF_INET:
+#ifdef HAVE_SA_LEN
+ if (at->sa_len != sizeof(struct sockaddr_in)) {
+ errno = EINVAL;
+ return (-1);
+ }
+#endif
+ len += sizeof(struct sockaddr_in);
+ if (len > SCTP_STACK_BUF_SIZE) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ memcpy(cpto, at, sizeof(struct sockaddr_in));
+ cpto = ((caddr_t)cpto + sizeof(struct sockaddr_in));
+ at = (struct sockaddr *)((caddr_t)at + sizeof(struct sockaddr_in));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+#ifdef HAVE_SA_LEN
+ if (at->sa_len != sizeof(struct sockaddr_in6)) {
+ errno = EINVAL;
+ return (-1);
+ }
+#endif
+#ifdef INET
+ if (IN6_IS_ADDR_V4MAPPED(&((struct sockaddr_in6 *)at)->sin6_addr)) {
+ len += sizeof(struct sockaddr_in);
+ if (len > SCTP_STACK_BUF_SIZE) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ in6_sin6_2_sin((struct sockaddr_in *)cpto, (struct sockaddr_in6 *)at);
+ cpto = ((caddr_t)cpto + sizeof(struct sockaddr_in));
+ } else {
+ len += sizeof(struct sockaddr_in6);
+ if (len > SCTP_STACK_BUF_SIZE) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ memcpy(cpto, at, sizeof(struct sockaddr_in6));
+ cpto = ((caddr_t)cpto + sizeof(struct sockaddr_in6));
+ }
+#else
+ len += sizeof(struct sockaddr_in6);
+ if (len > SCTP_STACK_BUF_SIZE) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ memcpy(cpto, at, sizeof(struct sockaddr_in6));
+ cpto = ((caddr_t)cpto + sizeof(struct sockaddr_in6));
+#endif
+ at = (struct sockaddr *)((caddr_t)at + sizeof(struct sockaddr_in6));
+ break;
+#endif
+ default:
+ errno = EINVAL;
+ return (-1);
+ }
+ cnt++;
+ }
+ aa = (int *)buf;
+ *aa = cnt;
+ ret = usrsctp_setsockopt(so, IPPROTO_SCTP, SCTP_CONNECT_X, (void *)buf, (socklen_t)len);
+ if ((ret == 0) && id) {
+ p_id = (sctp_assoc_t *)buf;
+ *id = *p_id;
+ }
+ return (ret);
+#else
+ errno = EINVAL;
+ return (-1);
+#endif
+}
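+
+/* Editor's note: usage sketch for usrsctp_connectx() above (not part of the
+ * imported sources); the peer addresses are illustrative. On success the
+ * association id is reported through the optional last argument.
+ *
+ *   struct sockaddr_in peers[2];
+ *   sctp_assoc_t id;
+ *
+ *   memset(peers, 0, sizeof(peers));
+ *   peers[0].sin_family = AF_INET;
+ *   peers[0].sin_port = htons(5001);
+ *   peers[0].sin_addr.s_addr = inet_addr("203.0.113.1");
+ *   peers[1] = peers[0];
+ *   peers[1].sin_addr.s_addr = inet_addr("203.0.113.2");
+ *   if (usrsctp_connectx(so, (struct sockaddr *)peers, 2, &id) < 0) {
+ *     perror("usrsctp_connectx");
+ *   }
+ */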
+
+int
+usrsctp_getpaddrs(struct socket *so, sctp_assoc_t id, struct sockaddr **raddrs)
+{
+ struct sctp_getaddresses *addrs;
+ struct sockaddr *sa;
+ sctp_assoc_t asoc;
+ caddr_t lim;
+ socklen_t opt_len;
+ int cnt;
+
+ if (raddrs == NULL) {
+ errno = EFAULT;
+ return (-1);
+ }
+ asoc = id;
+ opt_len = (socklen_t)sizeof(sctp_assoc_t);
+ if (usrsctp_getsockopt(so, IPPROTO_SCTP, SCTP_GET_REMOTE_ADDR_SIZE, &asoc, &opt_len) != 0) {
+ return (-1);
+ }
+ /* size required is returned in 'asoc' */
+ opt_len = (socklen_t)((size_t)asoc + sizeof(struct sctp_getaddresses));
+ addrs = calloc(1, (size_t)opt_len);
+ if (addrs == NULL) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ addrs->sget_assoc_id = id;
+ /* Now lets get the array of addresses */
+ if (usrsctp_getsockopt(so, IPPROTO_SCTP, SCTP_GET_PEER_ADDRESSES, addrs, &opt_len) != 0) {
+ free(addrs);
+ return (-1);
+ }
+ *raddrs = &addrs->addr[0].sa;
+ cnt = 0;
+ sa = &addrs->addr[0].sa;
+ lim = (caddr_t)addrs + opt_len;
+#ifdef HAVE_SA_LEN
+ while (((caddr_t)sa < lim) && (sa->sa_len > 0)) {
+ sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len);
+#else
+ while ((caddr_t)sa < lim) {
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+ break;
+#endif
+ case AF_CONN:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_conn));
+ break;
+ default:
+ return (cnt);
+ }
+#endif
+ cnt++;
+ }
+ return (cnt);
+}
+
+void
+usrsctp_freepaddrs(struct sockaddr *addrs)
+{
+ /* Take away the hidden association id */
+ void *fr_addr;
+
+ fr_addr = (void *)((caddr_t)addrs - offsetof(struct sctp_getaddresses, addr));
+ /* Now free it */
+ free(fr_addr);
+}
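+
+/* Editor's note: usage sketch for usrsctp_getpaddrs()/usrsctp_freepaddrs()
+ * above (not part of the imported sources); use_peer_addresses() is a
+ * hypothetical consumer that walks the packed array by per-family sockaddr
+ * size. The returned block hides the association id in front of the
+ * addresses, so it must be released with usrsctp_freepaddrs(), never
+ * free() directly.
+ *
+ *   struct sockaddr *paddrs;
+ *   int n;
+ *
+ *   if ((n = usrsctp_getpaddrs(so, id, &paddrs)) < 0) {
+ *     perror("usrsctp_getpaddrs");
+ *   } else {
+ *     use_peer_addresses(paddrs, n);
+ *     usrsctp_freepaddrs(paddrs);
+ *   }
+ */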
+
+int
+usrsctp_getladdrs(struct socket *so, sctp_assoc_t id, struct sockaddr **raddrs)
+{
+ struct sctp_getaddresses *addrs;
+ caddr_t lim;
+ struct sockaddr *sa;
+ size_t size_of_addresses;
+ socklen_t opt_len;
+ int cnt;
+
+ if (raddrs == NULL) {
+ errno = EFAULT;
+ return (-1);
+ }
+ size_of_addresses = 0;
+ opt_len = (socklen_t)sizeof(int);
+ if (usrsctp_getsockopt(so, IPPROTO_SCTP, SCTP_GET_LOCAL_ADDR_SIZE, &size_of_addresses, &opt_len) != 0) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ if (size_of_addresses == 0) {
+ errno = ENOTCONN;
+ return (-1);
+ }
+ opt_len = (socklen_t)(size_of_addresses + sizeof(struct sctp_getaddresses));
+ addrs = calloc(1, (size_t)opt_len);
+ if (addrs == NULL) {
+ errno = ENOMEM;
+ return (-1);
+ }
+ addrs->sget_assoc_id = id;
+ /* Now lets get the array of addresses */
+ if (usrsctp_getsockopt(so, IPPROTO_SCTP, SCTP_GET_LOCAL_ADDRESSES, addrs, &opt_len) != 0) {
+ free(addrs);
+ errno = ENOMEM;
+ return (-1);
+ }
+ *raddrs = &addrs->addr[0].sa;
+ cnt = 0;
+ sa = &addrs->addr[0].sa;
+ lim = (caddr_t)addrs + opt_len;
+#ifdef HAVE_SA_LEN
+ while (((caddr_t)sa < lim) && (sa->sa_len > 0)) {
+ sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len);
+#else
+ while ((caddr_t)sa < lim) {
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+ break;
+#endif
+ case AF_CONN:
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_conn));
+ break;
+ default:
+ return (cnt);
+ }
+#endif
+ cnt++;
+ }
+ return (cnt);
+}
+
+void
+usrsctp_freeladdrs(struct sockaddr *addrs)
+{
+ /* Take away the hidden association id */
+ void *fr_addr;
+
+ fr_addr = (void *)((caddr_t)addrs - offsetof(struct sctp_getaddresses, addr));
+ /* Now free it */
+ free(fr_addr);
+}
+
+#ifdef INET
+void
+sctp_userspace_ip_output(int *result, struct mbuf *o_pak,
+ sctp_route_t *ro, void *stcb,
+ uint32_t vrf_id)
+{
+ struct mbuf *m;
+ struct mbuf *m_orig;
+ int iovcnt;
+ int len;
+ int send_count;
+ struct ip *ip;
+ struct udphdr *udp;
+ struct sockaddr_in dst;
+#if defined(_WIN32)
+ WSAMSG win_msg_hdr;
+ DWORD win_sent_len;
+ WSABUF send_iovec[MAXLEN_MBUF_CHAIN];
+ WSABUF winbuf;
+#else
+ struct iovec send_iovec[MAXLEN_MBUF_CHAIN];
+ struct msghdr msg_hdr;
+#endif
+ int use_udp_tunneling;
+
+ *result = 0;
+
+ m = SCTP_HEADER_TO_CHAIN(o_pak);
+ m_orig = m;
+
+ len = sizeof(struct ip);
+ if (SCTP_BUF_LEN(m) < len) {
+ if ((m = m_pullup(m, len)) == NULL) {
+ SCTP_PRINTF("Can not get the IP header in the first mbuf.\n");
+ return;
+ }
+ }
+ ip = mtod(m, struct ip *);
+ use_udp_tunneling = (ip->ip_p == IPPROTO_UDP);
+
+ if (use_udp_tunneling) {
+ len = sizeof(struct ip) + sizeof(struct udphdr);
+ if (SCTP_BUF_LEN(m) < len) {
+ if ((m = m_pullup(m, len)) == NULL) {
+ SCTP_PRINTF("Can not get the UDP/IP header in the first mbuf.\n");
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ udp = (struct udphdr *)(ip + 1);
+ } else {
+ udp = NULL;
+ }
+
+ if (!use_udp_tunneling) {
+ if (ip->ip_src.s_addr == INADDR_ANY) {
+ /* TODO get addr of outgoing interface */
+ SCTP_PRINTF("Why did the SCTP implementation did not choose a source address?\n");
+ }
+ /* TODO need to worry about ro->ro_dst as in ip_output? */
+#if defined(__linux__) || defined(_WIN32) || (defined(__FreeBSD__) && (__FreeBSD_version >= 1100030))
+ /* need to put certain fields into network order for Linux */
+ ip->ip_len = htons(ip->ip_len);
+#endif
+ }
+
+ memset((void *)&dst, 0, sizeof(struct sockaddr_in));
+ dst.sin_family = AF_INET;
+ dst.sin_addr.s_addr = ip->ip_dst.s_addr;
+#ifdef HAVE_SIN_LEN
+ dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+ if (use_udp_tunneling) {
+ dst.sin_port = udp->uh_dport;
+ } else {
+ dst.sin_port = 0;
+ }
+
+ /* tweak the mbuf chain */
+ if (use_udp_tunneling) {
+ m_adj(m, sizeof(struct ip) + sizeof(struct udphdr));
+ }
+
+ send_count = 0;
+ for (iovcnt = 0; m != NULL && iovcnt < MAXLEN_MBUF_CHAIN; m = m->m_next, iovcnt++) {
+#if !defined(_WIN32)
+ send_iovec[iovcnt].iov_base = (caddr_t)m->m_data;
+ send_iovec[iovcnt].iov_len = SCTP_BUF_LEN(m);
+ send_count += send_iovec[iovcnt].iov_len;
+#else
+ send_iovec[iovcnt].buf = (caddr_t)m->m_data;
+ send_iovec[iovcnt].len = SCTP_BUF_LEN(m);
+ send_count += send_iovec[iovcnt].len;
+#endif
+ }
+
+ if (m != NULL) {
+ SCTP_PRINTF("mbuf chain couldn't be copied completely\n");
+ goto free_mbuf;
+ }
+
+#if !defined(_WIN32)
+ msg_hdr.msg_name = (struct sockaddr *) &dst;
+ msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
+ msg_hdr.msg_iov = send_iovec;
+ msg_hdr.msg_iovlen = iovcnt;
+ msg_hdr.msg_control = NULL;
+ msg_hdr.msg_controllen = 0;
+ msg_hdr.msg_flags = 0;
+
+ if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp) != -1)) {
+ if (sendmsg(SCTP_BASE_VAR(userspace_rawsctp), &msg_hdr, MSG_DONTWAIT) < 0) {
+ *result = errno;
+ }
+ }
+ if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp) != -1)) {
+ if (sendmsg(SCTP_BASE_VAR(userspace_udpsctp), &msg_hdr, MSG_DONTWAIT) < 0) {
+ *result = errno;
+ }
+ }
+#else
+ win_msg_hdr.name = (struct sockaddr *) &dst;
+ win_msg_hdr.namelen = sizeof(struct sockaddr_in);
+ win_msg_hdr.lpBuffers = (LPWSABUF)send_iovec;
+ win_msg_hdr.dwBufferCount = iovcnt;
+ winbuf.len = 0;
+ winbuf.buf = NULL;
+ win_msg_hdr.Control = winbuf;
+ win_msg_hdr.dwFlags = 0;
+
+ if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp) != -1)) {
+ if (WSASendTo(SCTP_BASE_VAR(userspace_rawsctp), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) {
+ *result = WSAGetLastError();
+ }
+ }
+ if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp) != -1)) {
+ if (WSASendTo(SCTP_BASE_VAR(userspace_udpsctp), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) {
+ *result = WSAGetLastError();
+ }
+ }
+#endif
+free_mbuf:
+ sctp_m_freem(m_orig);
+}
+#endif
+
+#if defined(INET6)
+void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak,
+ struct route_in6 *ro, void *stcb,
+ uint32_t vrf_id)
+{
+ struct mbuf *m;
+ struct mbuf *m_orig;
+ int iovcnt;
+ int len;
+ int send_count;
+ struct ip6_hdr *ip6;
+ struct udphdr *udp;
+ struct sockaddr_in6 dst;
+#if defined(_WIN32)
+ WSAMSG win_msg_hdr;
+ DWORD win_sent_len;
+ WSABUF send_iovec[MAXLEN_MBUF_CHAIN];
+ WSABUF winbuf;
+#else
+ struct iovec send_iovec[MAXLEN_MBUF_CHAIN];
+ struct msghdr msg_hdr;
+#endif
+ int use_udp_tunneling;
+
+ *result = 0;
+
+ m = SCTP_HEADER_TO_CHAIN(o_pak);
+ m_orig = m;
+
+ len = sizeof(struct ip6_hdr);
+
+ if (SCTP_BUF_LEN(m) < len) {
+ if ((m = m_pullup(m, len)) == NULL) {
+ SCTP_PRINTF("Can not get the IP header in the first mbuf.\n");
+ return;
+ }
+ }
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ use_udp_tunneling = (ip6->ip6_nxt == IPPROTO_UDP);
+
+ if (use_udp_tunneling) {
+ len = sizeof(struct ip6_hdr) + sizeof(struct udphdr);
+ if (SCTP_BUF_LEN(m) < len) {
+ if ((m = m_pullup(m, len)) == NULL) {
+ SCTP_PRINTF("Can not get the UDP/IP header in the first mbuf.\n");
+ return;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ }
+ udp = (struct udphdr *)(ip6 + 1);
+ } else {
+ udp = NULL;
+ }
+
+ if (!use_udp_tunneling) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
+ /* TODO get addr of outgoing interface */
+ SCTP_PRINTF("Why did the SCTP implementation did not choose a source address?\n");
+ }
+ /* TODO need to worry about ro->ro_dst as in ip_output? */
+ }
+
+ memset((void *)&dst, 0, sizeof(struct sockaddr_in6));
+ dst.sin6_family = AF_INET6;
+ dst.sin6_addr = ip6->ip6_dst;
+#ifdef HAVE_SIN6_LEN
+ dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+
+ if (use_udp_tunneling) {
+ dst.sin6_port = udp->uh_dport;
+ } else {
+ dst.sin6_port = 0;
+ }
+
+ /* tweak the mbuf chain */
+ if (use_udp_tunneling) {
+ m_adj(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ } else {
+ m_adj(m, sizeof(struct ip6_hdr));
+ }
+
+ send_count = 0;
+ for (iovcnt = 0; m != NULL && iovcnt < MAXLEN_MBUF_CHAIN; m = m->m_next, iovcnt++) {
+#if !defined(_WIN32)
+ send_iovec[iovcnt].iov_base = (caddr_t)m->m_data;
+ send_iovec[iovcnt].iov_len = SCTP_BUF_LEN(m);
+ send_count += send_iovec[iovcnt].iov_len;
+#else
+ send_iovec[iovcnt].buf = (caddr_t)m->m_data;
+ send_iovec[iovcnt].len = SCTP_BUF_LEN(m);
+ send_count += send_iovec[iovcnt].len;
+#endif
+ }
+ if (m != NULL) {
+ SCTP_PRINTF("mbuf chain couldn't be copied completely\n");
+ goto free_mbuf;
+ }
+
+#if !defined(_WIN32)
+ msg_hdr.msg_name = (struct sockaddr *) &dst;
+ msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
+ msg_hdr.msg_iov = send_iovec;
+ msg_hdr.msg_iovlen = iovcnt;
+ msg_hdr.msg_control = NULL;
+ msg_hdr.msg_controllen = 0;
+ msg_hdr.msg_flags = 0;
+
+ if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp6) != -1)) {
+ if (sendmsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg_hdr, MSG_DONTWAIT) < 0) {
+ *result = errno;
+ }
+ }
+ if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp6) != -1)) {
+ if (sendmsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg_hdr, MSG_DONTWAIT) < 0) {
+ *result = errno;
+ }
+ }
+#else
+ win_msg_hdr.name = (struct sockaddr *) &dst;
+ win_msg_hdr.namelen = sizeof(struct sockaddr_in6);
+ win_msg_hdr.lpBuffers = (LPWSABUF)send_iovec;
+ win_msg_hdr.dwBufferCount = iovcnt;
+ winbuf.len = 0;
+ winbuf.buf = NULL;
+ win_msg_hdr.Control = winbuf;
+ win_msg_hdr.dwFlags = 0;
+
+ if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp6) != -1)) {
+ if (WSASendTo(SCTP_BASE_VAR(userspace_rawsctp6), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) {
+ *result = WSAGetLastError();
+ }
+ }
+ if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp6) != -1)) {
+ if (WSASendTo(SCTP_BASE_VAR(userspace_udpsctp6), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) {
+ *result = WSAGetLastError();
+ }
+ }
+#endif
+free_mbuf:
+ sctp_m_freem(m_orig);
+}
+#endif
+
+void
+usrsctp_register_address(void *addr)
+{
+ struct sockaddr_conn sconn;
+
+ memset(&sconn, 0, sizeof(struct sockaddr_conn));
+ sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ sconn.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ sconn.sconn_port = 0;
+ sconn.sconn_addr = addr;
+ sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID,
+ NULL,
+ 0xffffffff,
+ 0,
+ "conn",
+ NULL,
+ (struct sockaddr *)&sconn,
+ 0,
+ 0);
+}
+
+void
+usrsctp_deregister_address(void *addr)
+{
+ struct sockaddr_conn sconn;
+
+ memset(&sconn, 0, sizeof(struct sockaddr_conn));
+ sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ sconn.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ sconn.sconn_port = 0;
+ sconn.sconn_addr = addr;
+ sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID,
+ (struct sockaddr *)&sconn,
+ 0xffffffff,
+ "conn");
+}
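+
+/* Editor's note: sketch of the AF_CONN address lifecycle above (not part of
+ * the imported sources). The opaque pointer only identifies a transport
+ * instance to usrsctp; my_transport is a hypothetical handle.
+ *
+ *   void *transport = my_transport;
+ *   usrsctp_register_address(transport);    // before creating AF_CONN sockets
+ *   ...                                     // run AF_CONN traffic
+ *   usrsctp_deregister_address(transport);  // before destroying the transport
+ */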
+
+#define PREAMBLE_FORMAT "\n%c %02d:%02d:%02d.%06ld "
+#define PREAMBLE_LENGTH 19
+#define HEADER "0000 "
+#define TRAILER "# SCTP_PACKET\n"
+
+char *
+usrsctp_dumppacket(const void *buf, size_t len, int outbound)
+{
+ size_t i, pos;
+ char *dump_buf, *packet;
+ struct tm t;
+#ifdef _WIN32
+ struct timeb tb;
+#else
+ struct timeval tv;
+ time_t sec;
+#endif
+
+ if ((len == 0) || (buf == NULL)) {
+ return (NULL);
+ }
+ if ((dump_buf = malloc(PREAMBLE_LENGTH + strlen(HEADER) + 3 * len + strlen(TRAILER) + 1)) == NULL) {
+ return (NULL);
+ }
+ pos = 0;
+#ifdef _WIN32
+ ftime(&tb);
+ localtime_s(&t, &tb.time);
+#if defined(__MINGW32__)
+ if (snprintf(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_FORMAT,
+ outbound ? 'O' : 'I',
+ t.tm_hour, t.tm_min, t.tm_sec, (long)(1000 * tb.millitm)) < 0) {
+ free(dump_buf);
+ return (NULL);
+ }
+#else
+ if (_snprintf_s(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_LENGTH, PREAMBLE_FORMAT,
+ outbound ? 'O' : 'I',
+ t.tm_hour, t.tm_min, t.tm_sec, (long)(1000 * tb.millitm)) < 0) {
+ free(dump_buf);
+ return (NULL);
+ }
+#endif
+#else
+ gettimeofday(&tv, NULL);
+ sec = (time_t)tv.tv_sec;
+ localtime_r((const time_t *)&sec, &t);
+ if (snprintf(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_FORMAT,
+ outbound ? 'O' : 'I',
+ t.tm_hour, t.tm_min, t.tm_sec, (long)tv.tv_usec) < 0) {
+ free(dump_buf);
+ return (NULL);
+ }
+#endif
+ pos += PREAMBLE_LENGTH;
+#if defined(_WIN32) && !defined(__MINGW32__)
+ strncpy_s(dump_buf + pos, strlen(HEADER) + 1, HEADER, strlen(HEADER));
+#else
+ strcpy(dump_buf + pos, HEADER);
+#endif
+ pos += strlen(HEADER);
+ packet = (char *)buf;
+ for (i = 0; i < len; i++) {
+ uint8_t byte, low, high;
+
+ byte = (uint8_t)packet[i];
+ high = byte / 16;
+ low = byte % 16;
+ dump_buf[pos++] = high < 10 ? '0' + high : 'a' + (high - 10);
+ dump_buf[pos++] = low < 10 ? '0' + low : 'a' + (low - 10);
+ dump_buf[pos++] = ' ';
+ }
+#if defined(_WIN32) && !defined(__MINGW32__)
+ strncpy_s(dump_buf + pos, strlen(TRAILER) + 1, TRAILER, strlen(TRAILER));
+#else
+ strcpy(dump_buf + pos, TRAILER);
+#endif
+ pos += strlen(TRAILER);
+ dump_buf[pos++] = '\0';
+ return (dump_buf);
+}
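+
+/* Editor's note: usage sketch (not part of the imported sources). The
+ * returned buffer is a text2pcap-style hex dump; ownership passes to the
+ * caller and must be returned via usrsctp_freedumpbuffer(). The
+ * SCTP_DUMP_INBOUND constant is assumed from usrsctplib/usrsctp.h.
+ *
+ *   char *dump;
+ *
+ *   if ((dump = usrsctp_dumppacket(buffer, length, SCTP_DUMP_INBOUND)) != NULL) {
+ *     fprintf(stderr, "%s", dump);
+ *     usrsctp_freedumpbuffer(dump);
+ *   }
+ */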
+
+void
+usrsctp_freedumpbuffer(char *buf)
+{
+ free(buf);
+}
+
+void
+usrsctp_enable_crc32c_offload(void)
+{
+ SCTP_BASE_VAR(crc32c_offloaded) = 1;
+}
+
+void
+usrsctp_disable_crc32c_offload(void)
+{
+ SCTP_BASE_VAR(crc32c_offloaded) = 0;
+}
+
+/* Compute the CRC32C in network byte order */
+uint32_t
+usrsctp_crc32c(void *buffer, size_t length)
+{
+ uint32_t base = 0xffffffff;
+
+ base = calculate_crc32c(0xffffffff, (unsigned char *)buffer, (unsigned int) length);
+ base = sctp_finalize_crc32c(base);
+ return (base);
+}
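+
+/* Editor's note: sketch of the CRC32C offload contract (not part of the
+ * imported sources). With offload enabled, usrsctp neither computes nor
+ * verifies checksums; the transport does both, zeroing the checksum field
+ * before computing over the whole packet. common_header is assumed to point
+ * at the struct sctp_common_header from usrsctplib/usrsctp.h.
+ *
+ *   usrsctp_enable_crc32c_offload();
+ *   ...
+ *   common_header->crc32c = 0;
+ *   common_header->crc32c = usrsctp_crc32c(packet, packet_len);
+ */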
+
+void
+usrsctp_conninput(void *addr, const void *buffer, size_t length, uint8_t ecn_bits)
+{
+ struct sockaddr_conn src, dst;
+ struct mbuf *m, *mm;
+ struct sctphdr *sh;
+ struct sctp_chunkhdr *ch;
+ int remaining;
+
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+ memset(&src, 0, sizeof(struct sockaddr_conn));
+ src.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ src.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ src.sconn_addr = addr;
+ memset(&dst, 0, sizeof(struct sockaddr_conn));
+ dst.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+ dst.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+ dst.sconn_addr = addr;
+ if ((m = sctp_get_mbuf_for_msg((unsigned int)length, 1, M_NOWAIT, 0, MT_DATA)) == NULL) {
+ return;
+ }
+ /* Set the lengths fields of the mbuf chain.
+ * This is expected by m_copyback().
+ */
+ remaining = (int)length;
+ for (mm = m; mm != NULL; mm = mm->m_next) {
+ mm->m_len = min((int)M_SIZE(mm), remaining);
+ m->m_pkthdr.len += mm->m_len;
+ remaining -= mm->m_len;
+ }
+ KASSERT(remaining == 0, ("usrsctp_conninput: %d bytes left", remaining));
+ m_copyback(m, 0, (int)length, (caddr_t)buffer);
+ if (SCTP_BUF_LEN(m) < (int)(sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr))) {
+ if ((m = m_pullup(m, sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr))) == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return;
+ }
+ }
+ sh = mtod(m, struct sctphdr *);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ src.sconn_port = sh->src_port;
+ dst.sconn_port = sh->dest_port;
+ sctp_common_input_processing(&m, 0, sizeof(struct sctphdr), (int)length,
+ (struct sockaddr *)&src,
+ (struct sockaddr *)&dst,
+ sh, ch,
+ SCTP_BASE_VAR(crc32c_offloaded) == 1 ? 0 : 1,
+ ecn_bits,
+ SCTP_DEFAULT_VRFID, 0);
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return;
+}
+
+void usrsctp_handle_timers(uint32_t elapsed_milliseconds)
+{
+ sctp_handle_tick(sctp_msecs_to_ticks(elapsed_milliseconds));
+}
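+
+/* Editor's note: sketch of driving usrsctp over a user-supplied transport
+ * (not part of the imported sources). recv_from_transport() and the 10 ms
+ * cadence are hypothetical, and usrsctp_init_nothreads() is assumed to be
+ * the threadless init variant from usrsctplib/usrsctp.h; without internal
+ * timer threads, the caller must tick usrsctp_handle_timers() itself.
+ *
+ *   usrsctp_init_nothreads(0, conn_output_cb, NULL);
+ *   for (;;) {
+ *     ssize_t n = recv_from_transport(transport, buf, sizeof(buf));
+ *     if (n > 0) {
+ *       usrsctp_conninput(transport, buf, (size_t)n, 0);
+ *     }
+ *     usrsctp_handle_timers(10);   // caller-measured elapsed milliseconds
+ *   }
+ */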
+
+int
+usrsctp_get_events(struct socket *so)
+{
+ int events = 0;
+
+ if (so == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
+ SOCK_LOCK(so);
+ if (soreadable(so)) {
+ events |= SCTP_EVENT_READ;
+ }
+ if (sowriteable(so)) {
+ events |= SCTP_EVENT_WRITE;
+ }
+ if (so->so_error) {
+ events |= SCTP_EVENT_ERROR;
+ }
+ SOCK_UNLOCK(so);
+
+ return events;
+}
+
+int
+usrsctp_set_upcall(struct socket *so, void (*upcall)(struct socket *, void *, int), void *arg)
+{
+ if (so == NULL) {
+ errno = EBADF;
+ return (-1);
+ }
+
+ SOCK_LOCK(so);
+ so->so_upcall = upcall;
+ so->so_upcallarg = arg;
+ so->so_snd.sb_flags |= SB_UPCALL;
+ so->so_rcv.sb_flags |= SB_UPCALL;
+ SOCK_UNLOCK(so);
+
+ return (0);
+}
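+
+/* Editor's note: sketch of a non-blocking event loop built on the upcall
+ * above (not part of the imported sources); handle_read()/handle_write()
+ * are hypothetical application handlers.
+ *
+ *   static void
+ *   socket_upcall(struct socket *s, void *arg, int flags)
+ *   {
+ *     int events = usrsctp_get_events(s);
+ *
+ *     if (events & SCTP_EVENT_READ) {
+ *       handle_read(s, arg);
+ *     }
+ *     if (events & SCTP_EVENT_WRITE) {
+ *       handle_write(s, arg);
+ *     }
+ *   }
+ *
+ *   usrsctp_set_upcall(so, socket_upcall, ctx);
+ */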
+
+#define USRSCTP_TUNABLE_SET_DEF(__field, __prefix) \
+int usrsctp_tunable_set_ ## __field(uint32_t value) \
+{ \
+ if ((value < __prefix##_MIN) || \
+ (value > __prefix##_MAX)) { \
+ errno = EINVAL; \
+ return (-1); \
+ } else { \
+ SCTP_BASE_SYSCTL(__field) = value; \
+ return (0); \
+ } \
+}
+
+USRSCTP_TUNABLE_SET_DEF(sctp_hashtblsize, SCTPCTL_TCBHASHSIZE)
+USRSCTP_TUNABLE_SET_DEF(sctp_pcbtblsize, SCTPCTL_PCBHASHSIZE)
+USRSCTP_TUNABLE_SET_DEF(sctp_chunkscale, SCTPCTL_CHUNKSCALE)
+
+#define USRSCTP_SYSCTL_SET_DEF(__field, __prefix) \
+int usrsctp_sysctl_set_ ## __field(uint32_t value) \
+{ \
+ if ((value < __prefix##_MIN) || \
+ (value > __prefix##_MAX)) { \
+ errno = EINVAL; \
+ return (-1); \
+ } else { \
+ SCTP_BASE_SYSCTL(__field) = value; \
+ return (0); \
+ } \
+}
+
+#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+#endif
+USRSCTP_SYSCTL_SET_DEF(sctp_sendspace, SCTPCTL_MAXDGRAM)
+USRSCTP_SYSCTL_SET_DEF(sctp_recvspace, SCTPCTL_RECVSPACE)
+USRSCTP_SYSCTL_SET_DEF(sctp_auto_asconf, SCTPCTL_AUTOASCONF)
+USRSCTP_SYSCTL_SET_DEF(sctp_ecn_enable, SCTPCTL_ECN_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_pr_enable, SCTPCTL_PR_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_auth_enable, SCTPCTL_AUTH_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_asconf_enable, SCTPCTL_ASCONF_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_reconfig_enable, SCTPCTL_RECONFIG_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_nrsack_enable, SCTPCTL_NRSACK_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_pktdrop_enable, SCTPCTL_PKTDROP_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_no_csum_on_loopback, SCTPCTL_LOOPBACK_NOCSUM)
+USRSCTP_SYSCTL_SET_DEF(sctp_peer_chunk_oh, SCTPCTL_PEER_CHKOH)
+USRSCTP_SYSCTL_SET_DEF(sctp_max_burst_default, SCTPCTL_MAXBURST)
+USRSCTP_SYSCTL_SET_DEF(sctp_max_chunks_on_queue, SCTPCTL_MAXCHUNKS)
+USRSCTP_SYSCTL_SET_DEF(sctp_min_split_point, SCTPCTL_MIN_SPLIT_POINT)
+USRSCTP_SYSCTL_SET_DEF(sctp_delayed_sack_time_default, SCTPCTL_DELAYED_SACK_TIME)
+USRSCTP_SYSCTL_SET_DEF(sctp_sack_freq_default, SCTPCTL_SACK_FREQ)
+USRSCTP_SYSCTL_SET_DEF(sctp_system_free_resc_limit, SCTPCTL_SYS_RESOURCE)
+USRSCTP_SYSCTL_SET_DEF(sctp_asoc_free_resc_limit, SCTPCTL_ASOC_RESOURCE)
+USRSCTP_SYSCTL_SET_DEF(sctp_heartbeat_interval_default, SCTPCTL_HEARTBEAT_INTERVAL)
+USRSCTP_SYSCTL_SET_DEF(sctp_pmtu_raise_time_default, SCTPCTL_PMTU_RAISE_TIME)
+USRSCTP_SYSCTL_SET_DEF(sctp_shutdown_guard_time_default, SCTPCTL_SHUTDOWN_GUARD_TIME)
+USRSCTP_SYSCTL_SET_DEF(sctp_secret_lifetime_default, SCTPCTL_SECRET_LIFETIME)
+USRSCTP_SYSCTL_SET_DEF(sctp_rto_max_default, SCTPCTL_RTO_MAX)
+USRSCTP_SYSCTL_SET_DEF(sctp_rto_min_default, SCTPCTL_RTO_MIN)
+USRSCTP_SYSCTL_SET_DEF(sctp_rto_initial_default, SCTPCTL_RTO_INITIAL)
+USRSCTP_SYSCTL_SET_DEF(sctp_init_rto_max_default, SCTPCTL_INIT_RTO_MAX)
+USRSCTP_SYSCTL_SET_DEF(sctp_valid_cookie_life_default, SCTPCTL_VALID_COOKIE_LIFE)
+USRSCTP_SYSCTL_SET_DEF(sctp_init_rtx_max_default, SCTPCTL_INIT_RTX_MAX)
+USRSCTP_SYSCTL_SET_DEF(sctp_assoc_rtx_max_default, SCTPCTL_ASSOC_RTX_MAX)
+USRSCTP_SYSCTL_SET_DEF(sctp_path_rtx_max_default, SCTPCTL_PATH_RTX_MAX)
+USRSCTP_SYSCTL_SET_DEF(sctp_add_more_threshold, SCTPCTL_ADD_MORE_ON_OUTPUT)
+USRSCTP_SYSCTL_SET_DEF(sctp_nr_incoming_streams_default, SCTPCTL_INCOMING_STREAMS)
+USRSCTP_SYSCTL_SET_DEF(sctp_nr_outgoing_streams_default, SCTPCTL_OUTGOING_STREAMS)
+USRSCTP_SYSCTL_SET_DEF(sctp_cmt_on_off, SCTPCTL_CMT_ON_OFF)
+USRSCTP_SYSCTL_SET_DEF(sctp_cmt_use_dac, SCTPCTL_CMT_USE_DAC)
+USRSCTP_SYSCTL_SET_DEF(sctp_use_cwnd_based_maxburst, SCTPCTL_CWND_MAXBURST)
+USRSCTP_SYSCTL_SET_DEF(sctp_nat_friendly, SCTPCTL_NAT_FRIENDLY)
+USRSCTP_SYSCTL_SET_DEF(sctp_L2_abc_variable, SCTPCTL_ABC_L_VAR)
+USRSCTP_SYSCTL_SET_DEF(sctp_mbuf_threshold_count, SCTPCTL_MAX_CHAINED_MBUFS)
+USRSCTP_SYSCTL_SET_DEF(sctp_do_drain, SCTPCTL_DO_SCTP_DRAIN)
+USRSCTP_SYSCTL_SET_DEF(sctp_hb_maxburst, SCTPCTL_HB_MAX_BURST)
+USRSCTP_SYSCTL_SET_DEF(sctp_abort_if_one_2_one_hits_limit, SCTPCTL_ABORT_AT_LIMIT)
+USRSCTP_SYSCTL_SET_DEF(sctp_min_residual, SCTPCTL_MIN_RESIDUAL)
+USRSCTP_SYSCTL_SET_DEF(sctp_max_retran_chunk, SCTPCTL_MAX_RETRAN_CHUNK)
+USRSCTP_SYSCTL_SET_DEF(sctp_logging_level, SCTPCTL_LOGGING_LEVEL)
+USRSCTP_SYSCTL_SET_DEF(sctp_default_cc_module, SCTPCTL_DEFAULT_CC_MODULE)
+USRSCTP_SYSCTL_SET_DEF(sctp_default_frag_interleave, SCTPCTL_DEFAULT_FRAG_INTERLEAVE)
+USRSCTP_SYSCTL_SET_DEF(sctp_mobility_base, SCTPCTL_MOBILITY_BASE)
+USRSCTP_SYSCTL_SET_DEF(sctp_mobility_fasthandoff, SCTPCTL_MOBILITY_FASTHANDOFF)
+USRSCTP_SYSCTL_SET_DEF(sctp_inits_include_nat_friendly, SCTPCTL_NAT_FRIENDLY_INITS)
+USRSCTP_SYSCTL_SET_DEF(sctp_udp_tunneling_port, SCTPCTL_UDP_TUNNELING_PORT)
+USRSCTP_SYSCTL_SET_DEF(sctp_enable_sack_immediately, SCTPCTL_SACK_IMMEDIATELY_ENABLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_vtag_time_wait, SCTPCTL_TIME_WAIT)
+USRSCTP_SYSCTL_SET_DEF(sctp_blackhole, SCTPCTL_BLACKHOLE)
+USRSCTP_SYSCTL_SET_DEF(sctp_diag_info_code, SCTPCTL_DIAG_INFO_CODE)
+USRSCTP_SYSCTL_SET_DEF(sctp_fr_max_burst_default, SCTPCTL_FRMAXBURST)
+USRSCTP_SYSCTL_SET_DEF(sctp_path_pf_threshold, SCTPCTL_PATH_PF_THRESHOLD)
+USRSCTP_SYSCTL_SET_DEF(sctp_default_ss_module, SCTPCTL_DEFAULT_SS_MODULE)
+USRSCTP_SYSCTL_SET_DEF(sctp_rttvar_bw, SCTPCTL_RTTVAR_BW)
+USRSCTP_SYSCTL_SET_DEF(sctp_rttvar_rtt, SCTPCTL_RTTVAR_RTT)
+USRSCTP_SYSCTL_SET_DEF(sctp_rttvar_eqret, SCTPCTL_RTTVAR_EQRET)
+USRSCTP_SYSCTL_SET_DEF(sctp_steady_step, SCTPCTL_RTTVAR_STEADYS)
+USRSCTP_SYSCTL_SET_DEF(sctp_use_dccc_ecn, SCTPCTL_RTTVAR_DCCCECN)
+USRSCTP_SYSCTL_SET_DEF(sctp_buffer_splitting, SCTPCTL_BUFFER_SPLITTING)
+USRSCTP_SYSCTL_SET_DEF(sctp_initial_cwnd, SCTPCTL_INITIAL_CWND)
+#ifdef SCTP_DEBUG
+USRSCTP_SYSCTL_SET_DEF(sctp_debug_on, SCTPCTL_DEBUG)
+#endif
+#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
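+
+/* Editor's note: usage sketch (not part of the imported sources). The
+ * tunables (hash table, PCB table, chunk scale) size static state and are
+ * assumed to require setting before usrsctp_init(); the sysctl setters
+ * validate against their SCTPCTL_*_MIN/_MAX bounds and return -1 with
+ * errno set to EINVAL when out of range.
+ *
+ *   usrsctp_tunable_set_sctp_hashtblsize(1024);
+ *   usrsctp_init(9899, NULL, NULL);
+ *   if (usrsctp_sysctl_set_sctp_ecn_enable(0) < 0) {
+ *     perror("disable ECN");
+ *   }
+ */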
+
+#define USRSCTP_SYSCTL_GET_DEF(__field) \
+uint32_t usrsctp_sysctl_get_ ## __field(void) { \
+ return SCTP_BASE_SYSCTL(__field); \
+}
+
+USRSCTP_SYSCTL_GET_DEF(sctp_sendspace)
+USRSCTP_SYSCTL_GET_DEF(sctp_recvspace)
+USRSCTP_SYSCTL_GET_DEF(sctp_auto_asconf)
+USRSCTP_SYSCTL_GET_DEF(sctp_multiple_asconfs)
+USRSCTP_SYSCTL_GET_DEF(sctp_ecn_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_pr_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_auth_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_asconf_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_reconfig_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_nrsack_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_pktdrop_enable)
+USRSCTP_SYSCTL_GET_DEF(sctp_no_csum_on_loopback)
+USRSCTP_SYSCTL_GET_DEF(sctp_peer_chunk_oh)
+USRSCTP_SYSCTL_GET_DEF(sctp_max_burst_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_max_chunks_on_queue)
+USRSCTP_SYSCTL_GET_DEF(sctp_hashtblsize)
+USRSCTP_SYSCTL_GET_DEF(sctp_pcbtblsize)
+USRSCTP_SYSCTL_GET_DEF(sctp_min_split_point)
+USRSCTP_SYSCTL_GET_DEF(sctp_chunkscale)
+USRSCTP_SYSCTL_GET_DEF(sctp_delayed_sack_time_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_sack_freq_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_system_free_resc_limit)
+USRSCTP_SYSCTL_GET_DEF(sctp_asoc_free_resc_limit)
+USRSCTP_SYSCTL_GET_DEF(sctp_heartbeat_interval_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_pmtu_raise_time_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_shutdown_guard_time_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_secret_lifetime_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_rto_max_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_rto_min_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_rto_initial_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_init_rto_max_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_valid_cookie_life_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_init_rtx_max_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_assoc_rtx_max_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_path_rtx_max_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_add_more_threshold)
+USRSCTP_SYSCTL_GET_DEF(sctp_nr_incoming_streams_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_nr_outgoing_streams_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_cmt_on_off)
+USRSCTP_SYSCTL_GET_DEF(sctp_cmt_use_dac)
+USRSCTP_SYSCTL_GET_DEF(sctp_use_cwnd_based_maxburst)
+USRSCTP_SYSCTL_GET_DEF(sctp_nat_friendly)
+USRSCTP_SYSCTL_GET_DEF(sctp_L2_abc_variable)
+USRSCTP_SYSCTL_GET_DEF(sctp_mbuf_threshold_count)
+USRSCTP_SYSCTL_GET_DEF(sctp_do_drain)
+USRSCTP_SYSCTL_GET_DEF(sctp_hb_maxburst)
+USRSCTP_SYSCTL_GET_DEF(sctp_abort_if_one_2_one_hits_limit)
+USRSCTP_SYSCTL_GET_DEF(sctp_min_residual)
+USRSCTP_SYSCTL_GET_DEF(sctp_max_retran_chunk)
+USRSCTP_SYSCTL_GET_DEF(sctp_logging_level)
+USRSCTP_SYSCTL_GET_DEF(sctp_default_cc_module)
+USRSCTP_SYSCTL_GET_DEF(sctp_default_frag_interleave)
+USRSCTP_SYSCTL_GET_DEF(sctp_mobility_base)
+USRSCTP_SYSCTL_GET_DEF(sctp_mobility_fasthandoff)
+USRSCTP_SYSCTL_GET_DEF(sctp_inits_include_nat_friendly)
+USRSCTP_SYSCTL_GET_DEF(sctp_udp_tunneling_port)
+USRSCTP_SYSCTL_GET_DEF(sctp_enable_sack_immediately)
+USRSCTP_SYSCTL_GET_DEF(sctp_vtag_time_wait)
+USRSCTP_SYSCTL_GET_DEF(sctp_blackhole)
+USRSCTP_SYSCTL_GET_DEF(sctp_diag_info_code)
+USRSCTP_SYSCTL_GET_DEF(sctp_fr_max_burst_default)
+USRSCTP_SYSCTL_GET_DEF(sctp_path_pf_threshold)
+USRSCTP_SYSCTL_GET_DEF(sctp_default_ss_module)
+USRSCTP_SYSCTL_GET_DEF(sctp_rttvar_bw)
+USRSCTP_SYSCTL_GET_DEF(sctp_rttvar_rtt)
+USRSCTP_SYSCTL_GET_DEF(sctp_rttvar_eqret)
+USRSCTP_SYSCTL_GET_DEF(sctp_steady_step)
+USRSCTP_SYSCTL_GET_DEF(sctp_use_dccc_ecn)
+USRSCTP_SYSCTL_GET_DEF(sctp_buffer_splitting)
+USRSCTP_SYSCTL_GET_DEF(sctp_initial_cwnd)
+#ifdef SCTP_DEBUG
+USRSCTP_SYSCTL_GET_DEF(sctp_debug_on)
+#endif
+
+void usrsctp_get_stat(struct sctpstat *stat)
+{
+ *stat = SCTP_BASE_STATS;
+}
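+
+/* Editor's note: usage sketch (not part of the imported sources); the
+ * sctps_recvpackets/sctps_sendpackets counters are assumed fields of
+ * struct sctpstat.
+ *
+ *   struct sctpstat stat;
+ *
+ *   usrsctp_get_stat(&stat);
+ *   printf("in: %u out: %u\n",
+ *          stat.sctps_recvpackets, stat.sctps_sendpackets);
+ */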
diff --git a/ext/sctp/usrsctp/usrsctplib/user_socketvar.h b/ext/sctp/usrsctp/usrsctplib/user_socketvar.h
new file mode 100644
index 000000000..6b79b5908
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_socketvar.h
@@ -0,0 +1,527 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/* __Userspace__ version of <sys/socketvar.h> goes here.*/
+
+#ifndef _USER_SOCKETVAR_H_
+#define _USER_SOCKETVAR_H_
+
+#if defined(__APPLE__)
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+/* #include <sys/selinfo.h> */ /*__Userspace__ alternative?*/ /* for struct selinfo */
+/* #include <sys/_lock.h> was 0 byte file */
+/* #include <sys/_mutex.h> was 0 byte file */
+/* #include <sys/_sx.h> */ /*__Userspace__ alternative?*/
+#if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(_WIN32) && !defined(__native_client__)
+#include <sys/uio.h>
+#endif
+#define SOCK_MAXADDRLEN 255
+#if !defined(MSG_NOTIFICATION)
+#define MSG_NOTIFICATION 0x2000 /* SCTP notification */
+#endif
+#define SCTP_SO_LINGER 0x0001
+#define SCTP_SO_ACCEPTCONN 0x0002
+#define SS_CANTRCVMORE 0x020
+#define SS_CANTSENDMORE 0x010
+
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(_WIN32) || defined(__native_client__)
+#define UIO_MAXIOV 1024
+#define ERESTART (-1)
+#endif
+
+#if !defined(__APPLE__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
+enum uio_rw { UIO_READ, UIO_WRITE };
+#endif
+
+#if !defined(__NetBSD__) && !defined(__OpenBSD__)
+/* Segment flag values. */
+enum uio_seg {
+ UIO_USERSPACE, /* from user data space */
+ UIO_SYSSPACE /* from system space */
+};
+#endif
+
+struct proc {
+ int stub; /* struct proc is a dummy for __Userspace__ */
+};
+
+MALLOC_DECLARE(M_ACCF);
+MALLOC_DECLARE(M_PCB);
+MALLOC_DECLARE(M_SONAME);
+
+/* __Userspace__ Are these all the fields we need?
+ * Removing struct thread *uio_td; owner field
+ */
+struct uio {
+ struct iovec *uio_iov; /* scatter/gather list */
+ int uio_iovcnt; /* length of scatter/gather list */
+ off_t uio_offset; /* offset in target object */
+ ssize_t uio_resid; /* remaining bytes to process */
+ enum uio_seg uio_segflg; /* address space */
+ enum uio_rw uio_rw; /* operation */
+};
+
+
+/* __Userspace__ */
+
+/*
+ * Kernel structure per socket.
+ * Contains send and receive buffer queues,
+ * handle on protocol and pointer to protocol
+ * private data and error information.
+ */
+#if defined(_WIN32)
+#define AF_ROUTE 17
+#if !defined(__MINGW32__)
+typedef __int32 pid_t;
+#endif
+typedef unsigned __int32 uid_t;
+enum sigType {
+ SIGNAL = 0,
+ BROADCAST = 1,
+ MAX_EVENTS = 2
+};
+#endif
+
+/*-
+ * Locking key to struct socket:
+ * (a) constant after allocation, no locking required.
+ * (b) locked by SOCK_LOCK(so).
+ * (c) locked by SOCKBUF_LOCK(&so->so_rcv).
+ * (d) locked by SOCKBUF_LOCK(&so->so_snd).
+ * (e) locked by ACCEPT_LOCK().
+ * (f) not locked since integer reads/writes are atomic.
+ * (g) used only as a sleep/wakeup address, no value.
+ * (h) locked by global mutex so_global_mtx.
+ */
+struct socket {
+ int so_count; /* (b) reference count */
+ short so_type; /* (a) generic type, see socket.h */
+ short so_options; /* from socket call, see socket.h */
+ short so_linger; /* time to linger while closing */
+ short so_state; /* (b) internal state flags SS_* */
+ int so_qstate; /* (e) internal state flags SQ_* */
+ void *so_pcb; /* protocol control block */
+ int so_dom;
+/*
+ * Variables for connection queuing.
+ * Socket where accepts occur is so_head in all subsidiary sockets.
+ * If so_head is 0, socket is not related to an accept.
+ * For head socket so_incomp queues partially completed connections,
+ * while so_comp is a queue of connections ready to be accepted.
+ * If a connection is aborted and it has so_head set, then
+ * it has to be pulled out of either so_incomp or so_comp.
+ * We allow connections to queue up based on current queue lengths
+ * and limit on number of queued connections for this socket.
+ */
+ struct socket *so_head; /* (e) back pointer to listen socket */
+ TAILQ_HEAD(, socket) so_incomp; /* (e) queue of partial unaccepted connections */
+ TAILQ_HEAD(, socket) so_comp; /* (e) queue of complete unaccepted connections */
+ TAILQ_ENTRY(socket) so_list; /* (e) list of unaccepted connections */
+ u_short so_qlen; /* (e) number of unaccepted connections */
+ u_short so_incqlen; /* (e) number of unaccepted incomplete
+ connections */
+ u_short so_qlimit; /* (e) max number queued connections */
+ short so_timeo; /* (g) connection timeout */
+ userland_cond_t timeo_cond; /* timeo_cond condition variable being used in wakeup */
+
+ u_short so_error; /* (f) error affecting connection */
+ struct sigio *so_sigio; /* [sg] information for async I/O or
+ out of band data (SIGURG) */
+ u_long so_oobmark; /* (c) chars to oob mark */
+ TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
+/*
+ * Variables for socket buffering.
+ */
+ struct sockbuf {
+ /* __Userspace__ Many of these fields may
+ * not be required for the sctp stack.
+ * Commenting out the following.
+ * Including pthread mutex and condition variable to be
+ * used by sbwait, sorwakeup and sowwakeup.
+ */
+ /* struct selinfo sb_sel;*/ /* process selecting read/write */
+ /* struct mtx sb_mtx;*/ /* sockbuf lock */
+ /* struct sx sb_sx;*/ /* prevent I/O interlacing */
+ userland_cond_t sb_cond; /* sockbuf condition variable */
+ userland_mutex_t sb_mtx; /* sockbuf lock associated with sb_cond */
+ short sb_state; /* (c/d) socket state on sockbuf */
+#define sb_startzero sb_mb
+ struct mbuf *sb_mb; /* (c/d) the mbuf chain */
+ struct mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */
+ struct mbuf *sb_lastrecord; /* (c/d) first mbuf of last
+ * record in socket buffer */
+ struct mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */
+ u_int sb_sndptroff; /* (c/d) byte offset of ptr into chain */
+ u_int sb_cc; /* (c/d) actual chars in buffer */
+ u_int sb_hiwat; /* (c/d) max actual char count */
+ u_int sb_mbcnt; /* (c/d) chars of mbufs used */
+ u_int sb_mbmax; /* (c/d) max chars of mbufs to use */
+ u_int sb_ctl; /* (c/d) non-data chars in buffer */
+ int sb_lowat; /* (c/d) low water mark */
+ int sb_timeo; /* (c/d) timeout for read/write */
+ short sb_flags; /* (c/d) flags, see below */
+ } so_rcv, so_snd;
+/*
+ * Constants for socket buffer sizing.
+ */
+#define SB_MAX (256*1024) /* default for max chars in sockbuf */
+#define SB_RAW (64*1024*2) /* align so->so_rcv.sb_hiwat with the receive buffer size of a raw socket */
+/*
+ * Constants for sb_flags field of struct sockbuf.
+ */
+#define SB_WAIT 0x04 /* someone is waiting for data/space */
+#define SB_SEL 0x08 /* someone is selecting */
+#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */
+#define SB_UPCALL 0x20 /* someone wants an upcall */
+#define SB_NOINTR 0x40 /* operations not interruptible */
+#define SB_AIO 0x80 /* AIO operations queued */
+#define SB_KNOTE 0x100 /* kernel note attached */
+#define SB_AUTOSIZE 0x800 /* automatically size socket buffer */
+
+ void (*so_upcall)(struct socket *, void *, int);
+ void *so_upcallarg;
+ struct ucred *so_cred; /* (a) user credentials */
+ struct label *so_label; /* (b) MAC label for socket */
+ struct label *so_peerlabel; /* (b) cached MAC label for peer */
+ /* NB: generation count must not be first. */
+ uint32_t so_gencnt; /* (h) generation count */
+ void *so_emuldata; /* (b) private data for emulators */
+ struct so_accf {
+ struct accept_filter *so_accept_filter;
+ void *so_accept_filter_arg; /* saved filter args */
+ char *so_accept_filter_str; /* saved user args */
+ } *so_accf;
+};
+
+#define SB_EMPTY_FIXUP(sb) do { \
+ if ((sb)->sb_mb == NULL) { \
+ (sb)->sb_mbtail = NULL; \
+ (sb)->sb_lastrecord = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Global accept mutex to serialize access to accept queues and
+ * fields associated with multiple sockets. This allows us to
+ * avoid defining a lock order between listen and accept sockets
+ * until such time as it proves to be a good idea.
+ */
+#if defined(_WIN32)
+extern userland_mutex_t accept_mtx;
+extern userland_cond_t accept_cond;
+#define ACCEPT_LOCK_ASSERT()
+#define ACCEPT_LOCK() do { \
+ EnterCriticalSection(&accept_mtx); \
+} while (0)
+#define ACCEPT_UNLOCK() do { \
+ LeaveCriticalSection(&accept_mtx); \
+} while (0)
+#define ACCEPT_UNLOCK_ASSERT()
+#else
+extern userland_mutex_t accept_mtx;
+
+extern userland_cond_t accept_cond;
+#ifdef INVARIANTS
+#define ACCEPT_LOCK() KASSERT(pthread_mutex_lock(&accept_mtx) == 0, ("%s: accept_mtx already locked", __func__))
+#define ACCEPT_UNLOCK() KASSERT(pthread_mutex_unlock(&accept_mtx) == 0, ("%s: accept_mtx not locked", __func__))
+#else
+#define ACCEPT_LOCK() (void)pthread_mutex_lock(&accept_mtx)
+#define ACCEPT_UNLOCK() (void)pthread_mutex_unlock(&accept_mtx)
+#endif
+#define ACCEPT_LOCK_ASSERT() \
+ KASSERT(pthread_mutex_trylock(&accept_mtx) == EBUSY, ("%s: accept_mtx not locked", __func__))
+#define ACCEPT_UNLOCK_ASSERT() do { \
+ KASSERT(pthread_mutex_trylock(&accept_mtx) == 0, ("%s: accept_mtx locked", __func__)); \
+ (void)pthread_mutex_unlock(&accept_mtx); \
+ } while (0)
+#endif
+
+/*
+ * Per-socket buffer mutex used to protect most fields in the socket
+ * buffer.
+ */
+#define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx)
+#if defined(_WIN32)
+#define SOCKBUF_LOCK_INIT(_sb, _name) \
+ InitializeCriticalSection(SOCKBUF_MTX(_sb))
+#define SOCKBUF_LOCK_DESTROY(_sb) DeleteCriticalSection(SOCKBUF_MTX(_sb))
+#define SOCKBUF_COND_INIT(_sb) InitializeConditionVariable((&(_sb)->sb_cond))
+#define SOCKBUF_COND_DESTROY(_sb) DeleteConditionVariable((&(_sb)->sb_cond))
+#define SOCK_COND_INIT(_so) InitializeConditionVariable((&(_so)->timeo_cond))
+#define SOCK_COND_DESTROY(_so) DeleteConditionVariable((&(_so)->timeo_cond))
+#define SOCK_COND(_so) (&(_so)->timeo_cond)
+#else
+#ifdef INVARIANTS
+#define SOCKBUF_LOCK_INIT(_sb, _name) do { \
+ pthread_mutexattr_t mutex_attr; \
+ \
+ pthread_mutexattr_init(&mutex_attr); \
+ pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK); \
+ pthread_mutex_init(SOCKBUF_MTX(_sb), &mutex_attr); \
+ pthread_mutexattr_destroy(&mutex_attr); \
+} while (0)
+#else
+#define SOCKBUF_LOCK_INIT(_sb, _name) \
+ pthread_mutex_init(SOCKBUF_MTX(_sb), NULL)
+#endif
+#define SOCKBUF_LOCK_DESTROY(_sb) pthread_mutex_destroy(SOCKBUF_MTX(_sb))
+#define SOCKBUF_COND_INIT(_sb) pthread_cond_init((&(_sb)->sb_cond), NULL)
+#define SOCKBUF_COND_DESTROY(_sb) pthread_cond_destroy((&(_sb)->sb_cond))
+#define SOCK_COND_INIT(_so) pthread_cond_init((&(_so)->timeo_cond), NULL)
+#define SOCK_COND_DESTROY(_so) pthread_cond_destroy((&(_so)->timeo_cond))
+#define SOCK_COND(_so) (&(_so)->timeo_cond)
+#endif
+/*__Userspace__ SOCKBUF_LOCK(_sb) is now defined in netinet/sctp_process_lock.h */
+
+/* #define SOCKBUF_OWNED(_sb) mtx_owned(SOCKBUF_MTX(_sb)) unused */
+/*__Userspace__ SOCKBUF_UNLOCK(_sb) is now defined in netinet/sctp_process_lock.h */
+
+/*__Userspace__ SOCKBUF_LOCK_ASSERT(_sb) is now defined in netinet/sctp_process_lock.h */
+
+/* #define SOCKBUF_UNLOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED) unused */
+
+/*
+ * Per-socket mutex: we reuse the receive socket buffer mutex for space
+ * efficiency. This decision should probably be revisited as we optimize
+ * locking for the socket code.
+ */
+#define SOCK_MTX(_so) SOCKBUF_MTX(&(_so)->so_rcv)
+/*__Userspace__ SOCK_LOCK(_so) is now defined in netinet/sctp_process_lock.h */
+
+/* #define SOCK_OWNED(_so) SOCKBUF_OWNED(&(_so)->so_rcv) unused */
+/*__Userspace__ SOCK_UNLOCK(_so) is now defined in netinet/sctp_process_lock.h */
+
+#define SOCK_LOCK_ASSERT(_so) SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv)
+
+/*
+ * Socket state bits.
+ *
+ * Historically, these bits were all kept in the so_state field. For
+ * locking reasons, they are now in multiple fields, as they are
+ * locked differently. so_state maintains basic socket state protected
+ * by the socket lock. so_qstate holds information about the socket
+ * accept queues. Each socket buffer also has a state field holding
+ * information relevant to that socket buffer (can't send, rcv). Many
+ * fields will be read without locks to improve performance and avoid
+ * lock order issues. However, this approach must be used with caution.
+ */
+#define SS_NOFDREF 0x0001 /* no file table ref any more */
+#define SS_ISCONNECTED 0x0002 /* socket connected to a peer */
+#define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */
+#define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */
+#define SS_NBIO 0x0100 /* non-blocking ops */
+#define SS_ASYNC 0x0200 /* async i/o notify */
+#define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */
+#define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */
+/*
+ * Protocols can mark a socket as SS_PROTOREF to indicate that, following
+ * pru_detach, they still want the socket to persist, and will free it
+ * themselves when they are done. Protocols should only ever call sofree()
+ * following setting this flag in pru_detach(), and never otherwise, as
+ * sofree() bypasses socket reference counting.
+ */
+#define SS_PROTOREF 0x4000 /* strong protocol reference */
+
+/*
+ * Socket state bits now stored in the socket buffer state field.
+ */
+#define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */
+#define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */
+#define SBS_RCVATMARK 0x0040 /* at mark on input */
+
+/*
+ * Socket state bits stored in so_qstate.
+ */
+#define SQ_INCOMP 0x0800 /* unaccepted, incomplete connection */
+#define SQ_COMP 0x1000 /* unaccepted, complete connection */
+
+/*
+ * Socket event flags
+ */
+#define SCTP_EVENT_READ 0x0001 /* socket is readable */
+#define SCTP_EVENT_WRITE 0x0002 /* socket is writeable */
+#define SCTP_EVENT_ERROR 0x0004 /* socket has an error state */
+
+
+/*-------------------------------------------------------------*/
+/*-------------------------------------------------------------*/
+/* __Userspace__ */
+/*-------------------------------------------------------------*/
+/*-------------------------------------------------------------*/
+/* this new __Userspace__ section is to copy portions of the _KERNEL block
+ * above into, avoiding having to port the entire thing at once...
+ * For function prototypes, the full bodies are in user_socket.c .
+ */
+
+/* ---------------------------------------------------------- */
+/* --- function prototypes (implemented in user_socket.c) --- */
+/* ---------------------------------------------------------- */
+void soisconnecting(struct socket *so);
+void soisdisconnecting(struct socket *so);
+void soisconnected(struct socket *so);
+struct socket * sonewconn(struct socket *head, int connstatus);
+void socantrcvmore(struct socket *so);
+void socantsendmore(struct socket *so);
+void sofree(struct socket *so);
+
+
+
+/* -------------- */
+/* --- macros --- */
+/* -------------- */
+
+#define soref(so) do { \
+ SOCK_LOCK_ASSERT(so); \
+ ++(so)->so_count; \
+} while (0)
+
+#define sorele(so) do { \
+ ACCEPT_LOCK_ASSERT(); \
+ SOCK_LOCK_ASSERT(so); \
+ KASSERT((so)->so_count > 0, ("sorele")); \
+ if (--(so)->so_count == 0) \
+ sofree(so); \
+ else { \
+ SOCK_UNLOCK(so); \
+ ACCEPT_UNLOCK(); \
+ } \
+} while (0)
+
+
+/* replacing imin with min (user_environment.h) */
+#define sbspace(sb) \
+ ((long) min((int)((sb)->sb_hiwat - (sb)->sb_cc), \
+ (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))
+
+/* do we have to send all at once on a socket? */
+#define sosendallatonce(so) \
+ ((so)->so_proto->pr_flags & PR_ATOMIC)
+
+/* can we read something from so? */
+#define soreadable(so) \
+ ((int)((so)->so_rcv.sb_cc) >= (so)->so_rcv.sb_lowat || \
+ ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
+ !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)
+
+#if 0 /* original */
+#define PR_CONNREQUIRED 0x04 /* from sys/protosw.h "needed" for sowriteable */
+#define sowriteable(so) \
+ ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
+ (((so)->so_state&SS_ISCONNECTED) || \
+ ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
+ ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
+ (so)->so_error)
+#else /* line with PR_CONNREQUIRED removed */
+/* can we write something to so? */
+#define sowriteable(so) \
+ ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
+ (((so)->so_state&SS_ISCONNECTED))) || \
+ ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
+ (so)->so_error)
+#endif
+
+extern void solisten_proto(struct socket *so, int backlog);
+extern int solisten_proto_check(struct socket *so);
+extern int sctp_listen(struct socket *so, int backlog, struct proc *p);
+extern void socantrcvmore_locked(struct socket *so);
+extern int sctp_bind(struct socket *so, struct sockaddr *addr);
+extern int sctp6_bind(struct socket *so, struct sockaddr *addr, void *proc);
+extern int sctpconn_bind(struct socket *so, struct sockaddr *addr);
+extern int sctp_accept(struct socket *so, struct sockaddr **addr);
+extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id);
+extern int sctp6_attach(struct socket *so, int proto, uint32_t vrf_id);
+extern int sctp_abort(struct socket *so);
+extern int sctp6_abort(struct socket *so);
+extern void sctp_close(struct socket *so);
+extern int soaccept(struct socket *so, struct sockaddr **nam);
+extern int solisten(struct socket *so, int backlog);
+extern int soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
+extern void sowakeup(struct socket *so, struct sockbuf *sb);
+extern void wakeup(void *ident, struct socket *so); /*__Userspace__ */
+extern int uiomove(void *cp, int n, struct uio *uio);
+extern int sbwait(struct sockbuf *sb);
+extern int sodisconnect(struct socket *so);
+extern int soconnect(struct socket *so, struct sockaddr *nam);
+extern int sctp_disconnect(struct socket *so);
+extern int sctp_connect(struct socket *so, struct sockaddr *addr);
+extern int sctp6_connect(struct socket *so, struct sockaddr *addr);
+extern int sctpconn_connect(struct socket *so, struct sockaddr *addr);
+extern void sctp_finish(void);
+
+/* ------------------------------------------------ */
+/* ----- macros copied from above ---- */
+/* ------------------------------------------------ */
+
+/*
+ * Do we need to notify the other side when I/O is possible?
+ */
+#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
+ SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)
+
+
+/*
+ * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
+ * avoid a non-atomic test-and-wakeup. However, sowakeup is
+ * responsible for releasing the lock if it is called. We unlock only
+ * if we don't call into sowakeup. If any code is introduced that
+ * directly invokes the underlying sowakeup() primitives, it must
+ * maintain the same semantics.
+ */
+#define sorwakeup_locked(so) do { \
+ SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \
+ if (sb_notify(&(so)->so_rcv)) \
+ sowakeup((so), &(so)->so_rcv); \
+ else \
+ SOCKBUF_UNLOCK(&(so)->so_rcv); \
+} while (0)
+
+#define sorwakeup(so) do { \
+ SOCKBUF_LOCK(&(so)->so_rcv); \
+ sorwakeup_locked(so); \
+} while (0)
+
+#define sowwakeup_locked(so) do { \
+ SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \
+ if (sb_notify(&(so)->so_snd)) \
+ sowakeup((so), &(so)->so_snd); \
+ else \
+ SOCKBUF_UNLOCK(&(so)->so_snd); \
+} while (0)
+
+#define sowwakeup(so) do { \
+ SOCKBUF_LOCK(&(so)->so_snd); \
+ sowwakeup_locked(so); \
+} while (0)
+
+#endif /* !_USER_SOCKETVAR_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/user_uma.h b/ext/sctp/usrsctp/usrsctplib/user_uma.h
new file mode 100644
index 000000000..59d71fff4
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/user_uma.h
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USER_UMA_H_
+#define _USER_UMA_H_
+
+#define UMA_ZFLAG_FULL 0x40000000 /* Reached uz_maxpages */
+#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
+
+/* __Userspace__ All these definitions will change for
+userspace Universal Memory Allocator (UMA). These are included
+for reference purposes and to avoid compile errors for the time being.
+*/
+typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags);
+typedef void (*uma_dtor)(void *mem, int size, void *arg);
+typedef int (*uma_init)(void *mem, int size, int flags);
+typedef void (*uma_fini)(void *mem, int size);
+typedef struct uma_zone * uma_zone_t;
+typedef struct uma_keg * uma_keg_t;
+
+struct uma_cache {
+ int stub; /* TODO __Userspace__ */
+};
+
+struct uma_keg {
+ int stub; /* TODO __Userspace__ */
+};
+
+struct uma_zone {
+ char *uz_name; /* Text name of the zone */
+ struct mtx *uz_lock; /* Lock for the zone (keg's lock) */
+ uma_keg_t uz_keg; /* Our underlying Keg */
+
+ LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */
+ LIST_HEAD(,uma_bucket) uz_full_bucket; /* full buckets */
+ LIST_HEAD(,uma_bucket) uz_free_bucket; /* Buckets for frees */
+
+ uma_ctor uz_ctor; /* Constructor for each allocation */
+ uma_dtor uz_dtor; /* Destructor */
+ uma_init uz_init; /* Initializer for each item */
+ uma_fini uz_fini; /* Discards memory */
+
+ u_int64_t uz_allocs; /* Total number of allocations */
+ u_int64_t uz_frees; /* Total number of frees */
+ u_int64_t uz_fails; /* Total number of alloc failures */
+ uint16_t uz_fills; /* Outstanding bucket fills */
+ uint16_t uz_count; /* Highest value ub_ptr can have */
+
+ /*
+ * This HAS to be the last item because we adjust the zone size
+ * based on NCPU and then allocate the space for the zones.
+ */
+ struct uma_cache uz_cpu[1]; /* Per cpu caches */
+};
+
+/* Prototype */
+uma_zone_t
+uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
+ uma_init uminit, uma_fini fini, int align, uint32_t flags);
+
+
+#define uma_zone_set_max(zone, number) /* stub TODO __Userspace__ */
+
+uma_zone_t
+uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
+ uma_init uminit, uma_fini fini, int align, uint32_t flags) {
+ return NULL; /* stub TODO __Userspace__. Also place implementation in a separate .c file */
+}
+#endif /* !_USER_UMA_H_ */
diff --git a/ext/sctp/usrsctp/usrsctplib/usrsctp.h b/ext/sctp/usrsctp/usrsctplib/usrsctp.h
new file mode 100644
index 000000000..fea2aafd2
--- /dev/null
+++ b/ext/sctp/usrsctp/usrsctplib/usrsctp.h
@@ -0,0 +1,1321 @@
+/*-
+ * Copyright (c) 2009-2010 Brad Penoff
+ * Copyright (c) 2009-2010 Humaira Kamal
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __USRSCTP_H__
+#define __USRSCTP_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <errno.h>
+#include <sys/types.h>
+#ifdef _WIN32
+#ifdef _MSC_VER
+#pragma warning(disable: 4200)
+#endif
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+
+#ifndef MSG_NOTIFICATION
+/* This definition MUST be in sync with usrsctplib/user_socketvar.h */
+#define MSG_NOTIFICATION 0x2000
+#endif
+
+#ifndef IPPROTO_SCTP
+/* This is the IANA assigned protocol number of SCTP. */
+#define IPPROTO_SCTP 132
+#endif
+
+#ifdef _WIN32
+#if defined(_MSC_VER) && _MSC_VER >= 1600
+#include <stdint.h>
+#elif defined(SCTP_STDINT_INCLUDE)
+#include SCTP_STDINT_INCLUDE
+#else
+#define uint8_t unsigned __int8
+#define uint16_t unsigned __int16
+#define uint32_t unsigned __int32
+#define uint64_t unsigned __int64
+#define int16_t __int16
+#define int32_t __int32
+#endif
+
+#ifndef ssize_t
+#ifdef _WIN64
+#define ssize_t __int64
+#elif defined _WIN32
+#define ssize_t int
+#else
+#error "Unknown platform!"
+#endif
+#endif
+
+#define MSG_EOR 0x8
+#ifndef EWOULDBLOCK
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#endif
+#ifndef EINPROGRESS
+#define EINPROGRESS WSAEINPROGRESS
+#endif
+#define SHUT_RD 1
+#define SHUT_WR 2
+#define SHUT_RDWR 3
+#endif
+
+typedef uint32_t sctp_assoc_t;
+
+#if defined(_WIN32) && defined(_MSC_VER)
+#pragma pack (push, 1)
+#define SCTP_PACKED
+#else
+#define SCTP_PACKED __attribute__((packed))
+#endif
+
+struct sctp_common_header {
+ uint16_t source_port;
+ uint16_t destination_port;
+ uint32_t verification_tag;
+ uint32_t crc32c;
+} SCTP_PACKED;
+
+#if defined(_WIN32) && defined(_MSC_VER)
+#pragma pack(pop)
+#endif
+#undef SCTP_PACKED
+
+#define AF_CONN 123
+/* The definition of struct sockaddr_conn MUST be kept
+ * consistent with the other sockaddr_* structures.
+ */
+#if defined(__APPLE__) || defined(__Bitrig__) || defined(__DragonFly__) || \
+ defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+struct sockaddr_conn {
+ uint8_t sconn_len;
+ uint8_t sconn_family;
+ uint16_t sconn_port;
+ void *sconn_addr;
+};
+#else
+struct sockaddr_conn {
+ uint16_t sconn_family;
+ uint16_t sconn_port;
+ void *sconn_addr;
+};
+#endif
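+
+/* A minimal sketch of filling in a sockaddr_conn for the AF_CONN
+ * transport. `ctx' is a hypothetical application pointer identifying
+ * the underlying connection; it is handed back verbatim to the
+ * conn_output callback and to usrsctp_conninput(). The sconn_len
+ * field only exists in the BSD-style variant of the structure above,
+ * so portable code sets it behind a platform check:
+ *
+ *   struct sockaddr_conn sconn;
+ *
+ *   memset(&sconn, 0, sizeof(sconn));
+ *   sconn.sconn_family = AF_CONN;
+ *   sconn.sconn_port = htons(5000);
+ *   sconn.sconn_addr = ctx;
+ */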
+
+union sctp_sockstore {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr_conn sconn;
+ struct sockaddr sa;
+};
+
+#define SCTP_FUTURE_ASSOC 0
+#define SCTP_CURRENT_ASSOC 1
+#define SCTP_ALL_ASSOC 2
+
+#define SCTP_EVENT_READ 0x0001
+#define SCTP_EVENT_WRITE 0x0002
+#define SCTP_EVENT_ERROR 0x0004
+
+/*** Structures and definitions to use the socket API ***/
+
+#define SCTP_ALIGN_RESV_PAD 92
+#define SCTP_ALIGN_RESV_PAD_SHORT 76
+
+struct sctp_rcvinfo {
+ uint16_t rcv_sid;
+ uint16_t rcv_ssn;
+ uint16_t rcv_flags;
+ uint32_t rcv_ppid;
+ uint32_t rcv_tsn;
+ uint32_t rcv_cumtsn;
+ uint32_t rcv_context;
+ sctp_assoc_t rcv_assoc_id;
+};
+
+struct sctp_nxtinfo {
+ uint16_t nxt_sid;
+ uint16_t nxt_flags;
+ uint32_t nxt_ppid;
+ uint32_t nxt_length;
+ sctp_assoc_t nxt_assoc_id;
+};
+
+#define SCTP_NO_NEXT_MSG 0x0000
+#define SCTP_NEXT_MSG_AVAIL 0x0001
+#define SCTP_NEXT_MSG_ISCOMPLETE 0x0002
+#define SCTP_NEXT_MSG_IS_UNORDERED 0x0004
+#define SCTP_NEXT_MSG_IS_NOTIFICATION 0x0008
+
+struct sctp_recvv_rn {
+ struct sctp_rcvinfo recvv_rcvinfo;
+ struct sctp_nxtinfo recvv_nxtinfo;
+};
+
+#define SCTP_RECVV_NOINFO 0
+#define SCTP_RECVV_RCVINFO 1
+#define SCTP_RECVV_NXTINFO 2
+#define SCTP_RECVV_RN 3
+
+#define SCTP_SENDV_NOINFO 0
+#define SCTP_SENDV_SNDINFO 1
+#define SCTP_SENDV_PRINFO 2
+#define SCTP_SENDV_AUTHINFO 3
+#define SCTP_SENDV_SPA 4
+
+#define SCTP_SEND_SNDINFO_VALID 0x00000001
+#define SCTP_SEND_PRINFO_VALID 0x00000002
+#define SCTP_SEND_AUTHINFO_VALID 0x00000004
+
+struct sctp_snd_all_completes {
+ uint16_t sall_stream;
+ uint16_t sall_flags;
+ uint32_t sall_ppid;
+ uint32_t sall_context;
+ uint32_t sall_num_sent;
+ uint32_t sall_num_failed;
+};
+
+struct sctp_sndinfo {
+ uint16_t snd_sid;
+ uint16_t snd_flags;
+ uint32_t snd_ppid;
+ uint32_t snd_context;
+ sctp_assoc_t snd_assoc_id;
+};
+
+struct sctp_prinfo {
+ uint16_t pr_policy;
+ uint32_t pr_value;
+};
+
+struct sctp_authinfo {
+ uint16_t auth_keynumber;
+};
+
+struct sctp_sendv_spa {
+ uint32_t sendv_flags;
+ struct sctp_sndinfo sendv_sndinfo;
+ struct sctp_prinfo sendv_prinfo;
+ struct sctp_authinfo sendv_authinfo;
+};
+
+struct sctp_udpencaps {
+ struct sockaddr_storage sue_address;
+ uint32_t sue_assoc_id;
+ uint16_t sue_port;
+};
+
+/******** Notifications **************/
+
+/* notification types */
+#define SCTP_ASSOC_CHANGE 0x0001
+#define SCTP_PEER_ADDR_CHANGE 0x0002
+#define SCTP_REMOTE_ERROR 0x0003
+#define SCTP_SEND_FAILED 0x0004
+#define SCTP_SHUTDOWN_EVENT 0x0005
+#define SCTP_ADAPTATION_INDICATION 0x0006
+#define SCTP_PARTIAL_DELIVERY_EVENT 0x0007
+#define SCTP_AUTHENTICATION_EVENT 0x0008
+#define SCTP_STREAM_RESET_EVENT 0x0009
+#define SCTP_SENDER_DRY_EVENT 0x000a
+#define SCTP_NOTIFICATIONS_STOPPED_EVENT 0x000b
+#define SCTP_ASSOC_RESET_EVENT 0x000c
+#define SCTP_STREAM_CHANGE_EVENT 0x000d
+#define SCTP_SEND_FAILED_EVENT 0x000e
+
+/* notification event structures */
+
+
+/* association change event */
+struct sctp_assoc_change {
+ uint16_t sac_type;
+ uint16_t sac_flags;
+ uint32_t sac_length;
+ uint16_t sac_state;
+ uint16_t sac_error;
+ uint16_t sac_outbound_streams;
+ uint16_t sac_inbound_streams;
+ sctp_assoc_t sac_assoc_id;
+ uint8_t sac_info[]; /* not available yet */
+};
+
+/* sac_state values */
+#define SCTP_COMM_UP 0x0001
+#define SCTP_COMM_LOST 0x0002
+#define SCTP_RESTART 0x0003
+#define SCTP_SHUTDOWN_COMP 0x0004
+#define SCTP_CANT_STR_ASSOC 0x0005
+
+/* sac_info values */
+#define SCTP_ASSOC_SUPPORTS_PR 0x01
+#define SCTP_ASSOC_SUPPORTS_AUTH 0x02
+#define SCTP_ASSOC_SUPPORTS_ASCONF 0x03
+#define SCTP_ASSOC_SUPPORTS_MULTIBUF 0x04
+#define SCTP_ASSOC_SUPPORTS_RE_CONFIG 0x05
+#define SCTP_ASSOC_SUPPORTS_INTERLEAVING 0x06
+#define SCTP_ASSOC_SUPPORTS_MAX 0x06
+
+/* Address event */
+struct sctp_paddr_change {
+ uint16_t spc_type;
+ uint16_t spc_flags;
+ uint32_t spc_length;
+ struct sockaddr_storage spc_aaddr;
+ uint32_t spc_state;
+ uint32_t spc_error;
+ sctp_assoc_t spc_assoc_id;
+ uint8_t spc_padding[4];
+};
+
+/* paddr state values */
+#define SCTP_ADDR_AVAILABLE 0x0001
+#define SCTP_ADDR_UNREACHABLE 0x0002
+#define SCTP_ADDR_REMOVED 0x0003
+#define SCTP_ADDR_ADDED 0x0004
+#define SCTP_ADDR_MADE_PRIM 0x0005
+#define SCTP_ADDR_CONFIRMED 0x0006
+
+/* remote error events */
+struct sctp_remote_error {
+ uint16_t sre_type;
+ uint16_t sre_flags;
+ uint32_t sre_length;
+ uint16_t sre_error;
+ sctp_assoc_t sre_assoc_id;
+ uint8_t sre_data[];
+};
+
+/* shutdown event */
+struct sctp_shutdown_event {
+ uint16_t sse_type;
+ uint16_t sse_flags;
+ uint32_t sse_length;
+ sctp_assoc_t sse_assoc_id;
+};
+
+/* Adaptation layer indication */
+struct sctp_adaptation_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaptation_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+/* Partial delivery event */
+struct sctp_pdapi_event {
+ uint16_t pdapi_type;
+ uint16_t pdapi_flags;
+ uint32_t pdapi_length;
+ uint32_t pdapi_indication;
+ uint32_t pdapi_stream;
+ uint32_t pdapi_seq;
+ sctp_assoc_t pdapi_assoc_id;
+};
+
+/* indication values */
+#define SCTP_PARTIAL_DELIVERY_ABORTED 0x0001
+
+/* SCTP authentication event */
+struct sctp_authkey_event {
+ uint16_t auth_type;
+ uint16_t auth_flags;
+ uint32_t auth_length;
+ uint16_t auth_keynumber;
+ uint32_t auth_indication;
+ sctp_assoc_t auth_assoc_id;
+};
+
+/* indication values */
+#define SCTP_AUTH_NEW_KEY 0x0001
+#define SCTP_AUTH_NO_AUTH 0x0002
+#define SCTP_AUTH_FREE_KEY 0x0003
+
+/* SCTP sender dry event */
+struct sctp_sender_dry_event {
+ uint16_t sender_dry_type;
+ uint16_t sender_dry_flags;
+ uint32_t sender_dry_length;
+ sctp_assoc_t sender_dry_assoc_id;
+};
+
+
+/* Stream reset event - subscribe to SCTP_STREAM_RESET_EVENT */
+struct sctp_stream_reset_event {
+ uint16_t strreset_type;
+ uint16_t strreset_flags;
+ uint32_t strreset_length;
+ sctp_assoc_t strreset_assoc_id;
+ uint16_t strreset_stream_list[];
+};
+
+/* flags in stream_reset_event (strreset_flags) */
+#define SCTP_STREAM_RESET_INCOMING_SSN 0x0001
+#define SCTP_STREAM_RESET_OUTGOING_SSN 0x0002
+#define SCTP_STREAM_RESET_DENIED 0x0004 /* SCTP_STRRESET_FAILED */
+#define SCTP_STREAM_RESET_FAILED 0x0008 /* SCTP_STRRESET_FAILED */
+#define SCTP_STREAM_CHANGED_DENIED 0x0010
+
+#define SCTP_STREAM_RESET_INCOMING 0x00000001
+#define SCTP_STREAM_RESET_OUTGOING 0x00000002
+
+
+/* Assoc reset event - subscribe to SCTP_ASSOC_RESET_EVENT */
+struct sctp_assoc_reset_event {
+ uint16_t assocreset_type;
+ uint16_t assocreset_flags;
+ uint32_t assocreset_length;
+ sctp_assoc_t assocreset_assoc_id;
+ uint32_t assocreset_local_tsn;
+ uint32_t assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_RESET_DENIED 0x0004
+#define SCTP_ASSOC_RESET_FAILED 0x0008
+
+
+/* Stream change event - subscribe to SCTP_STREAM_CHANGE_EVENT */
+struct sctp_stream_change_event {
+ uint16_t strchange_type;
+ uint16_t strchange_flags;
+ uint32_t strchange_length;
+ sctp_assoc_t strchange_assoc_id;
+ uint16_t strchange_instrms;
+ uint16_t strchange_outstrms;
+};
+
+#define SCTP_STREAM_CHANGE_DENIED 0x0004
+#define SCTP_STREAM_CHANGE_FAILED 0x0008
+
+
+/* SCTP send failed event */
+struct sctp_send_failed_event {
+ uint16_t ssfe_type;
+ uint16_t ssfe_flags;
+ uint32_t ssfe_length;
+ uint32_t ssfe_error;
+ struct sctp_sndinfo ssfe_info;
+ sctp_assoc_t ssfe_assoc_id;
+ uint8_t ssfe_data[];
+};
+
+/* flag that indicates state of data */
+#define SCTP_DATA_UNSENT 0x0001 /* inqueue never on wire */
+#define SCTP_DATA_SENT 0x0002 /* on wire at failure */
+
+/* SCTP event option */
+struct sctp_event {
+ sctp_assoc_t se_assoc_id;
+ uint16_t se_type;
+ uint8_t se_on;
+};
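+
+/* Illustrative use of struct sctp_event: events are enabled one type
+ * at a time via the SCTP_EVENT socket option through
+ * usrsctp_setsockopt() (declared further below); `so' is a
+ * hypothetical socket:
+ *
+ *   struct sctp_event ev;
+ *
+ *   memset(&ev, 0, sizeof(ev));
+ *   ev.se_assoc_id = SCTP_FUTURE_ASSOC;
+ *   ev.se_type = SCTP_ASSOC_CHANGE;
+ *   ev.se_on = 1;
+ *   usrsctp_setsockopt(so, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
+ */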
+
+union sctp_notification {
+ struct sctp_tlv {
+ uint16_t sn_type;
+ uint16_t sn_flags;
+ uint32_t sn_length;
+ } sn_header;
+ struct sctp_assoc_change sn_assoc_change;
+ struct sctp_paddr_change sn_paddr_change;
+ struct sctp_remote_error sn_remote_error;
+ struct sctp_shutdown_event sn_shutdown_event;
+ struct sctp_adaptation_event sn_adaptation_event;
+ struct sctp_pdapi_event sn_pdapi_event;
+ struct sctp_authkey_event sn_auth_event;
+ struct sctp_sender_dry_event sn_sender_dry_event;
+ struct sctp_send_failed_event sn_send_failed_event;
+ struct sctp_stream_reset_event sn_strreset_event;
+ struct sctp_assoc_reset_event sn_assocreset_event;
+ struct sctp_stream_change_event sn_strchange_event;
+};
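+
+/* When a receive carries MSG_NOTIFICATION in its flags, the buffer
+ * holds a union sctp_notification rather than user data. A sketch of
+ * the usual dispatch (`buf' and `flags' are hypothetical values from
+ * a receive path):
+ *
+ *   if (flags & MSG_NOTIFICATION) {
+ *           union sctp_notification *notif = (union sctp_notification *)buf;
+ *
+ *           switch (notif->sn_header.sn_type) {
+ *           case SCTP_ASSOC_CHANGE:
+ *                   if (notif->sn_assoc_change.sac_state == SCTP_COMM_UP) {
+ *                           // association is established
+ *                   }
+ *                   break;
+ *           default:
+ *                   break;
+ *           }
+ *   }
+ */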
+
+struct sctp_event_subscribe {
+ uint8_t sctp_data_io_event;
+ uint8_t sctp_association_event;
+ uint8_t sctp_address_event;
+ uint8_t sctp_send_failure_event;
+ uint8_t sctp_peer_error_event;
+ uint8_t sctp_shutdown_event;
+ uint8_t sctp_partial_delivery_event;
+ uint8_t sctp_adaptation_layer_event;
+ uint8_t sctp_authentication_event;
+ uint8_t sctp_sender_dry_event;
+ uint8_t sctp_stream_reset_event;
+};
+
+
+
+/* Flags that go into the sinfo->sinfo_flags field */
+#define SCTP_DATA_LAST_FRAG 0x0001 /* tail part of the message could not be sent */
+#define SCTP_DATA_NOT_FRAG 0x0003 /* complete message could not be sent */
+#define SCTP_NOTIFICATION 0x0010 /* next message is a notification */
+#define SCTP_COMPLETE 0x0020 /* next message is complete */
+#define SCTP_EOF 0x0100 /* Start shutdown procedures */
+#define SCTP_ABORT 0x0200 /* Send an ABORT to peer */
+#define SCTP_UNORDERED 0x0400 /* Message is un-ordered */
+#define SCTP_ADDR_OVER 0x0800 /* Override the primary-address */
+#define SCTP_SENDALL 0x1000 /* Send this on all associations */
+#define SCTP_EOR 0x2000 /* end of message signal */
+#define SCTP_SACK_IMMEDIATELY 0x4000 /* Set I-Bit */
+
+#define INVALID_SINFO_FLAG(x) (((x) & 0xfffffff0 \
+ & ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
+ SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR |\
+ SCTP_SACK_IMMEDIATELY)) != 0)
+/* for the endpoint */
+
+/* The lower byte is an enumeration of PR-SCTP policies */
+#define SCTP_PR_SCTP_NONE 0x0000 /* Reliable transfer */
+#define SCTP_PR_SCTP_TTL 0x0001 /* Time based PR-SCTP */
+#define SCTP_PR_SCTP_BUF 0x0002 /* Buffer based PR-SCTP */
+#define SCTP_PR_SCTP_RTX 0x0003 /* Number of retransmissions based PR-SCTP */
+
+#define PR_SCTP_POLICY(x) ((x) & 0x0f)
+#define PR_SCTP_ENABLED(x) (PR_SCTP_POLICY(x) != SCTP_PR_SCTP_NONE)
+#define PR_SCTP_TTL_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL)
+#define PR_SCTP_BUF_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF)
+#define PR_SCTP_RTX_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX)
+#define PR_SCTP_INVALID_POLICY(x) (PR_SCTP_POLICY(x) > SCTP_PR_SCTP_RTX)
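+
+/* Sketch of a timed-reliability send: with SCTP_PR_SCTP_TTL the
+ * pr_value is a lifetime in milliseconds after which an unsent
+ * message may be abandoned. Used with usrsctp_sendv() (declared
+ * below) and SCTP_SENDV_PRINFO:
+ *
+ *   struct sctp_prinfo pri;
+ *
+ *   memset(&pri, 0, sizeof(pri));
+ *   pri.pr_policy = SCTP_PR_SCTP_TTL;
+ *   pri.pr_value = 3000; // abandon after 3 seconds
+ */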
+
+
+/*
+ * user socket options: socket API defined
+ */
+/*
+ * read-write options
+ */
+#define SCTP_RTOINFO 0x00000001
+#define SCTP_ASSOCINFO 0x00000002
+#define SCTP_INITMSG 0x00000003
+#define SCTP_NODELAY 0x00000004
+#define SCTP_AUTOCLOSE 0x00000005
+#define SCTP_PRIMARY_ADDR 0x00000007
+#define SCTP_ADAPTATION_LAYER 0x00000008
+#define SCTP_DISABLE_FRAGMENTS 0x00000009
+#define SCTP_PEER_ADDR_PARAMS 0x0000000a
+/* ancillary data/notification interest options */
+/* Without this applied we will give V4 and V6 addresses on a V6 socket */
+#define SCTP_I_WANT_MAPPED_V4_ADDR 0x0000000d
+#define SCTP_MAXSEG 0x0000000e
+#define SCTP_DELAYED_SACK 0x0000000f
+#define SCTP_FRAGMENT_INTERLEAVE 0x00000010
+#define SCTP_PARTIAL_DELIVERY_POINT 0x00000011
+/* authentication support */
+#define SCTP_HMAC_IDENT 0x00000014
+#define SCTP_AUTH_ACTIVE_KEY 0x00000015
+#define SCTP_AUTO_ASCONF 0x00000018
+#define SCTP_MAX_BURST 0x00000019
+/* assoc level context */
+#define SCTP_CONTEXT 0x0000001a
+/* explicit EOR signalling */
+#define SCTP_EXPLICIT_EOR 0x0000001b
+#define SCTP_REUSE_PORT 0x0000001c
+
+#define SCTP_EVENT 0x0000001e
+#define SCTP_RECVRCVINFO 0x0000001f
+#define SCTP_RECVNXTINFO 0x00000020
+#define SCTP_DEFAULT_SNDINFO 0x00000021
+#define SCTP_DEFAULT_PRINFO 0x00000022
+#define SCTP_REMOTE_UDP_ENCAPS_PORT 0x00000024
+#define SCTP_ECN_SUPPORTED 0x00000025
+#define SCTP_PR_SUPPORTED 0x00000026
+#define SCTP_AUTH_SUPPORTED 0x00000027
+#define SCTP_ASCONF_SUPPORTED 0x00000028
+#define SCTP_RECONFIG_SUPPORTED 0x00000029
+#define SCTP_NRSACK_SUPPORTED 0x00000030
+#define SCTP_PKTDROP_SUPPORTED 0x00000031
+#define SCTP_MAX_CWND 0x00000032
+
+#define SCTP_ENABLE_STREAM_RESET 0x00000900 /* struct sctp_assoc_value */
+
+/* Pluggable Stream Scheduling Socket option */
+#define SCTP_PLUGGABLE_SS 0x00001203
+#define SCTP_SS_VALUE 0x00001204
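+
+/* The options above are used with usrsctp_setsockopt() and
+ * usrsctp_getsockopt() at level IPPROTO_SCTP. For example, disabling
+ * Nagle-style bundling delay on a hypothetical socket `so':
+ *
+ *   uint32_t nodelay = 1;
+ *
+ *   usrsctp_setsockopt(so, IPPROTO_SCTP, SCTP_NODELAY,
+ *                      &nodelay, sizeof(nodelay));
+ */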
+
+/*
+ * read-only options
+ */
+#define SCTP_STATUS 0x00000100
+#define SCTP_GET_PEER_ADDR_INFO 0x00000101
+/* authentication support */
+#define SCTP_PEER_AUTH_CHUNKS 0x00000102
+#define SCTP_LOCAL_AUTH_CHUNKS 0x00000103
+#define SCTP_GET_ASSOC_NUMBER 0x00000104
+#define SCTP_GET_ASSOC_ID_LIST 0x00000105
+#define SCTP_TIMEOUTS 0x00000106
+#define SCTP_PR_STREAM_STATUS 0x00000107
+#define SCTP_PR_ASSOC_STATUS 0x00000108
+
+/*
+ * write-only options
+ */
+#define SCTP_SET_PEER_PRIMARY_ADDR 0x00000006
+#define SCTP_AUTH_CHUNK 0x00000012
+#define SCTP_AUTH_KEY 0x00000013
+#define SCTP_AUTH_DEACTIVATE_KEY 0x0000001d
+#define SCTP_AUTH_DELETE_KEY 0x00000016
+#define SCTP_RESET_STREAMS 0x00000901 /* struct sctp_reset_streams */
+#define SCTP_RESET_ASSOC 0x00000902 /* sctp_assoc_t */
+#define SCTP_ADD_STREAMS 0x00000903 /* struct sctp_add_streams */
+
+struct sctp_initmsg {
+ uint16_t sinit_num_ostreams;
+ uint16_t sinit_max_instreams;
+ uint16_t sinit_max_attempts;
+ uint16_t sinit_max_init_timeo;
+};
+
+struct sctp_rtoinfo {
+ sctp_assoc_t srto_assoc_id;
+ uint32_t srto_initial;
+ uint32_t srto_max;
+ uint32_t srto_min;
+};
+
+struct sctp_assocparams {
+ sctp_assoc_t sasoc_assoc_id;
+ uint32_t sasoc_peer_rwnd;
+ uint32_t sasoc_local_rwnd;
+ uint32_t sasoc_cookie_life;
+ uint16_t sasoc_asocmaxrxt;
+ uint16_t sasoc_number_peer_destinations;
+};
+
+struct sctp_setprim {
+ struct sockaddr_storage ssp_addr;
+ sctp_assoc_t ssp_assoc_id;
+ uint8_t ssp_padding[4];
+};
+
+struct sctp_setadaptation {
+ uint32_t ssb_adaptation_ind;
+};
+
+struct sctp_paddrparams {
+ struct sockaddr_storage spp_address;
+ sctp_assoc_t spp_assoc_id;
+ uint32_t spp_hbinterval;
+ uint32_t spp_pathmtu;
+ uint32_t spp_flags;
+ uint32_t spp_ipv6_flowlabel;
+ uint16_t spp_pathmaxrxt;
+ uint8_t spp_dscp;
+};
+
+#define SPP_HB_ENABLE 0x00000001
+#define SPP_HB_DISABLE 0x00000002
+#define SPP_HB_DEMAND 0x00000004
+#define SPP_PMTUD_ENABLE 0x00000008
+#define SPP_PMTUD_DISABLE 0x00000010
+#define SPP_HB_TIME_IS_ZERO 0x00000080
+#define SPP_IPV6_FLOWLABEL 0x00000100
+#define SPP_DSCP 0x00000200
+
+/* Used for SCTP_MAXSEG, SCTP_MAX_BURST, SCTP_ENABLE_STREAM_RESET, and SCTP_CONTEXT */
+struct sctp_assoc_value {
+ sctp_assoc_t assoc_id;
+ uint32_t assoc_value;
+};
+
+/* To enable stream reset */
+#define SCTP_ENABLE_RESET_STREAM_REQ 0x00000001
+#define SCTP_ENABLE_RESET_ASSOC_REQ 0x00000002
+#define SCTP_ENABLE_CHANGE_ASSOC_REQ 0x00000004
+#define SCTP_ENABLE_VALUE_MASK 0x00000007
+
+struct sctp_reset_streams {
+ sctp_assoc_t srs_assoc_id;
+ uint16_t srs_flags;
+ uint16_t srs_number_streams; /* 0 == ALL */
+ uint16_t srs_stream_list[]; /* list if srs_number_streams is not 0 */
+};
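+
+/* Sketch of resetting one outgoing stream. Stream resets must first
+ * be enabled with SCTP_ENABLE_STREAM_RESET (struct sctp_assoc_value,
+ * assoc_value = SCTP_ENABLE_RESET_STREAM_REQ); `so' and `sid' are
+ * hypothetical:
+ *
+ *   size_t len = sizeof(struct sctp_reset_streams) + sizeof(uint16_t);
+ *   struct sctp_reset_streams *srs = calloc(1, len);
+ *
+ *   srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
+ *   srs->srs_number_streams = 1;
+ *   srs->srs_stream_list[0] = sid;
+ *   usrsctp_setsockopt(so, IPPROTO_SCTP, SCTP_RESET_STREAMS,
+ *                      srs, (socklen_t)len);
+ *   free(srs);
+ */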
+
+struct sctp_add_streams {
+ sctp_assoc_t sas_assoc_id;
+ uint16_t sas_instrms;
+ uint16_t sas_outstrms;
+};
+
+struct sctp_hmacalgo {
+ uint32_t shmac_number_of_idents;
+ uint16_t shmac_idents[];
+};
+
+/* AUTH hmac_id */
+#define SCTP_AUTH_HMAC_ID_RSVD 0x0000
+#define SCTP_AUTH_HMAC_ID_SHA1 0x0001 /* default, mandatory */
+#define SCTP_AUTH_HMAC_ID_SHA256 0x0003
+#define SCTP_AUTH_HMAC_ID_SHA224 0x0004
+#define SCTP_AUTH_HMAC_ID_SHA384 0x0005
+#define SCTP_AUTH_HMAC_ID_SHA512 0x0006
+
+
+struct sctp_sack_info {
+ sctp_assoc_t sack_assoc_id;
+ uint32_t sack_delay;
+ uint32_t sack_freq;
+};
+
+struct sctp_default_prinfo {
+ uint16_t pr_policy;
+ uint32_t pr_value;
+ sctp_assoc_t pr_assoc_id;
+};
+
+struct sctp_paddrinfo {
+ struct sockaddr_storage spinfo_address;
+ sctp_assoc_t spinfo_assoc_id;
+ int32_t spinfo_state;
+ uint32_t spinfo_cwnd;
+ uint32_t spinfo_srtt;
+ uint32_t spinfo_rto;
+ uint32_t spinfo_mtu;
+};
+
+struct sctp_status {
+ sctp_assoc_t sstat_assoc_id;
+ int32_t sstat_state;
+ uint32_t sstat_rwnd;
+ uint16_t sstat_unackdata;
+ uint16_t sstat_penddata;
+ uint16_t sstat_instrms;
+ uint16_t sstat_outstrms;
+ uint32_t sstat_fragmentation_point;
+ struct sctp_paddrinfo sstat_primary;
+};
+
+/*
+ * user state values
+ */
+#define SCTP_CLOSED 0x0000
+#define SCTP_BOUND 0x1000
+#define SCTP_LISTEN 0x2000
+#define SCTP_COOKIE_WAIT 0x0002
+#define SCTP_COOKIE_ECHOED 0x0004
+#define SCTP_ESTABLISHED 0x0008
+#define SCTP_SHUTDOWN_SENT 0x0010
+#define SCTP_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_SHUTDOWN_PENDING 0x0080
+
+
+#define SCTP_ACTIVE 0x0001 /* SCTP_ADDR_REACHABLE */
+#define SCTP_INACTIVE 0x0002 /* neither SCTP_ADDR_REACHABLE
+ nor SCTP_ADDR_UNCONFIRMED */
+#define SCTP_UNCONFIRMED 0x0200 /* SCTP_ADDR_UNCONFIRMED */
+
+struct sctp_authchunks {
+ sctp_assoc_t gauth_assoc_id;
+/* uint32_t gauth_number_of_chunks; not available */
+ uint8_t gauth_chunks[];
+};
+
+struct sctp_assoc_ids {
+ uint32_t gaids_number_of_ids;
+ sctp_assoc_t gaids_assoc_id[];
+};
+
+struct sctp_setpeerprim {
+ struct sockaddr_storage sspp_addr;
+ sctp_assoc_t sspp_assoc_id;
+ uint8_t sspp_padding[4];
+};
+
+struct sctp_authchunk {
+ uint8_t sauth_chunk;
+};
+
+
+struct sctp_get_nonce_values {
+ sctp_assoc_t gn_assoc_id;
+ uint32_t gn_peers_tag;
+ uint32_t gn_local_tag;
+};
+
+
+/*
+ * Main SCTP chunk types
+ */
+/************0x00 series ***********/
+#define SCTP_DATA 0x00
+#define SCTP_INITIATION 0x01
+#define SCTP_INITIATION_ACK 0x02
+#define SCTP_SELECTIVE_ACK 0x03
+#define SCTP_HEARTBEAT_REQUEST 0x04
+#define SCTP_HEARTBEAT_ACK 0x05
+#define SCTP_ABORT_ASSOCIATION 0x06
+#define SCTP_SHUTDOWN 0x07
+#define SCTP_SHUTDOWN_ACK 0x08
+#define SCTP_OPERATION_ERROR 0x09
+#define SCTP_COOKIE_ECHO 0x0a
+#define SCTP_COOKIE_ACK 0x0b
+#define SCTP_ECN_ECHO 0x0c
+#define SCTP_ECN_CWR 0x0d
+#define SCTP_SHUTDOWN_COMPLETE 0x0e
+/* RFC4895 */
+#define SCTP_AUTHENTICATION 0x0f
+/* EY nr_sack chunk id*/
+#define SCTP_NR_SELECTIVE_ACK 0x10
+/************0x40 series ***********/
+/************0x80 series ***********/
+/* RFC5061 */
+#define SCTP_ASCONF_ACK 0x80
+/* draft-ietf-stewart-pktdrpsctp */
+#define SCTP_PACKET_DROPPED 0x81
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STREAM_RESET 0x82
+
+/* RFC4820 */
+#define SCTP_PAD_CHUNK 0x84
+/************0xc0 series ***********/
+/* RFC3758 */
+#define SCTP_FORWARD_CUM_TSN 0xc0
+/* RFC5061 */
+#define SCTP_ASCONF 0xc1
+
+struct sctp_authkey {
+ sctp_assoc_t sca_assoc_id;
+ uint16_t sca_keynumber;
+ uint16_t sca_keylength;
+ uint8_t sca_key[];
+};
+
+struct sctp_authkeyid {
+ sctp_assoc_t scact_assoc_id;
+ uint16_t scact_keynumber;
+};
+
+struct sctp_cc_option {
+ int option;
+ struct sctp_assoc_value aid_value;
+};
+
+struct sctp_stream_value {
+ sctp_assoc_t assoc_id;
+ uint16_t stream_id;
+ uint16_t stream_value;
+};
+
+struct sctp_timeouts {
+ sctp_assoc_t stimo_assoc_id;
+ uint32_t stimo_init;
+ uint32_t stimo_data;
+ uint32_t stimo_sack;
+ uint32_t stimo_shutdown;
+ uint32_t stimo_heartbeat;
+ uint32_t stimo_cookie;
+ uint32_t stimo_shutdownack;
+};
+
+struct sctp_prstatus {
+ sctp_assoc_t sprstat_assoc_id;
+ uint16_t sprstat_sid;
+ uint16_t sprstat_policy;
+ uint64_t sprstat_abandoned_unsent;
+ uint64_t sprstat_abandoned_sent;
+};
+
+/* Standard TCP Congestion Control */
+#define SCTP_CC_RFC2581 0x00000000
+/* High Speed TCP Congestion Control (Floyd) */
+#define SCTP_CC_HSTCP 0x00000001
+/* HTCP Congestion Control */
+#define SCTP_CC_HTCP 0x00000002
+/* RTCC Congestion Control - RFC2581 plus */
+#define SCTP_CC_RTCC 0x00000003
+
+#define SCTP_CC_OPT_RTCC_SETMODE 0x00002000
+#define SCTP_CC_OPT_USE_DCCC_EC 0x00002001
+#define SCTP_CC_OPT_STEADY_STEP 0x00002002
+
+#define SCTP_CMT_OFF 0
+#define SCTP_CMT_BASE 1
+#define SCTP_CMT_RPV1 2
+#define SCTP_CMT_RPV2 3
+#define SCTP_CMT_MPTCP 4
+#define SCTP_CMT_MAX SCTP_CMT_MPTCP
+
+/* RS - Supported stream scheduling modules for pluggable
+ * stream scheduling
+ */
+/* Default simple round-robin */
+#define SCTP_SS_DEFAULT 0x00000000
+/* Real round-robin */
+#define SCTP_SS_ROUND_ROBIN 0x00000001
+/* Real round-robin per packet */
+#define SCTP_SS_ROUND_ROBIN_PACKET 0x00000002
+/* Priority */
+#define SCTP_SS_PRIORITY 0x00000003
+/* Fair Bandwidth */
+#define SCTP_SS_FAIR_BANDWITH 0x00000004
+/* First-come, first-serve */
+#define SCTP_SS_FIRST_COME 0x00000005
+
+/******************** System calls *************/
+
+struct socket;
+
+void
+usrsctp_init(uint16_t,
+ int (*)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+ void (*)(const char *format, ...));
+
+void
+usrsctp_init_nothreads(uint16_t,
+ int (*)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+ void (*)(const char *format, ...));
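+
+/* A typical start-up sketch, assuming SCTP encapsulated in UDP on
+ * the IANA-assigned port 9899 and no debug output:
+ *
+ *   usrsctp_init(9899, NULL, NULL);
+ *
+ * For AF_CONN operation pass 0 as the port and a conn_output
+ * callback instead. usrsctp_init_nothreads() is identical but starts
+ * no internal timer thread; the application must then drive the
+ * stack via usrsctp_handle_timers() (declared below).
+ */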
+
+struct socket *
+usrsctp_socket(int domain, int type, int protocol,
+ int (*receive_cb)(struct socket *sock, union sctp_sockstore addr, void *data,
+ size_t datalen, struct sctp_rcvinfo, int flags, void *ulp_info),
+ int (*send_cb)(struct socket *sock, uint32_t sb_free),
+ uint32_t sb_threshold,
+ void *ulp_info);
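+
+/* Creating a one-to-one style socket with no callbacks (blocking
+ * I/O); a minimal sketch:
+ *
+ *   struct socket *so;
+ *
+ *   so = usrsctp_socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP,
+ *                       NULL, NULL, 0, NULL);
+ *   if (so == NULL) {
+ *           perror("usrsctp_socket");
+ *   }
+ *
+ * Passing a receive_cb switches the socket to callback-driven input;
+ * data is then delivered to the callback instead of usrsctp_recvv().
+ */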
+
+int
+usrsctp_setsockopt(struct socket *so,
+ int level,
+ int option_name,
+ const void *option_value,
+ socklen_t option_len);
+
+int
+usrsctp_getsockopt(struct socket *so,
+ int level,
+ int option_name,
+ void *option_value,
+ socklen_t *option_len);
+
+int
+usrsctp_opt_info(struct socket *so,
+ sctp_assoc_t id,
+ int opt,
+ void *arg,
+ socklen_t *size);
+
+int
+usrsctp_getpaddrs(struct socket *so,
+ sctp_assoc_t id,
+ struct sockaddr **raddrs);
+
+void
+usrsctp_freepaddrs(struct sockaddr *addrs);
+
+int
+usrsctp_getladdrs(struct socket *so,
+ sctp_assoc_t id,
+ struct sockaddr **raddrs);
+
+void
+usrsctp_freeladdrs(struct sockaddr *addrs);
+
+ssize_t
+usrsctp_sendv(struct socket *so,
+ const void *data,
+ size_t len,
+ struct sockaddr *to,
+ int addrcnt,
+ void *info,
+ socklen_t infolen,
+ unsigned int infotype,
+ int flags);
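+
+/* Sketch of an ordered send on stream 1 with a payload protocol
+ * identifier, using the sctp_sndinfo form on a connected socket
+ * (`so', `buf' and `len' are hypothetical):
+ *
+ *   struct sctp_sndinfo snd;
+ *
+ *   memset(&snd, 0, sizeof(snd));
+ *   snd.snd_sid = 1;
+ *   snd.snd_ppid = htonl(42); // PPID is in network byte order
+ *   if (usrsctp_sendv(so, buf, len, NULL, 0,
+ *                     &snd, sizeof(snd), SCTP_SENDV_SNDINFO, 0) < 0) {
+ *           perror("usrsctp_sendv");
+ *   }
+ */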
+
+ssize_t
+usrsctp_recvv(struct socket *so,
+ void *dbuf,
+ size_t len,
+ struct sockaddr *from,
+ socklen_t *fromlen,
+ void *info,
+ socklen_t *infolen,
+ unsigned int *infotype,
+ int *msg_flags);
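+
+/* Sketch of a blocking receive that also retrieves a sctp_rcvinfo
+ * (SCTP_RECVRCVINFO must be enabled for the info to be returned;
+ * `so' is hypothetical):
+ *
+ *   char buf[2048];
+ *   struct sctp_rcvinfo rcv;
+ *   socklen_t infolen = sizeof(rcv);
+ *   unsigned int infotype = 0;
+ *   int msg_flags = 0;
+ *   ssize_t n;
+ *
+ *   n = usrsctp_recvv(so, buf, sizeof(buf), NULL, NULL,
+ *                     &rcv, &infolen, &infotype, &msg_flags);
+ *   if (n > 0 && !(msg_flags & MSG_NOTIFICATION) &&
+ *       infotype == SCTP_RECVV_RCVINFO) {
+ *           // user data arrived on stream rcv.rcv_sid
+ *   }
+ */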
+
+int
+usrsctp_bind(struct socket *so,
+ struct sockaddr *name,
+ socklen_t namelen);
+
+#define SCTP_BINDX_ADD_ADDR 0x00008001
+#define SCTP_BINDX_REM_ADDR 0x00008002
+
+int
+usrsctp_bindx(struct socket *so,
+ struct sockaddr *addrs,
+ int addrcnt,
+ int flags);
+
+int
+usrsctp_listen(struct socket *so,
+ int backlog);
+
+struct socket *
+usrsctp_accept(struct socket *so,
+ struct sockaddr *aname,
+ socklen_t *anamelen);
+
+struct socket *
+usrsctp_peeloff(struct socket *, sctp_assoc_t);
+
+int
+usrsctp_connect(struct socket *so,
+ struct sockaddr *name,
+ socklen_t namelen);
+
+int
+usrsctp_connectx(struct socket *so,
+ const struct sockaddr *addrs, int addrcnt,
+ sctp_assoc_t *id);
+
+void
+usrsctp_close(struct socket *so);
+
+sctp_assoc_t
+usrsctp_getassocid(struct socket *, struct sockaddr *);
+
+int
+usrsctp_finish(void);
+
+int
+usrsctp_shutdown(struct socket *so, int how);
+
+void
+usrsctp_conninput(void *, const void *, size_t, uint8_t);
+
+int
+usrsctp_set_non_blocking(struct socket *, int);
+
+int
+usrsctp_get_non_blocking(struct socket *);
+
+void
+usrsctp_register_address(void *);
+
+void
+usrsctp_deregister_address(void *);
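+
+/* The AF_CONN data path in outline: each lower-layer connection is
+ * represented by an opaque pointer (`ctx' below, hypothetical),
+ * registered once, and every packet received from the lower layer is
+ * injected into the stack. Outbound packets come back through the
+ * conn_output callback given to usrsctp_init():
+ *
+ *   usrsctp_register_address(ctx);
+ *   ...
+ *   // for every packet arriving from the lower layer:
+ *   usrsctp_conninput(ctx, packet, packet_len, 0);
+ */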
+
+int
+usrsctp_set_ulpinfo(struct socket *, void *);
+
+int
+usrsctp_get_ulpinfo(struct socket *, void **);
+
+int
+usrsctp_set_upcall(struct socket *so,
+ void (*upcall)(struct socket *, void *, int),
+ void *arg);
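+
+/* Sketch of the non-blocking, upcall-driven pattern: the upcall
+ * fires whenever the socket becomes readable or writable, and the
+ * handler (hypothetical `handle_upcall') queries the pending events:
+ *
+ *   static void
+ *   handle_upcall(struct socket *so, void *arg, int flags)
+ *   {
+ *           int events = usrsctp_get_events(so);
+ *
+ *           if (events & SCTP_EVENT_READ) {
+ *                   // call usrsctp_recvv() until it would block
+ *           }
+ *   }
+ *
+ *   usrsctp_set_non_blocking(so, 1);
+ *   usrsctp_set_upcall(so, handle_upcall, NULL);
+ */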
+
+int
+usrsctp_get_events(struct socket *so);
+
+
+void
+usrsctp_handle_timers(uint32_t elapsed_milliseconds);
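+
+/* Only needed after usrsctp_init_nothreads(): the application's
+ * event loop reports how much wall-clock time has passed so the
+ * stack can run its retransmission and heartbeat timers, e.g.
+ *
+ *   usrsctp_handle_timers(10); // 10 ms have elapsed
+ */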
+
+#define SCTP_DUMP_OUTBOUND 1
+#define SCTP_DUMP_INBOUND 0
+
+char *
+usrsctp_dumppacket(const void *, size_t, int);
+
+void
+usrsctp_freedumpbuffer(char *);
+
+void
+usrsctp_enable_crc32c_offload(void);
+
+void
+usrsctp_disable_crc32c_offload(void);
+
+uint32_t
+usrsctp_crc32c(void *, size_t);
+
+#define USRSCTP_TUNABLE_DECL(__field) \
+int usrsctp_tunable_set_ ## __field(uint32_t value);\
+uint32_t usrsctp_sysctl_get_ ## __field(void);
+
+USRSCTP_TUNABLE_DECL(sctp_hashtblsize)
+USRSCTP_TUNABLE_DECL(sctp_pcbtblsize)
+USRSCTP_TUNABLE_DECL(sctp_chunkscale)
+
+#define USRSCTP_SYSCTL_DECL(__field) \
+int usrsctp_sysctl_set_ ## __field(uint32_t value);\
+uint32_t usrsctp_sysctl_get_ ## __field(void);
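+
+/* Each declaration below therefore expands to a setter/getter pair,
+ * e.g. for sctp_ecn_enable:
+ *
+ *   int usrsctp_sysctl_set_sctp_ecn_enable(uint32_t value);
+ *   uint32_t usrsctp_sysctl_get_sctp_ecn_enable(void);
+ *
+ * so turning ECN support off after usrsctp_init() is simply:
+ *
+ *   usrsctp_sysctl_set_sctp_ecn_enable(0);
+ */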
+
+USRSCTP_SYSCTL_DECL(sctp_sendspace)
+USRSCTP_SYSCTL_DECL(sctp_recvspace)
+USRSCTP_SYSCTL_DECL(sctp_auto_asconf)
+USRSCTP_SYSCTL_DECL(sctp_multiple_asconfs)
+USRSCTP_SYSCTL_DECL(sctp_ecn_enable)
+USRSCTP_SYSCTL_DECL(sctp_pr_enable)
+USRSCTP_SYSCTL_DECL(sctp_auth_enable)
+USRSCTP_SYSCTL_DECL(sctp_asconf_enable)
+USRSCTP_SYSCTL_DECL(sctp_reconfig_enable)
+USRSCTP_SYSCTL_DECL(sctp_nrsack_enable)
+USRSCTP_SYSCTL_DECL(sctp_pktdrop_enable)
+USRSCTP_SYSCTL_DECL(sctp_no_csum_on_loopback)
+USRSCTP_SYSCTL_DECL(sctp_peer_chunk_oh)
+USRSCTP_SYSCTL_DECL(sctp_max_burst_default)
+USRSCTP_SYSCTL_DECL(sctp_max_chunks_on_queue)
+USRSCTP_SYSCTL_DECL(sctp_min_split_point)
+USRSCTP_SYSCTL_DECL(sctp_delayed_sack_time_default)
+USRSCTP_SYSCTL_DECL(sctp_sack_freq_default)
+USRSCTP_SYSCTL_DECL(sctp_system_free_resc_limit)
+USRSCTP_SYSCTL_DECL(sctp_asoc_free_resc_limit)
+USRSCTP_SYSCTL_DECL(sctp_heartbeat_interval_default)
+USRSCTP_SYSCTL_DECL(sctp_pmtu_raise_time_default)
+USRSCTP_SYSCTL_DECL(sctp_shutdown_guard_time_default)
+USRSCTP_SYSCTL_DECL(sctp_secret_lifetime_default)
+USRSCTP_SYSCTL_DECL(sctp_rto_max_default)
+USRSCTP_SYSCTL_DECL(sctp_rto_min_default)
+USRSCTP_SYSCTL_DECL(sctp_rto_initial_default)
+USRSCTP_SYSCTL_DECL(sctp_init_rto_max_default)
+USRSCTP_SYSCTL_DECL(sctp_valid_cookie_life_default)
+USRSCTP_SYSCTL_DECL(sctp_init_rtx_max_default)
+USRSCTP_SYSCTL_DECL(sctp_assoc_rtx_max_default)
+USRSCTP_SYSCTL_DECL(sctp_path_rtx_max_default)
+USRSCTP_SYSCTL_DECL(sctp_add_more_threshold)
+USRSCTP_SYSCTL_DECL(sctp_nr_incoming_streams_default)
+USRSCTP_SYSCTL_DECL(sctp_nr_outgoing_streams_default)
+USRSCTP_SYSCTL_DECL(sctp_cmt_on_off)
+USRSCTP_SYSCTL_DECL(sctp_cmt_use_dac)
+USRSCTP_SYSCTL_DECL(sctp_use_cwnd_based_maxburst)
+USRSCTP_SYSCTL_DECL(sctp_nat_friendly)
+USRSCTP_SYSCTL_DECL(sctp_L2_abc_variable)
+USRSCTP_SYSCTL_DECL(sctp_mbuf_threshold_count)
+USRSCTP_SYSCTL_DECL(sctp_do_drain)
+USRSCTP_SYSCTL_DECL(sctp_hb_maxburst)
+USRSCTP_SYSCTL_DECL(sctp_abort_if_one_2_one_hits_limit)
+USRSCTP_SYSCTL_DECL(sctp_min_residual)
+USRSCTP_SYSCTL_DECL(sctp_max_retran_chunk)
+USRSCTP_SYSCTL_DECL(sctp_logging_level)
+USRSCTP_SYSCTL_DECL(sctp_default_cc_module)
+USRSCTP_SYSCTL_DECL(sctp_default_frag_interleave)
+USRSCTP_SYSCTL_DECL(sctp_mobility_base)
+USRSCTP_SYSCTL_DECL(sctp_mobility_fasthandoff)
+USRSCTP_SYSCTL_DECL(sctp_inits_include_nat_friendly)
+USRSCTP_SYSCTL_DECL(sctp_udp_tunneling_port)
+USRSCTP_SYSCTL_DECL(sctp_enable_sack_immediately)
+USRSCTP_SYSCTL_DECL(sctp_vtag_time_wait)
+USRSCTP_SYSCTL_DECL(sctp_blackhole)
+USRSCTP_SYSCTL_DECL(sctp_sendall_limit)
+USRSCTP_SYSCTL_DECL(sctp_diag_info_code)
+USRSCTP_SYSCTL_DECL(sctp_fr_max_burst_default)
+USRSCTP_SYSCTL_DECL(sctp_path_pf_threshold)
+USRSCTP_SYSCTL_DECL(sctp_default_ss_module)
+USRSCTP_SYSCTL_DECL(sctp_rttvar_bw)
+USRSCTP_SYSCTL_DECL(sctp_rttvar_rtt)
+USRSCTP_SYSCTL_DECL(sctp_rttvar_eqret)
+USRSCTP_SYSCTL_DECL(sctp_steady_step)
+USRSCTP_SYSCTL_DECL(sctp_use_dccc_ecn)
+USRSCTP_SYSCTL_DECL(sctp_buffer_splitting)
+USRSCTP_SYSCTL_DECL(sctp_initial_cwnd)
+#ifdef SCTP_DEBUG
+USRSCTP_SYSCTL_DECL(sctp_debug_on)
+/* More specific values can be found in sctp_constants.h, but
+ * are not considered to be part of the API.
+ */
+#define SCTP_DEBUG_NONE 0x00000000
+#define SCTP_DEBUG_ALL 0xffffffff
+#endif
+#undef USRSCTP_SYSCTL_DECL
+struct sctp_timeval {
+ uint32_t tv_sec;
+ uint32_t tv_usec;
+};
+
+struct sctpstat {
+ struct sctp_timeval sctps_discontinuitytime; /* sctpStats 18 (TimeStamp) */
+ /* MIB according to RFC 3873 */
+ uint32_t sctps_currestab; /* sctpStats 1 (Gauge32) */
+ uint32_t sctps_activeestab; /* sctpStats 2 (Counter32) */
+ uint32_t sctps_restartestab;
+ uint32_t sctps_collisionestab;
+ uint32_t sctps_passiveestab; /* sctpStats 3 (Counter32) */
+ uint32_t sctps_aborted; /* sctpStats 4 (Counter32) */
+ uint32_t sctps_shutdown; /* sctpStats 5 (Counter32) */
+ uint32_t sctps_outoftheblue; /* sctpStats 6 (Counter32) */
+ uint32_t sctps_checksumerrors; /* sctpStats 7 (Counter32) */
+ uint32_t sctps_outcontrolchunks; /* sctpStats 8 (Counter64) */
+ uint32_t sctps_outorderchunks; /* sctpStats 9 (Counter64) */
+ uint32_t sctps_outunorderchunks; /* sctpStats 10 (Counter64) */
+ uint32_t sctps_incontrolchunks; /* sctpStats 11 (Counter64) */
+ uint32_t sctps_inorderchunks; /* sctpStats 12 (Counter64) */
+ uint32_t sctps_inunorderchunks; /* sctpStats 13 (Counter64) */
+ uint32_t sctps_fragusrmsgs; /* sctpStats 14 (Counter64) */
+ uint32_t sctps_reasmusrmsgs; /* sctpStats 15 (Counter64) */
+ uint32_t sctps_outpackets; /* sctpStats 16 (Counter64) */
+ uint32_t sctps_inpackets; /* sctpStats 17 (Counter64) */
+
+ /* input statistics: */
+ uint32_t sctps_recvpackets; /* total input packets */
+ uint32_t sctps_recvdatagrams; /* total input datagrams */
+ uint32_t sctps_recvpktwithdata; /* total packets that had data */
+ uint32_t sctps_recvsacks; /* total input SACK chunks */
+ uint32_t sctps_recvdata; /* total input DATA chunks */
+ uint32_t sctps_recvdupdata; /* total input duplicate DATA chunks */
+ uint32_t sctps_recvheartbeat; /* total input HB chunks */
+ uint32_t sctps_recvheartbeatack; /* total input HB-ACK chunks */
+ uint32_t sctps_recvecne; /* total input ECNE chunks */
+ uint32_t sctps_recvauth; /* total input AUTH chunks */
+ uint32_t sctps_recvauthmissing; /* total input chunks missing AUTH */
+ uint32_t sctps_recvivalhmacid; /* total number of invalid HMAC ids received */
+ uint32_t sctps_recvivalkeyid; /* total number of invalid secret ids received */
+ uint32_t sctps_recvauthfailed; /* total number of auth failed */
+ uint32_t sctps_recvexpress; /* total fast path receives all one chunk */
+ uint32_t sctps_recvexpressm; /* total fast path multi-part data */
+ uint32_t sctps_recv_spare; /* formerly sctps_recvnocrc */
+ uint32_t sctps_recvswcrc;
+ uint32_t sctps_recvhwcrc;
+
+ /* output statistics: */
+ uint32_t sctps_sendpackets; /* total output packets */
+ uint32_t sctps_sendsacks; /* total output SACKs */
+ uint32_t sctps_senddata; /* total output DATA chunks */
+ uint32_t sctps_sendretransdata; /* total output retransmitted DATA chunks */
+ uint32_t sctps_sendfastretrans; /* total output fast retransmitted DATA chunks */
+ uint32_t sctps_sendmultfastretrans; /* total FR's that happened more than once
+ * to same chunk (u-del multi-fr algo).
+ */
+ uint32_t sctps_sendheartbeat; /* total output HB chunks */
+ uint32_t sctps_sendecne; /* total output ECNE chunks */
+ uint32_t sctps_sendauth; /* total output AUTH chunks FIXME */
+ uint32_t sctps_senderrors; /* ip_output error counter */
+ uint32_t sctps_send_spare; /* formerly sctps_sendnocrc */
+ uint32_t sctps_sendswcrc;
+ uint32_t sctps_sendhwcrc;
+ /* PCKDROPREP statistics: */
+ uint32_t sctps_pdrpfmbox; /* Packet drop from middle box */
+ uint32_t sctps_pdrpfehos; /* P-drop from end host */
+ uint32_t sctps_pdrpmbda; /* P-drops with data */
+ uint32_t sctps_pdrpmbct; /* P-drops, non-data, non-endhost */
+ uint32_t sctps_pdrpbwrpt; /* P-drop, non-endhost, bandwidth rep only */
+ uint32_t sctps_pdrpcrupt; /* P-drop, not enough for chunk header */
+ uint32_t sctps_pdrpnedat; /* P-drop, not enough data to confirm */
+ uint32_t sctps_pdrppdbrk; /* P-drop, where process_chunk_drop said break */
+ uint32_t sctps_pdrptsnnf; /* P-drop, could not find TSN */
+ uint32_t sctps_pdrpdnfnd; /* P-drop, attempt reverse TSN lookup */
+ uint32_t sctps_pdrpdiwnp; /* P-drop, e-host confirms zero-rwnd */
+ uint32_t sctps_pdrpdizrw; /* P-drop, midbox confirms no space */
+ uint32_t sctps_pdrpbadd; /* P-drop, data did not match TSN */
+ uint32_t sctps_pdrpmark; /* P-drop, TSN's marked for Fast Retran */
+ /* timeouts */
+ uint32_t sctps_timoiterator; /* Number of iterator timers that fired */
+ uint32_t sctps_timodata; /* Number of T3 data time outs */
+ uint32_t sctps_timowindowprobe; /* Number of window probe (T3) timers that fired */
+ uint32_t sctps_timoinit; /* Number of INIT timers that fired */
+ uint32_t sctps_timosack; /* Number of sack timers that fired */
+ uint32_t sctps_timoshutdown; /* Number of shutdown timers that fired */
+ uint32_t sctps_timoheartbeat; /* Number of heartbeat timers that fired */
+ uint32_t sctps_timocookie; /* Number of times a cookie timeout fired */
+ uint32_t sctps_timosecret; /* Number of times an endpoint changed its cookie secret*/
+ uint32_t sctps_timopathmtu; /* Number of PMTU timers that fired */
+ uint32_t sctps_timoshutdownack; /* Number of shutdown ack timers that fired */
+ uint32_t sctps_timoshutdownguard; /* Number of shutdown guard timers that fired */
+ uint32_t sctps_timostrmrst; /* Number of stream reset timers that fired */
+ uint32_t sctps_timoearlyfr; /* Number of early FR timers that fired */
+ uint32_t sctps_timoasconf; /* Number of times an asconf timer fired */
+ uint32_t sctps_timodelprim; /* Number of times a prim_deleted timer fired */
+ uint32_t sctps_timoautoclose; /* Number of times auto close timer fired */
+ uint32_t sctps_timoassockill; /* Number of asoc free timers expired */
+ uint32_t sctps_timoinpkill; /* Number of inp free timers expired */
+ /* former early FR counters */
+ uint32_t sctps_spare[11];
+ /* others */
+ uint32_t sctps_hdrops; /* packet shorter than header */
+ uint32_t sctps_badsum; /* checksum error */
+ uint32_t sctps_noport; /* no endpoint for port */
+ uint32_t sctps_badvtag; /* bad v-tag */
+ uint32_t sctps_badsid; /* bad SID */
+ uint32_t sctps_nomem; /* no memory */
+ uint32_t sctps_fastretransinrtt; /* number of multiple FR in a RTT window */
+ uint32_t sctps_markedretrans;
+ uint32_t sctps_naglesent; /* nagle allowed sending */
+ uint32_t sctps_naglequeued; /* nagle doesn't allow sending */
+ uint32_t sctps_maxburstqueued; /* max burst doesn't allow sending */
+ uint32_t sctps_ifnomemqueued; /* look ahead tells us no memory in
+ * interface ring buffer OR we had a
+ * send error and are queuing one send.
+ */
+ uint32_t sctps_windowprobed; /* total number of window probes sent */
+ uint32_t sctps_lowlevelerr; /* total times an output error causes us
+ * to clamp down on next user send.
+ */
+ uint32_t sctps_lowlevelerrusr; /* total times sctp_senderrors were caused from
+ * a user send from a user invoked send not
+ * a sack response
+ */
+ uint32_t sctps_datadropchklmt; /* Number of in data drops due to chunk limit reached */
+ uint32_t sctps_datadroprwnd; /* Number of in data drops due to rwnd limit reached */
+ uint32_t sctps_ecnereducedcwnd; /* Number of times a ECN reduced the cwnd */
+ uint32_t sctps_vtagexpress; /* Used express lookup via vtag */
+ uint32_t sctps_vtagbogus; /* Collision in express lookup. */
+ uint32_t sctps_primary_randry; /* Number of times the sender ran dry of user data on primary */
+ uint32_t sctps_cmt_randry; /* Same as above, but for CMT */
+ uint32_t sctps_slowpath_sack; /* Sacks the slow way */
+ uint32_t sctps_wu_sacks_sent; /* Window Update only sacks sent */
+ uint32_t sctps_sends_with_flags; /* number of sends with sinfo_flags !=0 */
+ uint32_t sctps_sends_with_unord; /* number of unordered sends */
+ uint32_t sctps_sends_with_eof; /* number of sends with EOF flag set */
+ uint32_t sctps_sends_with_abort; /* number of sends with ABORT flag set */
+ uint32_t sctps_protocol_drain_calls;/* number of times protocol drain called */
+ uint32_t sctps_protocol_drains_done;/* number of times we did a protocol drain */
+ uint32_t sctps_read_peeks; /* Number of times recv was called with peek */
+ uint32_t sctps_cached_chk; /* Number of cached chunks used */
+ uint32_t sctps_cached_strmoq; /* Number of cached stream oq's used */
+ uint32_t sctps_left_abandon; /* Number of unread messages abandoned by close */
+ uint32_t sctps_send_burst_avoid; /* Unused */
+ uint32_t sctps_send_cwnd_avoid; /* Send cwnd full avoidance, already max burst inflight to net */
+ uint32_t sctps_fwdtsn_map_over; /* number of map array over-runs via fwd-tsn's */
+ uint32_t sctps_queue_upd_ecne; /* Number of times we queued or updated an ECN chunk on send queue */
+ uint32_t sctps_reserved[31]; /* Future ABI compat - remove int's from here when adding new */
+};
+
+void
+usrsctp_get_stat(struct sctpstat *);
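+
+/* Snapshot-style read of the global counters, e.g. the number of
+ * currently established associations:
+ *
+ *   struct sctpstat st;
+ *
+ *   usrsctp_get_stat(&st);
+ *   printf("%u associations\n", (unsigned int)st.sctps_currestab);
+ */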
+
+#ifdef _WIN32
+#ifdef _MSC_VER
+#pragma warning(default: 4200)
+#endif
+#endif
+#ifdef __cplusplus
+}
+#endif
+#endif