tcp.c revision be4c8f742bc67a43d01e3ea82a814b7d6503dbfd
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990 Mentat Inc. */
#define _SUN_TPI_VERSION 2
#include <sys/xti_inet.h>
#include <sys/ethernet.h>
#include <sys/socketvar.h>
#include <sys/isa_defs.h>
#include <inet/ipsec_impl.h>
#include <inet/proto_set.h>
#include <inet/kstatcom.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/udp_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ip_ftable.h>
#include <inet/ipp_common.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue_impl.h>
#include <rpc/pmap_prot.h>
/*
* TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
*
* (Read the detailed design doc in PSARC case directory)
*
 * The entire tcp state is contained in the tcp_t and conn_t structures
* which are allocated in tandem using ipcl_conn_create() and passing
* IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
* the references on the tcp_t. The tcp_t structure is never compressed
* and packets always land on the correct TCP perimeter from the time
* eager is created till the time tcp_t dies (as such the old mentat
* TCP global queue is not used for detached state and no IPSEC checking
* is required). The global queue is still allocated to send out resets
 * for connections which have no listeners, and IP directly calls
* tcp_xmit_listeners_reset() which does any policy check.
*
* Protection and Synchronisation mechanism:
*
* The tcp data structure does not use any kind of lock for protecting
* its state but instead uses 'squeues' for mutual exclusion from various
* read and write side threads. To access a tcp member, the thread should
* always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
* or SQ_NODRAIN). Since the squeues allow a direct function call, caller
* can pass any tcp function having prototype of edesc_t as argument
* (different from traditional STREAMs model where packets come in only
* designated entry points). The list of functions that can be directly
* called via squeue are listed before the usual function prototype.
*
* Referencing:
*
* TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear when it's needed. When the application
* creates an outgoing connection or accepts an incoming connection, we
* start out with 2 references on 'conn_ref'. One for TCP and one for IP.
* The IP reference is just a symbolic reference since ip_tcpclose()
* looks at tcp structure after tcp_close_output() returns which could
* have dropped the last TCP reference. So as long as the connection is
* in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
* conn_t. The classifier puts its own reference when the connection is
 * inserted in listen or connected hash. Any time a thread needs to enter
 * the tcp perimeter, on the write side or after a classify on the read side,
 * it puts a reference on the conn before doing squeue_enter/tryenter/fill. For
* read side, the classifier itself puts the reference under fanout lock
* to make sure that tcp can't disappear before it gets processed. The
* squeue will drop this reference automatically so the called function
* doesn't have to do a DEC_REF.
*
* Opening a new connection:
*
* The outgoing connection open is pretty simple. tcp_open() does the
 * squeue assignment based on the CPU the application
* is running on. So for outbound connections, processing is always done
* on application CPU which might be different from the incoming CPU
* being interrupted by the NIC. An optimal way would be to figure out
* the NIC <-> CPU binding at listen time, and assign the outgoing
* connection to the squeue attached to the CPU that will be interrupted
* for incoming packets (we know the NIC based on the bind IP address).
* This might seem like a problem if more data is going out but the
* fact is that in most cases the transmit is ACK driven transmit where
* the outgoing data normally sits on TCP's xmit queue waiting to be
* transmitted.
*
* Accepting a connection:
*
* This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
* top of tcp_input_listener(). But briefly, the squeue is picked by
* ip_fanout based on the ring or the sender (if loopback).
*
* Closing a connection:
*
* The close is fairly straight forward. tcp_close() calls tcp_close_output()
* via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
 * reference but tcp_close() always drops IP's reference. So if tcp was
 * not killed, it is sitting in the time_wait list with 2 references - 1 for TCP
 * and 1 because it is in the classifier's connected hash. This is the condition
 * we use to determine that it's OK to clean up the tcp outside of squeue
* when time wait expires (check the ref under fanout and conn_lock and
* if it is 2, remove it from fanout hash and kill it).
*
* Although close just drops the necessary references and marks the
* tcp_detached state, tcp_close needs to know the tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
* inbound packet might attempt to go up the STREAM while the close
* has happened and tcp_detached is not set). So a special lock and
* flag is used along with a condition variable (tcp_closelock, tcp_closed,
* and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
* tcp_detached.
*
* Special provisions and fast paths:
*
* We make special provisions for sockfs by marking tcp_issocket
* whenever we have only sockfs on top of TCP. This allows us to skip
* putting the tcp in acceptor hash since a sockfs listener can never
* become acceptor and also avoid allocating a tcp_t for acceptor STREAM
* since eager has already been allocated and the accept now happens
* on acceptor STREAM. There is a big blob of comment on top of
* tcp_input_listener explaining the new accept. When socket is POP'd,
* sockfs sends us an ioctl to mark the fact and we go back to old
 * behaviour. Once tcp_issocket is unset, it's never set for the
* life of that connection.
*
* IPsec notes :
*
* Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP, including checking new
 * connections and setting IPsec policies for new connections. The
 * only exception is tcp_xmit_listeners_reset(), which is called
 * directly from IP and needs to do a policy check to see if TH_RST
* can be sent out.
*/
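/*
 * Illustrative sketch (not part of the original source, compiled out): the
 * reference pattern described above, reduced to a standalone example.  The
 * names conn_sketch_t, ref_hold(), ref_rele() and deliver_serialized() are
 * hypothetical stand-ins for conn_t, CONN_INC_REF, CONN_DEC_REF and squeue
 * entry respectively.
 */
#if 0
typedef struct conn_sketch {
	unsigned int	cs_ref;	/* guarded by a per-conn lock in the real code */
} conn_sketch_t;

static void
ref_hold(conn_sketch_t *cs)
{
	cs->cs_ref++;		/* caller already guarantees liveness */
}

static void
ref_rele(conn_sketch_t *cs)
{
	if (--cs->cs_ref == 0) {
		/* last reference dropped: the connection may now be freed */
	}
}

/*
 * A thread that classified a packet to this connection takes a reference
 * before handing the work to the serializing perimeter; the perimeter drops
 * that reference once the handler returns, so the handler itself never has
 * to do a DEC_REF.
 */
static void
deliver_serialized(conn_sketch_t *cs, void (*handler)(conn_sketch_t *))
{
	ref_hold(cs);
	handler(cs);		/* runs exclusively, as if behind the squeue */
	ref_rele(cs);
}
#endif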
/*
* Values for squeue switch:
* 1: SQ_NODRAIN
* 2: SQ_PROCESS
* 3: SQ_FILL
*/
int tcp_squeue_flag;
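/*
 * A minimal sketch (compiled out) of how the tunable above maps to a squeue
 * entry flag, matching the 1/2/3 value table in the preceding comment.  The
 * real mapping is done by tcp_squeue_switch(), declared further below.
 */
#if 0
static int
squeue_flag_sketch(int val)
{
	switch (val) {
	case 1:
		return (SQ_NODRAIN);
	case 2:
		return (SQ_PROCESS);
	case 3:
	default:
		return (SQ_FILL);
	}
}
#endif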
/*
 * To prevent memory hogging, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of CPUs.
*/
#define TCP_XMIT_LOWATER 4096
#define TCP_XMIT_HIWATER 49152
#define TCP_RECV_LOWATER 2048
#define TCP_RECV_HIWATER 128000
/*
* Size of acceptor hash list. It has to be a power of 2 for hashing.
*/
#define TCP_ACCEPTOR_FANOUT_SIZE 256
#ifdef _ILP32
#define TCP_ACCEPTOR_HASH(accid) \
#else
#define TCP_ACCEPTOR_HASH(accid) \
#endif /* _ILP32 */
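/*
 * Sketch (compiled out) of a power-of-two fanout hash: because
 * TCP_ACCEPTOR_FANOUT_SIZE is a power of 2, hashing an acceptor id reduces
 * to masking off its low-order bits.  The macro name below is hypothetical;
 * the real bodies belong to the TCP_ACCEPTOR_HASH definitions above.
 */
#if 0
#define	TCP_ACCEPTOR_HASH_SKETCH(accid)	\
	((unsigned int)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#endif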
/* Minimum number of connections per listener. */
/* TCP Timer control structure */
typedef struct tcpt_s {
} tcpt_t;
/*
* Functions called directly via squeue having a prototype of edesc_t.
*/
/* Prototype for TCP functions */
static void tcp_random_init(void);
int tcp_random(void);
tcp_stack_t *);
static int tcp_squeue_switch(int);
static void tcp_squeue_add(squeue_t *);
struct module_info tcp_rinfo = {
};
static struct module_info tcp_winfo = {
};
/*
* Entry points for TCP as a device. The normal case which supports
* the TCP functionality.
*/
struct qinit tcp_rinitv4 = {
};
struct qinit tcp_rinitv6 = {
};
};
/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
};
/* TCP entry point during fallback */
struct qinit tcp_fallback_sock_winit = {
};
/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
* an accept. Avoid allocating data structures since eager has already
* been created.
*/
struct qinit tcp_acceptor_rinit = {
};
struct qinit tcp_acceptor_winit = {
};
};
};
/*
* Following assumes TPI alignment requirements stay along 32 bit
* boundaries
*/
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
/* Template for response to info request. */
struct T_info_ack tcp_g_t_info_ack = {
T_INFO_ACK, /* PRIM_type */
0, /* TSDU_size */
T_INFINITE, /* ETSDU_size */
T_INVALID, /* CDATA_size */
T_INVALID, /* DDATA_size */
sizeof (sin_t), /* ADDR_size */
0, /* OPT_size - not initialized here */
TIDUSZ, /* TIDU_size */
T_COTS_ORD, /* SERV_type */
TCPS_IDLE, /* CURRENT_state */
};
struct T_info_ack tcp_g_t_info_ack_v6 = {
T_INFO_ACK, /* PRIM_type */
0, /* TSDU_size */
T_INFINITE, /* ETSDU_size */
T_INVALID, /* CDATA_size */
T_INVALID, /* DDATA_size */
sizeof (sin6_t), /* ADDR_size */
0, /* OPT_size - not initialized here */
TIDUSZ, /* TIDU_size */
T_COTS_ORD, /* SERV_type */
TCPS_IDLE, /* CURRENT_state */
};
/* Max size IP datagram is 64k - 1 */
/* Max of the above */
#define TCP_MSS_MAX TCP_MSS_MAX_IPV4
/* Largest TCP port number */
/*
 * Extra space to allocate in front of the TCP header, typically for the link
 * layer header. It has to be a multiple of 4.
*/
/*
* Note that the default value of "tcp_time_wait_interval" is four minutes,
* per the TCP spec.
*/
/* BEGIN CSTYLED */
static tcpparam_t lcl_tcp_param_arr[] = {
/*min max value name */
{ 1, 1024, 1, "tcp_conn_req_min" },
{ 0, 10, 0, "tcp_debug" },
{ 1024, (32*1024), 1024, "tcp_smallest_nonpriv_port"},
{ 1, 255, 64, "tcp_ipv4_ttl"},
{ 0, 100, 10, "tcp_maxpsz_multiplier" },
{ 1, (64*1024)-1, (4*1024)-1, "tcp_naglim_def"},
{ 0, 16, 0, "tcp_snd_lowat_fraction" },
{ 1, 10000, 3, "tcp_dupack_fast_retransmit" },
{ 0, 1, 0, "tcp_ignore_path_mtu" },
{ 1, 65536, 4, "tcp_recv_hiwat_minmss"},
/*
* Question: What default value should I set for tcp_strong_iss?
*/
{ 0, 2, 1, "tcp_strong_iss"},
{ 0, 65536, 20, "tcp_rtt_updates"},
{ 0, 1, 1, "tcp_wscale_always"},
{ 0, 1, 0, "tcp_tstamp_always"},
{ 0, 1, 1, "tcp_tstamp_if_wscale"},
{ 0, 16, 2, "tcp_deferred_acks_max"},
{ 1, 16384, 4, "tcp_slow_start_after_idle"},
{ 1, 4, 4, "tcp_slow_start_initial"},
{ 0, 2, 2, "tcp_sack_permitted"},
{ 0, 1, 0, "tcp_rev_src_routes"},
{ 0, 16, 8, "tcp_local_dacks_max"},
{ 0, 2, 1, "tcp_ecn_permitted"},
{ 0, 1, 1, "tcp_rst_sent_rate_enabled"},
{ 0, 1, 0, "tcp_use_smss_as_mss_opt"},
{ 0, 1, 0, "tcp_dev_flow_ctl"},
};
/* END CSTYLED */
#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)
/*
* Forces all connections to obey the value of the tcps_maxpsz_multiplier
* tunable settable via NDD. Otherwise, the per-connection behavior is
* determined dynamically during tcp_set_destination(), which is the default.
*/
/*
* If the receive buffer size is changed, this function is called to update
* the upper socket layer on the new delayed receive wake up threshold.
*/
static void
{
struct sock_proto_props sopp;
/*
 * Only increase rcvthresh up to default_threshold.
*/
if (new_rcvthresh > default_threshold)
}
}
/*
 * Figure out the value of the window scale option. Note that the rwnd is
* ASSUMED to be rounded up to the nearest MSS before the calculation.
* We cannot find the scale value and then do a round up of tcp_rwnd
* because the scale value may not be correct after that.
*
* Set the compiler flag to make this function inline.
*/
void
{
int i;
i++, rwnd >>= 1)
;
tcp->tcp_rcv_ws = i;
}
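/*
 * Standalone sketch (compiled out) of the computation above: shift the
 * MSS-rounded receive window right until it fits in the 16-bit TCP window
 * field, counting the shifts; the count is the advertised window scale.
 * TCP_MAXWIN (65535) and TCP_MAX_WINSHIFT (14) are the RFC 1323 limits.
 */
#if 0
static int
tcp_ws_sketch(uint32_t rwnd)
{
	int ws;

	for (ws = 0; rwnd > TCP_MAXWIN && ws < TCP_MAX_WINSHIFT;
	    ws++, rwnd >>= 1)
		;
	return (ws);
}
#endif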
/*
*/
void
{
}
}
}
}
}
/*
 * Cleanup before placing on free list.
* Disassociate from the netstack/tcp_stack_t since the freelist
* is per squeue and not per netstack.
*/
void
{
/* Cleanup that which needs the netstack first */
connp->conn_ht_iphc_allocated = 0;
connp->conn_ht_iphc_len = 0;
connp->conn_ht_ulp_len = 0;
}
/* We clear any IP_OPTIONS and extension headers */
/* Release any SSL context */
}
}
/*
* Since we will bzero the entire structure, we need to
* remove it and reinsert it in global hash list. We
* know the walkers can't get to this conn because we
* had set CONDEMNED flag earlier and checked reference
 * under conn_lock so a walker won't pick it up, and when we
 * do the ipcl_globalhash_remove() below, no walker
* can get to it.
*/
/* Save some state */
}
/*
* Now it is safe to decrement the reference counts.
* This might be the last reference on the netstack
* in which case it will cause the freeing of the IP Instance.
*/
/* restore the state */
}
/*
* Adapt to the information, such as rtt and rtt_sd, provided from the
* DCE and IRE maintained by IP.
*
* Checks for multicast and broadcast destination address.
* Returns zero if ok; an errno on failure.
*
* Note that the MSS calculation here is based on the info given in
* the DCE and IRE. We do not do any calculation based on TCP options. They
* will be handled in tcp_input_data() when TCP knows which options to use.
*
* Note on how TCP gets its parameters for a connection.
*
* When a tcp_t structure is allocated, it gets all the default parameters.
* In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
* spipe, rpipe, ... from the route metrics. Route metric overrides the
* default.
*
* An incoming SYN with a multicast or broadcast destination address is dropped
* in ip_fanout_v4/v6.
*
* An incoming SYN with a multicast or broadcast source address is always
* dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
* conn_connect.
* The same logic in tcp_set_destination also serves to
* reject an attempt to connect to a broadcast or multicast (destination)
* address.
*/
int
{
int error;
/*
* Make sure we have a dce for the destination to avoid dce_ident
* contention for connected sockets.
*/
flags |= IPDF_UNIQUE_DCE;
if (!tcps->tcps_ignore_path_mtu)
/* Use conn_lock to satisfy ASSERT; tcp is already serialized */
if (error != 0)
return (error);
if (error != 0)
return (error);
} else {
}
}
if (uinfo.iulp_ssthresh != 0)
else
if (uinfo.iulp_spipe > 0) {
tcps->tcps_max_buf);
if (tcps->tcps_snd_lowat_fraction != 0) {
}
}
/*
* Note that up till now, acceptor always inherits receive
 * window from the listener. But if there are metrics
* associated with a host, we should use that instead of
* inheriting it from listener. Thus we need to pass this
* info back to the caller.
*/
if (uinfo.iulp_rpipe > 0) {
tcps->tcps_max_buf);
}
if (uinfo.iulp_rtomax > 0) {
}
/*
* Use the metric option settings, iulp_tstamp_ok and
* iulp_wscale_ok, only for active open. What this means
* is that if the other side uses timestamp or window
* scale option, TCP will also use those options. That
* is for passive open. If the application sets a
* large window, window scale is enabled regardless of
* the value in iulp_wscale_ok. This is the behavior
* since 2.6. So we keep it.
* The only case left in passive open processing is the
* check for SACK.
* For ECN, it should probably be like SACK. But the
* current value is binary, so we treat it like the other
 * cases. The metric only controls active open. For passive
* open, the ndd param, tcp_ecn_permitted, controls the
* behavior.
*/
if (!tcp_detached) {
/*
* The if check means that the following can only
* be turned on by the metrics only IRE, but not off.
*/
if (uinfo.iulp_tstamp_ok)
if (uinfo.iulp_wscale_ok)
if (uinfo.iulp_ecn_ok)
} else {
/*
* Passive open.
*
* As above, the if check means that SACK can only be
* turned on by the metric only IRE.
*/
}
}
/*
* XXX Note that currently, iulp_mtu can be as small as 68
 * because of PMTUd. So tcp_mss may go negative if the combined
* length of all those options exceeds 28 bytes. But because
* of the tcp_mss_min check below, we may not have a problem if
* tcp_mss_min is of a reasonable value. The default is 1 so
* the negative problem still exists. And the check defeats PMTUd.
* In fact, if PMTUd finds that the MSS should be smaller than
 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
* value.
*
* We do not deal with that now. All those problems related to
* PMTUd will be fixed later.
*/
/* Sanity check for MSS value. */
else
if (tcp->tcp_ipsec_overhead == 0)
/* Note that this is the maximum MSS, excluding all options. */
/*
* Update the tcp connection with LSO capability.
*/
/*
* Initialize the ISS here now that we have the full connection ID.
* The RFC 1948 method of initial sequence number generation requires
* knowledge of the full connection ID before setting the ISS.
*/
/*
* Make sure that conn is not marked incipient
* for incoming connections. A blind
* removal of incipient flag is cheaper than
* check and removal.
*/
return (0);
}
/*
* tcp_clean_death / tcp_close_detached must not be called more than once
* on a tcp. Thus every function that potentially calls tcp_clean_death
* must check for the tcp state before calling tcp_clean_death.
* Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
* tcp_timer_handler, all check for the tcp state.
*/
/* ARGSUSED */
void
{
}
/*
* We are dying for some reason. Try to do it gracefully. (May be called
* as writer.)
*
* Return -1 if the structure was not cleaned up (if the cleanup had to be
* done by a service procedure).
* TBD - Should the return value distinguish between the tcp_t being
* freed and it being reinitialized?
*/
int
{
queue_t *q;
if (tcp->tcp_linger_tid != 0 &&
}
if (TCP_IS_DETACHED(tcp)) {
if (tcp->tcp_hard_binding) {
/*
 * It's an eager that we are dealing with. We close the
* eager but in case a conn_ind has already gone to the
* listener, let tcp_accept_finish() send a discon_ind
* to the listener and drop the last reference. If the
* listener doesn't even know about the eager i.e. the
* conn_ind hasn't gone up, blow away the eager and drop
* the last reference as well. If the conn_ind has gone
* up, state should be BOUND. tcp_accept_finish
* will figure out that the connection has received a
* RST and will send a DISCON_IND to the application.
*/
if (!tcp->tcp_tconnind_started) {
} else {
}
} else {
}
return (0);
}
/*
* The connection is dead. Decrement listener connection counter if
* necessary.
*/
/*
* When a connection is moved to TIME_WAIT state, the connection
* counter is already decremented. So no need to decrement here
* again. See SET_TIME_WAIT() macro.
*/
}
/* Trash all inbound data */
if (!IPCL_IS_NONSTR(connp)) {
}
/*
* If we are at least part way open and there is error
* (err==0 implies no error)
* notify our client by a T_DISCON_IND.
*/
!TCP_IS_SOCKET(tcp)) {
/*
* Send M_FLUSH according to TPI. Because sockets will
* (and must) ignore FLUSHR we do that only for TPI
* endpoints and sockets in STREAMS mode.
*/
}
if (connp->conn_debug) {
"tcp_clean_death: discon err %d", err);
}
if (IPCL_IS_NONSTR(connp)) {
/* Direct socket, use upcall */
} else {
} else {
if (connp->conn_debug) {
"tcp_clean_death, sending M_ERROR");
}
}
}
/* SYN_SENT or SYN_RCVD */
/* ESTABLISHED or CLOSE_WAIT */
}
}
if (IPCL_IS_NONSTR(connp))
(void) tcp_do_unbind(connp);
return (-1);
}
/*
* In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
* to expire, stop the wait and finish the close.
*/
void
{
tcp->tcp_linger_tid = 0;
if (tcp->tcp_flow_stopped) {
}
if (tcp->tcp_timer_tid != 0) {
tcp->tcp_timer_tid = 0;
}
/*
* Need to cancel those timers which will not be used when
* TCP is detached. This has to be done before the conn_wq
* is cleared.
*/
goto finish;
}
/*
* If delta is zero the timer event wasn't executed and was
* successfully canceled. In this case we need to restart it
* with the minimal delta possible.
*/
if (delta >= 0) {
}
} else {
}
/* Signal closing thread that it can complete close */
}
void
{
/*
* Mark the conn as closing. ipsq_pending_mp_add will not
* add any mp to the pending mp list, after this conn has
* started closing.
*/
/*
* tcp_closemp_used is used below without any protection of a lock
 * as we don't expect anyone else to use it concurrently at this
* point otherwise it would be a major defect.
*/
else
/*
* are re-written by tcp_close_output().
*/
/*
* As CONN_CLOSING is set, no further ioctls should be passed down to
* IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
* tcp_wput_iocdata). If the ioctl was queued on an ipsq,
* conn_ioctl_cleanup should have found it and removed it. If the ioctl
* was still in flight at the time, we wait for it here. See comments
* for CONN_INC_IOCTLREF in ip.h for details.
*/
while (connp->conn_ioctlref > 0)
while (!tcp->tcp_closed) {
/*
* The cv_wait_sig() was interrupted. We now do the
* following:
*
* 1) If the endpoint was lingering, we allow this
* to be interrupted by cancelling the linger timeout
* and closing normally.
*
* 2) Revert to calling cv_wait()
*
* We revert to using cv_wait() to avoid an
* infinite loop which can occur if the calling
* thread is higher priority than the squeue worker
* thread and is bound to the same cpu.
*/
/* Entering squeue, bump ref count. */
}
break;
}
}
while (!tcp->tcp_closed)
/*
* In the case of listener streams that have eagers in the q or q0
* we wait for the eagers to drop their reference to us. conn_rq and
* conn_wq of the eagers point to our queues. By waiting for the
* refcnt to drop to 1, we are sure that the eagers have cleaned
* up their queue pointers and also dropped their references to us.
*/
if (tcp->tcp_wait_for_eagers) {
}
}
}
/*
* Called by tcp_close() routine via squeue when lingering is
* interrupted by a signal.
*/
/* ARGSUSED */
static void
{
if (tcp->tcp_linger_tid != 0 &&
}
}
/*
* Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
* Some stream heads get upset if they see these later on as anything but NULL.
*/
void
{
do {
}
}
/* Do detached close. */
void
{
/*
* Clustering code serializes TCP disconnect callbacks and
* cluster tcp list walks by blocking a TCP disconnect callback
* if a cluster tcp list walk is in progress. This ensures
* accurate accounting of TCPs in the cluster code even though
* the TCP list walk itself is not atomic.
*/
}
/*
* The tcp_t is going away. Remove it from all lists and set it
* to TCPS_CLOSED. The freeing up of memory is deferred until
* tcp_inactive. This is needed since a thread in tcp_rput might have
* done a CONN_INC_REF on this structure before it was removed from the
* hashes.
*/
void
{
if (!TCP_IS_SOCKET(tcp))
tcp->tcp_ibsegs = 0;
tcp->tcp_obsegs = 0;
/*
* This can be called via tcp_time_wait_processing() if TCP gets a
* SYN with sequence number outside the TIME-WAIT connection's
* window. So we need to check for TIME-WAIT state here as the
* connection counter is already decremented. See SET_TIME_WAIT()
* macro
*/
}
/*
* If we are an eager connection hanging off a listener that
* hasn't formally accepted the connection yet, get off his
* list and blow off any data that we have accumulated.
*/
/*
* tcp_tconnind_started == B_TRUE means that the
* conn_ind has already gone to listener. At
* this point, eager will be closed but we
 * leave it in the listener's eager list so that
* if listener decides to close without doing
* accept, we can clean this up. In tcp_tli_accept
* we take care of the case of accept on closed
* eager.
*/
if (!tcp->tcp_tconnind_started) {
/*
* We don't want to have any pointers to the
* listener queue, after we have released our
* reference on the listener
*/
} else {
}
}
/* Stop all the timers */
if (tcp->tcp_ip_addr_cache) {
IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
}
}
/* Decrement listener connection counter if necessary. */
if (tcp->tcp_flow_stopped)
/*
* If the tcp_time_wait_collector (which runs outside the squeue)
* is trying to remove this tcp from the time wait list, we will
* block in tcp_time_wait_remove while trying to acquire the
* tcp_time_wait_lock. The logic in tcp_time_wait_collector also
* requires the ipcl_hash_remove to be ordered after the
* tcp_time_wait_remove for the refcnt checks to work correctly.
*/
/*
* Mark the conn as CONDEMNED
*/
/* Release any SSL context */
}
}
}
/*
* tcp is dying (called from ipcl_conn_destroy and error cases).
* Free the tcp_t in either case.
*/
void
{
/* Free b_next chain */
}
}
}
}
}
tcp);
}
}
tcp->tcp_hopoptslen = 0;
}
tcp->tcp_dstoptslen = 0;
}
tcp->tcp_rthdrdstoptslen = 0;
}
tcp->tcp_rthdrlen = 0;
}
/*
 * The following really blows away a union.
 * Since it happens to have exactly two members of identical size,
 * the following code is enough.
*/
}
/*
*
* tcp_get_conn is used to get a clean tcp connection structure.
* It tries to reuse the connections put on the freelist by the
* time_wait_collector failing which it goes to kmem_cache. This
* way has two benefits compared to just allocating from and
* freeing to kmem_cache.
* 1) The time_wait_collector can free (which includes the cleanup)
* outside the squeue. So when the interrupt comes, we have a clean
* connection sitting in the freelist. Obviously, this buys us
* performance.
*
 * 2) Allocating directly from kmem_cache at this point
 * has multiple disadvantages - tying up the squeue during the alloc, and
 * we can't check the 'q' and 'q0' which are protected by squeue and would
 * blindly allocate memory which might have to be freed here if we are
 * not allowed to accept the connection. By using the freelist we avoid that
 * cost when we cannot accept the connection.
*
* Care should be taken to put the conn back in the same squeue's freelist
* from which it was allocated. Best results are obtained if conn is
* allocated from listener's squeue and freed to the same. Time wait
 * collector will free up the freelist if the connection ends up sitting
* there for too long.
*/
void *
{
netstack_t *ns;
return ((void *)connp);
}
/*
* Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
*/
if (tcp_rsrv_mp == NULL)
return (NULL);
return (NULL);
}
/*
* Register tcp_notify to listen to capability changes detected by IP.
* This upcall is made in the context of the call to conn_ip_output
* thus it is inside the squeue.
*/
return ((void *)connp);
}
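/*
 * Compiled-out sketch of the per-squeue freelist reuse described above.  The
 * types and names here (conn_free_sketch_t, squeue_sketch_t) are
 * hypothetical; in the real code tcp_get_conn() pops a pre-cleaned conn_t
 * off the squeue's free list and falls back to kmem_cache allocation only
 * when the list is empty.
 */
#if 0
typedef struct conn_free_sketch {
	struct conn_free_sketch *cf_next;
} conn_free_sketch_t;

typedef struct squeue_sketch {
	conn_free_sketch_t *sq_freelist;
} squeue_sketch_t;

static conn_free_sketch_t *
get_conn_sketch(squeue_sketch_t *sq)
{
	conn_free_sketch_t *c = sq->sq_freelist;

	if (c != NULL) {
		sq->sq_freelist = c->cf_next;	/* fast path: reuse a clean conn */
		return (c);
	}
	return (NULL);	/* slow path: allocate a fresh one in the real code */
}
#endif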
/*
* Handle connect to IPv4 destinations, including connections for AF_INET6
* sockets connecting to IPv4 mapped IPv6 destinations.
* Returns zero if OK, a positive errno, or a negative TLI error.
*/
static int
{
int error;
/* Check for attempt to connect to INADDR_ANY */
if (dstaddr == INADDR_ANY) {
/*
* SunOS 4.x and 4.3 BSD allow an application
* to connect a TCP socket to INADDR_ANY.
* When they do this, the kernel picks the
* address of one interface and uses it
* instead. The kernel usually ends up
* picking the address of the loopback
* interface. This is an undocumented feature.
* However, we provide the same thing here
* in order to have source and binary
* compatibility with SunOS 4.x.
* generate the T_CONN_CON.
*/
}
/* Handle __sin6_src_id if socket not bound to an IP address */
}
/*
* At this point the remote destination address and remote port fields
* in the tcp-four-tuple have been filled in the tcp structure. Now we
* have to see which state tcp was in so we can take appropriate action.
*/
/*
* We support a quick connect capability here, allowing
 * clients to transition directly from IDLE to SYN_SENT state.
* tcp_bindi will pick an unused port, insert the connection
* in the bind hash and transition to BOUND state.
*/
if (lport == 0)
return (-TNOADDR);
}
/*
* Lookup the route to determine a source address and the uinfo.
*/
if (error != 0)
return (error);
/*
* Don't let an endpoint connect to itself.
*/
return (-TBADADDR);
return (ipcl_conn_insert_v4(connp));
}
/*
* Handle connect to IPv6 destinations.
* Returns zero if OK, a positive errno, or a negative TLI error.
*/
static int
{
int error;
/*
* If we're here, it means that the destination address is a native
* IPv6 address. Return an error if conn_ipversion is not IPv6. A
* reason why it might not be IPv6 is if the socket was bound to an
* IPv4-mapped IPv6 address.
*/
return (-TBADADDR);
/*
* Interpret a zero destination to mean loopback.
* generate the T_CONN_CON.
*/
/* Handle __sin6_src_id if socket not bound to an IP address */
}
/*
* Take care of the scope_id now.
*/
} else {
}
/*
* At this point the remote destination address and remote port fields
* in the tcp-four-tuple have been filled in the tcp structure. Now we
* have to see which state tcp was in so we can take appropriate action.
*/
/*
* We support a quick connect capability here, allowing
 * clients to transition directly from IDLE to SYN_SENT state.
* tcp_bindi will pick an unused port, insert the connection
* in the bind hash and transition to BOUND state.
*/
if (lport == 0)
return (-TNOADDR);
}
/*
* Lookup the route to determine a source address and the uinfo.
*/
if (error != 0)
return (error);
/*
* Don't let an endpoint connect to itself.
*/
return (-TBADADDR);
return (ipcl_conn_insert_v6(connp));
}
/*
* Disconnect
* Note that unlike other functions this returns a positive tli error
* when it fails; it never returns an errno.
*/
static int
{
/*
* Right now, upper modules pass down a T_DISCON_REQ to TCP,
* when the stream is in BOUND state. Do not send a reset,
* since the destination IP address is not valid, and it can
* be the initialized value of all zeros (broadcast address).
*/
if (connp->conn_debug) {
}
return (TOUTSTATE);
}
/*
* According to TPI, for non-listeners, ignore seqnum
* and disconnect.
 * The following interpretation of -1 seqnum is historical
 * and implied by TPI? (TPI only states that for T_CONN_IND,
* a valid seqnum should not be -1).
*
* -1 means disconnect everything
* regardless even on a listener.
*/
/*
* The connection can't be on the tcp_time_wait_head list
* since it is not detached.
*/
/*
* If it used to be a listener, check to make sure no one else
* has taken the port before switching back to LISTEN state.
*/
} else {
/* Allow conn_bound_if listeners? */
ipst);
}
} else if (old_state > TCPS_BOUND) {
tcp->tcp_conn_req_max = 0;
/*
* If this end point is not going to become a listener,
* decrement the listener connection count if
* necessary. Note that we do not do this if it is
 * going to be a listener (the above if case) since
* then it may remove the counter struct.
*/
}
switch (old_state) {
case TCPS_SYN_SENT:
case TCPS_SYN_RCVD:
break;
case TCPS_ESTABLISHED:
case TCPS_CLOSE_WAIT:
break;
}
if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
(tcp->tcp_conn_req_cnt_q != 0)) {
tcp_eager_cleanup(tcp, 0);
}
return (0);
return (TBADSEQ);
}
return (0);
}
/*
* Our client hereby directs us to reject the connection request
* that tcp_input_listener() marked with 'seqnum'. Rejection consists
* of sending the appropriate RST, not an ICMP error.
*/
void
{
int error;
return;
}
if (error != 0)
else {
/* Send M_FLUSH according to TPI */
}
}
}
/*
* Note: No locks are held when inspecting tcp_g_*epriv_ports
* but instead the code relies on:
* - the fact that the address of the array and its size never changes
* - the atomic assignment of the elements of the array
*/
/* ARGSUSED */
static int
{
int i;
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
if (tcps->tcps_g_epriv_ports[i] != 0)
tcps->tcps_g_epriv_ports[i]);
}
return (0);
}
/*
* Hold a lock while changing tcp_g_epriv_ports to prevent multiple
* threads from changing it at the same time.
*/
/* ARGSUSED */
static int
{
long new_value;
int i;
/*
* Fail the request if the new value does not lie within the
* port number limits.
*/
return (EINVAL);
}
/* Check if the value is already in the list */
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
return (EEXIST);
}
}
/* Find an empty slot */
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
if (tcps->tcps_g_epriv_ports[i] == 0)
break;
}
if (i == tcps->tcps_g_num_epriv_ports) {
return (EOVERFLOW);
}
/* Set the new value */
return (0);
}
/*
* Hold a lock while changing tcp_g_epriv_ports to prevent multiple
* threads from changing it at the same time.
*/
/* ARGSUSED */
static int
{
long new_value;
int i;
/*
* Fail the request if the new value does not lie within the
* port number limits.
*/
new_value >= 65536) {
return (EINVAL);
}
/* Check that the value is already in the list */
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
break;
}
if (i == tcps->tcps_g_num_epriv_ports) {
return (ESRCH);
}
/* Clear the value */
tcps->tcps_g_epriv_ports[i] = 0;
return (0);
}
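/*
 * Compiled-out sketch of the add/remove logic above, operating on a local
 * array rather than tcps->tcps_g_epriv_ports and without the range check
 * and locking done by the real ndd set routines.
 */
#if 0
static int
epriv_add_sketch(uint16_t *ports, int nports, uint16_t port)
{
	int i, free_slot = -1;

	for (i = 0; i < nports; i++) {
		if (ports[i] == port)
			return (EEXIST);	/* already in the list */
		if (ports[i] == 0 && free_slot == -1)
			free_slot = i;		/* remember first empty slot */
	}
	if (free_slot == -1)
		return (EOVERFLOW);		/* no empty slot left */
	ports[free_slot] = port;
	return (0);
}

static int
epriv_remove_sketch(uint16_t *ports, int nports, uint16_t port)
{
	int i;

	for (i = 0; i < nports; i++) {
		if (ports[i] == port) {
			ports[i] = 0;		/* clear the slot */
			return (0);
		}
	}
	return (ESRCH);				/* value not in the list */
}
#endif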
/*
* Handle reinitialization of a tcp structure.
* Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
*/
static void
{
/* tcp_reinit should never be called for detached tcp_t's */
/* Cancel outstanding timers */
/*
* Reset everything in the state vector, after updating global
* MIB data from instance counters.
*/
tcp->tcp_ibsegs = 0;
tcp->tcp_obsegs = 0;
if (tcp->tcp_snd_zcopy_aware)
if (tcp->tcp_flow_stopped &&
}
/* Free b_next chain */
tcp->tcp_rcv_cnt = 0;
}
}
}
}
}
/*
 * The following is a union with two members of
 * identical type and size, so the following cleanup
 * is enough.
*/
/*
* The connection can't be on the tcp_time_wait_head list
* since it is not detached.
*/
if (tcp->tcp_kssl_pending) {
/* Don't reset if initialized by bind. */
}
}
}
/*
*/
/* Note that ixa_cred gets cleared in ixa_cleanup */
if (tcp->tcp_conn_req_max != 0) {
/*
* This is the case when a TLI program uses the same
* transport end point to accept a connection. This
* makes the TCP both a listener and acceptor. When
* this connection is closed, we need to set the state
* back to TCPS_LISTEN. Make sure that the eager list
* is reinitialized.
*
* Note that this stream is still bound to the four
* tuples of the previous connection in IP. If a new
* SYN with different foreign address comes in, IP will
* not find it and will send it to the global queue. In
* the global queue, TCP will do a tcp_lookup_listener()
* to find this stream. This works because this stream
* is only removed from connected hash.
*
*/
/*
* Initially set conn_recv to tcp_input_listener_unbound to try
* to pick a good squeue for the listener when the first SYN
* arrives. tcp_input_listener_unbound sets it to
* tcp_input_listener on that first SYN.
*/
connp->conn_fport = 0;
(void) ipcl_bind_insert(connp);
} else {
}
/*
* Initialize to default values
*/
}
/*
* Force values to zero that need be zero.
 * Do not touch values associated with the BOUND or LISTEN state
* since the connection will end up in that state after the reinit.
* NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
* structure!
*/
static void
{
#ifndef lint
#define DONTCARE(x)
#define PRESERVE(x)
#else
#define DONTCARE(x) ((x) = (x))
#define PRESERVE(x) ((x) = (x))
#endif /* lint */
/* Should be ASSERT NULL on these with new code! */
connp->conn_ht_iphc_allocated = 0;
connp->conn_ht_iphc_len = 0;
connp->conn_ht_ulp_len = 0;
}
/* We clear any IP_OPTIONS and extension headers */
tcp->tcp_valid_bits = 0;
tcp->tcp_last_rcv_lbolt = 0;
tcp->tcp_init_cwnd = 0;
tcp->tcp_urp_last_valid = 0;
tcp->tcp_hard_binding = 0;
tcp->tcp_fin_acked = 0;
tcp->tcp_fin_rcvd = 0;
tcp->tcp_fin_sent = 0;
tcp->tcp_ordrel_done = 0;
tcp->tcp_detached = 0;
tcp->tcp_zero_win_probe = 0;
tcp->tcp_loopback = 0;
tcp->tcp_localnet = 0;
tcp->tcp_syn_defense = 0;
tcp->tcp_set_timer = 0;
tcp->tcp_active_open = 0;
tcp->tcp_conn_def_q0 = 0;
tcp);
}
}
tcp->tcp_rcv_ws = 0;
tcp->tcp_snd_ws = 0;
tcp->tcp_ts_recent = 0;
tcp->tcp_initial_pmtu = 0;
tcp->tcp_cwnd_cnt = 0;
tcp->tcp_rtt_update = 0;
tcp->tcp_rack_cnt = 0;
tcp->tcp_rack_cur_max = 0;
tcp->tcp_rack_abs_max = 0;
tcp->tcp_max_swnd = 0;
tcp->tcp_client_errno = 0;
tcp->tcp_last_sent_len = 0;
tcp->tcp_dupack_cnt = 0;
/* Remove any remnants of mapped address binding */
} else {
}
connp->conn_bound_if = 0;
tcp->tcp_recvifindex = 0;
tcp->tcp_recvhops = 0;
tcp->tcp_closed = 0;
tcp->tcp_hopoptslen = 0;
}
tcp->tcp_dstoptslen = 0;
}
tcp->tcp_rthdrdstoptslen = 0;
}
tcp->tcp_rthdrlen = 0;
}
/* Reset fusion-related fields */
tcp->tcp_in_ack_unsent = 0;
#ifdef DEBUG
#endif
}
void
{
/*
* Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
* will be close to tcp_rexmit_interval_initial. By doing this, we
* allow the algorithm to adjust slowly to large fluctuations of RTT
* during first few transmissions of a connection as seen in slow
* links.
*/
tcp->tcp_timer_backoff = 0;
tcp->tcp_ms_we_have_waited = 0;
/*
* Fix it to tcp_ip_abort_linterval later if it turns out to be a
* passive open.
*/
/* NOTE: ISS is now set in tcp_set_destination(). */
/* Reset fusion-related fields */
/* We rebuild the header template on the next connect/conn_request */
/*
* Init the window scale to the max so tcp_rwnd_set() won't pare
* down tcp_rwnd. tcp_set_destination() will set the right value later.
*/
/*
* Init the tcp_debug option if it wasn't already set. This value
* determines whether TCP
* calls strlog() to print out debug messages. Doing this
* initialization here means that this value is not inherited thru
* tcp_reinit().
*/
if (!connp->conn_debug)
}
/*
* Update the TCP connection according to change of PMTU.
*
* Path MTU might have changed by either increase or decrease, so need to
* adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
* or negative MSS, since tcp_mss_set() will do it.
*/
void
{
return;
return;
/*
* Always call ip_get_pmtu() to make sure that IP has updated
* ixa_flags properly.
*/
/*
* Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
* IPsec overhead if applied. Make sure to use the most recent
* IPsec information.
*/
/*
* Nothing to change, so just return.
*/
return;
/*
* Currently, for ICMP errors, only PMTU decrease is handled.
*/
return;
/*
* Update ixa_fragsize and ixa_pmtu.
*/
/*
* Adjust MSS and all relevant variables.
*/
/*
* If the PMTU is below the min size maintained by IP, then ip_get_pmtu
* has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
* has a (potentially different) min size we do the same. Make sure to
* clear IXAF_DONTFRAG, which is used by IP to decide whether to
* fragment the packet.
*
 * LSO over IPv6 cannot be fragmented, so we need to disable LSO
 * when IPv6 fragmentation is needed.
*/
if (ixaflags & IXAF_PMTU_TOO_SMALL)
!(ixaflags & IXAF_PMTU_IPV4_DF)) {
}
}
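/*
 * Compiled-out sketch of the MSS recomputation described above: the new MSS
 * is the path MTU less the IP+TCP header template length and any IPsec
 * overhead; clamping of tiny or negative results is left to tcp_mss_set()
 * in the real code.
 */
#if 0
static int
mss_from_pmtu_sketch(uint32_t pmtu, uint32_t iphc_len, uint32_t ipsec_overhead)
{
	return ((int)pmtu - (int)iphc_len - (int)ipsec_overhead);
}
#endif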
int
{
int maxpsz;
if (TCP_IS_DETACHED(tcp))
return (mss);
} else if (tcp->tcp_maxpsz_multiplier == 0) {
/*
* Set the sd_qn_maxpsz according to the socket send buffer
* size, and sd_maxblk to INFPSZ (-1). This will essentially
* instruct the stream head to copyin user data into contiguous
* kernel-allocated buffers without breaking it up into smaller
* chunks. We round up the buffer size to the nearest SMSS.
*/
else
} else {
/*
* Set sd_qn_maxpsz to approx half the (receivers) buffer
* (and a multiple of the mss). This instructs the stream
* head to break down larger than SMSS writes into SMSS-
* size mblks, up to tcp_maxpsz_multiplier mblks at a time.
*/
/* Round up to nearest mss */
}
}
if (!(IPCL_IS_NONSTR(connp)))
if (set_maxblk)
return (mss);
}
static int
{
}
static int
{
}
conn_t *
int *errorp)
{
/*
* Find the proper zoneid and netstack.
*/
/*
* Special case for install: miniroot needs to be able to
* access files via NFS as though it were always in the
* global zone.
*/
} else {
netstack_t *ns;
int err;
return (NULL);
}
/*
* For exclusive stacks we set the zoneid to zero
* to make TCP operate as if in the global zone.
*/
else
}
/*
* Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
* so we drop it by one.
*/
return (NULL);
}
/*
* Besides asking IP to set the checksum for us, have conn_ip_output
* to do the following checks when necessary:
*
* IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
* IXAF_VERIFY_PMTU: verify PMTU changes
* IXAF_VERIFY_LSO: verify LSO capability changes
*/
if (!tcps->tcps_dev_flow_ctl)
if (isv6) {
} else {
}
/* Cache things in the ixa without any refhold */
/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
/*
* If the caller has the process-wide flag set, then default to MAC
* exempt mode. This allows read-down to unlabeled hosts.
*/
if (issocket) {
}
return (connp);
}
static int
{
int err;
return (0);
return (EINVAL);
} else {
/*
* Either minor numbers in the large arena were exhausted
 * or a non-socket application is doing the open.
* Try to allocate from the small arena.
*/
return (EBUSY);
}
}
if (flag & SO_FALLBACK) {
/*
 * A non-STREAMS socket needs a stream to fall back to
*/
qprocson(q);
return (0);
} else if (flag & SO_ACCEPTOR) {
q->q_qinfo = &tcp_acceptor_rinit;
/*
* the conn_dev and minor_arena will be subsequently used by
* tcp_tli_accept() and tcp_tpi_close_accept() to figure out
* the minor device number for this connection from the q_ptr.
*/
qprocson(q);
return (0);
}
return (err);
}
if (issocket) {
} else {
#ifdef _ILP32
#else
#endif /* _ILP32 */
}
/*
* Put the ref for TCP. Ref for IP was already put
 * by ipcl_conn_create. Also make the conn_t globally
* visible to walkers
*/
qprocson(q);
return (0);
}
/*
* conn_xmit_ipp. The headers include ip6_t, any extension
* headers, and the maximum size tcp header (to avoid reallocation
* on the fly for additional tcp options).
*
* Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
* Returns failure if can't allocate memory.
*/
int
{
char buf[TCP_MAX_HDR_LENGTH];
int error;
/*
* We might be called after the connection is set up, and we might
* have TS options already in the TCP header. Thus we save any
* existing tcp header.
*/
if (buflen != 0) {
}
/* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
if (error != 0)
return (error);
/*
* is stored in conn_sum for later use.
*/
/* restore any old tcp header */
if (buflen != 0) {
} else {
}
/*
* IP wants our header length in the checksum field to
* allow it to perform a single pseudo-header+checksum
* calculation on behalf of TCP.
* Include the adjustment for a source route once IP_OPTIONS is set.
*/
else
connp->conn_wroff) {
connp->conn_wroff);
}
return (0);
}
/* Get callback routine passed to nd_load by tcp_param_register */
/* ARGSUSED */
static int
{
return (0);
}
/*
* Walk through the param array specified registering each element with the
* named dispatch handler.
*/
static boolean_t
{
return (B_FALSE);
}
}
}
KM_SLEEP);
sizeof (tcpparam_t));
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
tcp_1948_phrase_set, NULL)) {
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
/*
* Dummy ndd variables - only to convey obsolescence information
* through printing of their name (no get or set routines)
* XXX Remove in future releases ?
*/
"tcp_close_wait_interval(obsoleted - "
return (B_FALSE);
}
return (B_TRUE);
}
/* ndd set routine for tcp_wroff_xtra. */
/* ARGSUSED */
static int
{
long new_value;
return (EINVAL);
}
/*
* Need to make sure new_value is a multiple of 4. If it is not,
* round it up. For future 64 bit requirement, we actually make it
* a multiple of 8.
*/
if (new_value & 0x7) {
}
return (0);
}
/* Set callback routine passed to nd_load by tcp_param_register */
/* ARGSUSED */
static int
{
long new_value;
return (EINVAL);
}
return (0);
}
/*
* tcp_rwnd_set() is called to adjust the receive window to a desired value.
* We do not allow the receive window to shrink. After setting rwnd,
* set the flow control hiwat of the stream.
*
* This function is called in 2 cases:
*
* 1) Before data transfer begins, in tcp_input_listener() for accepting a
* connection (passive open) and in tcp_input_data() for active connect.
* This is called after tcp_mss_set() when the desired MSS value is known.
 * This makes sure that our window size is a multiple of the other side's
* MSS.
* 2) Handling SO_RCVBUF option.
*
* It is ASSUMED that the requested size is a multiple of the current MSS.
*
* XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
* user requests so.
*/
int
{
/*
* Insist on a receive window that is at least
* tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
* funny TCP interactions of Nagle algorithm, SWS avoidance
* and delayed acknowledgement.
*/
if (!tcp_detached) {
}
/* Caller could have changed tcp_rwnd; update tha_win */
}
/*
* In the fusion case, the maxpsz stream head value of
* our peer is set according to its send buffer size
* and our receive buffer size; since the latter may
* have changed we need to update the peer's maxpsz.
*/
return (sth_hiwat);
}
if (tcp_detached)
else
/*
* If window size info has already been exchanged, TCP should not
* shrink the window. Shrinking window is doable if done carefully.
* We may add that support later. But so far there is not a real
* need to do that.
*/
/* MSS may have changed, do a round up again. */
}
/*
* tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
* can be applied even before the window scale option is decided.
*/
if (rwnd > max_transmittable_rwnd) {
/*
* If we're over the limit we may have to back down tcp_rwnd.
* The increment below won't work for us. So we set all three
* here and the increment below will have no effect.
*/
}
if (tcp->tcp_localnet) {
} else {
/*
* For a remote host on a different subnet (through a router),
* we ack every other packet to be conforming to RFC1122.
* tcp_deferred_acks_max is default to 2.
*/
}
else
tcp->tcp_rack_cur_max = 0;
/*
* Increment the current rwnd by the amount the maximum grew (we
* can not overwrite it since we might be in the middle of a
* connection.)
*/
/* Are we already connected? */
}
if (tcp_detached)
return (rwnd);
return (rwnd);
}
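/*
 * Compiled-out sketch of the MSS round-up used above: make rwnd the smallest
 * multiple of the MSS that is >= the requested size (the real code uses an
 * MSS_ROUNDUP-style macro for this).
 */
#if 0
static uint32_t
rwnd_roundup_sketch(uint32_t rwnd, uint32_t mss)
{
	return (((rwnd + mss - 1) / mss) * mss);
}
#endif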
int
{
case TCPS_BOUND:
case TCPS_LISTEN:
break;
default:
return (-TOUTSTATE);
}
/*
* Need to clean up all the eagers since after the unbind, segments
* will no longer be delivered to this listener stream.
*/
tcp_eager_cleanup(tcp, 0);
}
/* Clean up the listener connection counter if necessary. */
return (0);
}
/*
* This runs at the tail end of accept processing on the squeue of the
* new connection.
*/
/* ARGSUSED */
void
{
/* socket options */
struct sock_proto_props sopp;
/* We should just receive a single mblk that fits a T_discon_ind */
/*
* Drop the eager's ref on the listener, that was placed when
* this eager began life in tcp_input_listener.
*/
if (IPCL_IS_NONSTR(connp)) {
/* Safe to free conn_ind message */
}
/*
 * Someone blew off the eager before we could finish
* the accept.
*
 * The only reason the eager exists is because we put
 * a ref on it when the conn ind went up. We need to send
* a disconnect indication up while the last reference
* on the eager will be dropped by the squeue when we
* return.
*/
if (IPCL_IS_NONSTR(connp)) {
} else {
struct T_discon_ind *tdi;
/*
* Let us reuse the incoming mblk to avoid
* memory allocation failure problems. We know
* that the size of the incoming mblk i.e.
* stroptions is greater than sizeof
* T_discon_ind.
*/
sizeof (struct T_discon_ind));
if (tcp->tcp_issocket) {
tdi->SEQ_number = 0;
} else {
tdi->SEQ_number =
}
sizeof (struct T_discon_ind);
}
}
return;
}
/*
* This is the first time we run on the correct
* queue after tcp_accept. So fix all the q parameters
* here.
*/
/*
* Determine what write offset value to use depending on SACK and
* whether the endpoint is fused or not.
*/
/*
 * For fused tcp loopback, set the stream head's write
 * offset value to zero since we won't be needing any room
 * for TCP/IP headers; this also reduces the amount of work done by kmem.
 * The non-fused tcp loopback case is handled separately below.
*/
sopp.sopp_wroff = 0;
/*
* Update the peer's transmit parameters according to
* our recently calculated high water mark value.
*/
} else if (tcp->tcp_snd_sack_ok) {
} else {
}
/*
 * If this endpoint is handling SSL, then reserve extra
* offset and space at the end.
* Also have the stream head allocate SSL3_MAX_RECORD_LEN packets,
* overriding the previous setting. The extra cost of signing and
* encrypting multiple MSS-size records (12 of them with Ethernet),
* instead of a single contiguous one by the stream head
* largely outweighs the statistical reduction of ACKs, when
* applicable. The peer will also save on decryption and verification
* costs.
*/
}
/* Send the options up */
if (IPCL_IS_NONSTR(connp)) {
}
if (tcp->tcp_loopback) {
}
} else {
/*
* Let us reuse the incoming mblk to avoid
* memory allocation failure problems. We know
* that the size of the incoming mblk is at least
* stroptions
*/
struct stroptions *stropt;
}
/* Send the options up */
}
/*
*
* Adjust receive window in case it had decreased
* (because there is data <=> tcp_rcv_list != NULL)
* while the connection was detached. Note that
* in case the eager was flow-controlled, w/o this
* code, the rwnd may never open up again!
*/
if (IPCL_IS_NONSTR(connp)) {
int space_left;
int error;
&push) >= 0) {
}
}
if (space_left < 0) {
/*
* We should never be in middle of a
* fallback, the squeue guarantees that.
*/
}
}
tcp->tcp_rcv_cnt = 0;
} else {
/* We drain directly in case of fused tcp loopback */
}
}
(void) tcp_rcv_drain(tcp);
}
/*
* For fused tcp loopback, back-enable peer endpoint
* if it's currently flow-controlled.
*/
if (peer_tcp->tcp_flow_stopped) {
}
}
}
if (IPCL_IS_NONSTR(connp)) {
SOCK_OPCTL_SHUT_RECV, 0);
} else {
}
}
if (connp->conn_keepalive) {
tcp->tcp_ka_last_intrvl = 0;
}
/*
* At this point, eager is fully established and will
* have the following references -
*
* 2 references for connection to exist (1 for TCP and 1 for IP).
* 1 reference for the squeue which will be dropped by the squeue as
* soon as this function returns.
 * There will be 1 additional reference for being in the classifier
* hash list provided something bad hasn't happened.
*/
}
/*
* Common to TPI and sockfs accept code.
*/
/* ARGSUSED2 */
int
{
/*
 * Preallocate the discon_ind mblk also. tcp_accept_finish will
* use it if something failed.
*/
sizeof (struct stroptions)), BPRI_HI);
return (-TPROTO);
}
/* Put the ref for IP */
/*
* We should have minimum of 3 references on the conn
* at this point. One each for TCP and IP and one for
* the T_conn_ind that was sent up when the 3-way handshake
* completed. In the normal case we would also have another
* reference (making a total of 4) for the conn being in the
* classifier hash list. However the eager could have received
* an RST subsequently and tcp_closei_local could have removed
* the eager from the classifier hash list, hence we can't
* assert that reference.
*/
/*
* listener->tcp_eager_prev_q0 points to the TAIL of the
* deferred T_conn_ind queue. We need to get to the head
 * of the queue in order to send up T_conn_ind in the same
 * order in which the 3WHS completed.
*/
break;
else
}
/* None of the pending eagers can be sent up now */
goto no_more_eagers;
/* Move from q0 to q */
/* Make sure the tcp isn't in the list of droppables */
/*
* Insert at end of the queue because sockfs sends
* down T_CONN_RES in chronological order. Leaving
* the older conn indications at front of the queue
 * helps reduce search time.
*/
} else {
}
/* Need to get inside the listener perimeter */
}
/*
* At this point, the eager is detached from the listener
* but we still have an extra refs on eager (apart from the
* usual tcp references). The ref was placed in tcp_input_data
* before sending the conn_ind in tcp_send_conn_ind.
* The ref will be dropped in tcp_accept_finish().
*/
return (0);
}
/*
 * Check the usability of ZEROCOPY; in effect this checks the flag set by IP.
*/
{
if (do_tcpzcopy == 2)
zc_enabled = B_TRUE;
zc_enabled = B_TRUE;
if (!TCP_IS_DETACHED(tcp)) {
if (zc_enabled) {
ZCVMSAFE);
} else {
}
}
return (zc_enabled);
}
/*
 * Back off from a zero-copy message by copying data to a newly allocated
* message and freeing the original desballoca'ed segmapped message.
*
* This function is called by following two callers:
* 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
 * the original desballoca'ed message and notify sockfs. This is in re-
* transmit state.
 * 2. tcp_output: fix_xmitlist is set to B_FALSE. The STRUIO_ZCNOTIFY flag
 * needs to be copied to the new message.
*/
mblk_t *
{
if (IS_VMLOANED_MBLK(bp)) {
}
if (fix_xmitlist)
else
}
/*
* Copy saved information and adjust tcp_xmit_tail
* if needed.
*/
if (fix_xmitlist) {
}
/* Free the original message. */
}
}
} else {
}
/* Move forward. */
}
if (fix_xmitlist) {
}
return (head);
}
void
{
if (tcp->tcp_detached)
return;
if (IPCL_IS_NONSTR(connp)) {
return;
}
}
/*
* Update the TCP connection according to change of LSO capability.
*/
static void
{
/*
* We check against IPv4 header length to preserve the old behavior
* of only enabling LSO when there are no IP options.
* But this restriction might not be necessary at all. Before removing
 * it, we need to verify how LSO is handled for the source routing case, for
 * which IP does software checksum.
 *
 * For IPv6, whenever any extension header is needed, LSO is suppressed.
*/
return;
/*
* Either the LSO capability newly became usable, or it has changed.
*/
/*
 * If LSO is to be enabled, notify the stream head with a larger
 * data block.
*/
tcp->tcp_maxpsz_multiplier = 0;
} else { /* LSO capability is not usable any more. */
/*
 * If LSO is to be disabled, notify the stream head with a smaller
 * data block, and restore fragsize to the PMTU.
*/
}
}
}
/*
* Update the TCP connection according to change of ZEROCOPY capability.
*/
static void
{
if (tcp->tcp_snd_zcopy_on) {
if (!TCP_IS_DETACHED(tcp)) {
}
} else {
if (!TCP_IS_DETACHED(tcp)) {
ZCVMSAFE);
}
}
}
/*
* Notify function registered with ip_xmit_attr_t. It's called in the squeue
* so it's safe to update the TCP connection.
*/
/* ARGSUSED1 */
static void
{
switch (ntype) {
case IXAN_LSO:
break;
case IXAN_PMTU:
break;
case IXAN_ZCOPY:
break;
default:
break;
}
}
/*
* The TCP write service routine should never be called...
*/
/* ARGSUSED */
static void
{
}
/*
* Hash list lookup routine for tcp_t structures.
* Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
*/
tcp_t *
{
return (tcp);
}
}
return (NULL);
}
/*
* Hash list insertion routine for tcp_t structures.
*/
void
{
if (tcpnext)
}
/*
* Hash list removal routine for tcp_t structures.
*/
void
{
/*
* Extract the lock pointer in case there are concurrent
* hash_remove's for this instance.
*/
return;
if (tcp->tcp_ptpahn) {
if (tcpnext) {
}
}
}
/*
* Type three generator adapted from the random() function in 4.4 BSD:
*/
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Type 3 -- x**31 + x**3 + 1 */
#define DEG_3 31
#define SEP_3 3
/* Protected by tcp_random_lock */
void
tcp_random_init(void)
{
int i;
/*
* Use high-res timer and current time for seed. Gethrtime() returns
* a longlong, which may contain resolution down to nanoseconds.
* The current time will either be a 32-bit or a 64-bit quantity.
* XOR the two together in a 64-bit result variable.
* Convert the result to a 32-bit value by multiplying the high-order
* 32-bits by the low-order 32-bits.
*/
(result & 0xffffffff);
for (i = 1; i < DEG_3; i++)
+ 12345;
tcp_random_rptr = &tcp_random_state[0];
for (i = 0; i < 10 * DEG_3; i++)
(void) tcp_random();
}
/*
* tcp_random: Return a random number in the range [1 - (128K + 1)].
* This range is selected to be approximately centered on TCP_ISS / 2,
* and easy to compute. We get this value by generating a 32-bit random
* number, selecting out the high-order 17 bits, and then adding one so
* that we never return zero.
*/
int
tcp_random(void)
{
int i;
/*
* The high-order bits are more random than the low-order bits,
* so we select out the high-order 17 bits and add one so that
* we never return zero.
*/
if (++tcp_random_fptr >= tcp_random_end_ptr) {
} else if (++tcp_random_rptr >= tcp_random_end_ptr)
return (i);
}
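/*
 * Illustrative sketch (not part of the original file): the additive-feedback
 * step of a degree-31, separation-3 generator with the "high-order 17 bits
 * plus one" extraction described above.  It continues the hypothetical
 * sketch_state[] array from the previous sketch; the kernel's locking is
 * omitted here for brevity.
 */
static uint32_t *sketch_fptr = &sketch_state[SKETCH_SEP_3];
static uint32_t *sketch_rptr = &sketch_state[0];
static uint32_t *sketch_end = &sketch_state[SKETCH_DEG_3];

static int
sketch_random(void)
{
    int i;

    *sketch_fptr += *sketch_rptr;   /* additive feedback step */

    /* Keep the more-random high-order 17 bits; +1 avoids returning zero. */
    i = (int)(((*sketch_fptr >> 15) & 0x1ffff) + 1);

    /* Advance both pointers, wrapping around the state array. */
    if (++sketch_fptr >= sketch_end) {
        sketch_fptr = sketch_state;
        ++sketch_rptr;
    } else if (++sketch_rptr >= sketch_end) {
        sketch_rptr = sketch_state;
    }
    return (i);
}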
/*
* Split this function out so that if the secret changes, I'm okay.
*
* Initialize the tcp_iss_cookie and tcp_iss_key.
*/
static void
{
struct {
time_t t;
/*
* Start with the current absolute time.
*/
(void) drv_getparm(TIME, &t);
/*
* XXX - Need a more random number per RFC 1750, not this crap.
* OTOH, if what follows is pretty random, then I'm in better shape.
*/
/*
* The cpu_type_info is pretty non-random. Ugggh. It does serve
* as a good template.
*/
/*
* The pass-phrase. Normally this is supplied by user-called NDD.
*/
/*
* See 4010593 if this section becomes a problem again,
* but the local ethernet address is useful here.
*/
(void) localetheraddr(NULL,
/*
* Hash 'em all together. The MD5Final is called per-connection.
*/
sizeof (tcp_iss_cookie));
}
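/*
 * Illustrative sketch (not part of the original file): priming an RFC 1948
 * style ISS context by hashing several weakly-random, host-specific inputs
 * together, as the comment above describes.  It assumes the
 * MD5Init()/MD5Update() interface from <md5.h> (as found on illumos/Solaris);
 * the cookie fields are hypothetical stand-ins for the time, CPU-info,
 * pass-phrase and MAC-address inputs.
 */
#include <md5.h>
#include <string.h>
#include <time.h>

typedef struct iss_cookie_sketch {
    time_t          ic_time;            /* current absolute time */
    char            ic_cpuinfo[64];     /* non-random but host-specific */
    char            ic_passphrase[80];  /* administrator supplied */
    unsigned char   ic_macaddr[6];      /* local ethernet address */
} iss_cookie_sketch_t;

static MD5_CTX sketch_iss_key;          /* per-stack template context */

static void
sketch_iss_key_init(const char *passphrase, const unsigned char mac[6])
{
    iss_cookie_sketch_t ic;

    (void) memset(&ic, 0, sizeof (ic));
    ic.ic_time = time(NULL);
    (void) strncpy(ic.ic_cpuinfo, "hypothetical-cpu-info",
        sizeof (ic.ic_cpuinfo) - 1);
    (void) strncpy(ic.ic_passphrase, passphrase,
        sizeof (ic.ic_passphrase) - 1);
    (void) memcpy(ic.ic_macaddr, mac, sizeof (ic.ic_macaddr));

    /* Hash 'em all together; per-connection state is mixed in later. */
    MD5Init(&sketch_iss_key);
    MD5Update(&sketch_iss_key, (const void *)&ic, sizeof (ic));
}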
/*
* Set the RFC 1948 pass phrase
*/
/* ARGSUSED */
static int
{
/*
* Basically, value contains a new pass phrase. Pass it along!
*/
return (0);
}
/* ARGSUSED */
static int
{
return (0);
}
/*
* Called by IP when IP is loaded into the kernel
*/
void
tcp_ddi_g_init(void)
{
sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
sizeof (tcp_sack_info_t), 0,
/* Initialize the random number generator */
/* A single callback, independent of how many netstacks we have */
/*
* We want to be informed each time a stack is created or
* destroyed in the kernel, so we can maintain the
* set of tcp_stack_t's.
*/
}
#define INET_NAME "ip"
/*
* Initialize the TCP stack instance.
*/
static void *
{
tcpparam_t *pa;
int i;
int error = 0;
/* Initialize locks */
for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
}
for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
}
/* TCP's IPsec code calls the packet dropper. */
/*
* Note: To really walk the device tree you need the devinfo
* driver. The following is safe only because it uses ddi_root_node()
*/
/*
* Initialize RFC 1948 secret values. This will probably be reset once
* by the boot scripts.
*
* Use NULL name, as the name is caught by the new lockstats.
*
* Initialize with some random, non-guessable string, like the global
* T_INFO_ACK.
*/
sizeof (tcp_g_t_info_ack), tcps);
tcps->tcps_reclaim_tid = 0;
/*
* ncpus is the current number of CPUs, which can be bigger than
* boot_ncpus. But we don't want to use ncpus to allocate all the
* tcp_stats_cpu_t at system boot up time since it will be 1. While
* we handle adding CPUs in tcp_cpu_update(), it will be slow if
* there are many CPUs as we will be adding them 1 by 1.
*
* Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
* are not freed until the stack is going away. So there is no need
* to grab a lock to access the per CPU tcps_sc[x] pointer.
*/
KM_SLEEP);
for (i = 0; i < tcps->tcps_sc_cnt; i++) {
KM_SLEEP);
}
return (tcps);
}
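/*
 * Illustrative sketch (not part of the original file): a grow-only array of
 * per-CPU stat blocks in the style described above.  Entries are added as
 * CPUs appear and are never freed until stack teardown, so readers can
 * dereference a slot without taking a lock.  Names and types are
 * hypothetical.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct stats_cpu_sketch {
    uint64_t    sc_in_segs;
    uint64_t    sc_out_segs;
} stats_cpu_sketch_t;

typedef struct stack_sketch {
    int                 ss_sc_cnt;  /* never decreases */
    stats_cpu_sketch_t  **ss_sc;    /* one pointer per CPU */
} stack_sketch_t;

static int
sketch_stack_stats_init(stack_sketch_t *ss, int boot_ncpus)
{
    int i;

    /* Size for the boot-time CPU count; tcp_cpu_update()-style growth later. */
    ss->ss_sc_cnt = boot_ncpus;
    ss->ss_sc = calloc(boot_ncpus, sizeof (stats_cpu_sketch_t *));
    if (ss->ss_sc == NULL)
        return (-1);
    for (i = 0; i < ss->ss_sc_cnt; i++) {
        ss->ss_sc[i] = calloc(1, sizeof (stats_cpu_sketch_t));
        if (ss->ss_sc[i] == NULL)
            return (-1);
    }
    return (0);
}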
/*
* Called when the IP module is about to be unloaded.
*/
void
tcp_ddi_g_destroy(void)
{
tcp_g_kstat = NULL;
}
/*
* Free the TCP stack instance.
*/
static void
{
int i;
/*
* Setting tcps_reclaim to false tells tcp_reclaim_timer() not to restart
* the timer.
*/
if (tcps->tcps_reclaim_tid != 0)
for (i = 0; i < tcps->tcps_sc_cnt; i++)
for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
}
for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
}
}
/*
* Generate ISS, taking into account that NDD changes may happen halfway through.
* (If the iss is not zero, set it.)
*/
static void
{
switch (tcps->tcps_strong_iss) {
case 2:
/*
* Now that we've hashed into a unique per-connection sequence
* space, add a random increment per strong_iss == 1. So I
* guess we'll have to...
*/
/* FALLTHRU */
case 1:
break;
default:
break;
}
}
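/*
 * Illustrative sketch (not part of the original file): an RFC 1948 style ISS
 * computation in the spirit of the strong_iss == 2 case above.  The
 * connection 4-tuple is mixed into a copy of the pre-primed MD5 context (see
 * the sketch_iss_key sketch earlier), and a clock-driven plus random
 * increment is added so the sequence space still advances.  All names are
 * hypothetical; sketch_random() comes from the earlier generator sketch.
 */
typedef struct conn_tuple_sketch {
    uint32_t    ct_laddr;   /* local IPv4 address */
    uint32_t    ct_faddr;   /* remote IPv4 address */
    uint16_t    ct_lport;   /* local port */
    uint16_t    ct_fport;   /* remote port */
} conn_tuple_sketch_t;

static uint32_t
sketch_iss(const conn_tuple_sketch_t *ct, uint64_t nsec_since_boot)
{
    MD5_CTX     ctx = sketch_iss_key;   /* copy the primed context */
    uint32_t    digest[4];
    uint32_t    iss;

    MD5Update(&ctx, (const void *)ct, sizeof (*ct));
    MD5Final((void *)digest, &ctx);

    /* Unique per-connection offset, plus a clock-driven increment. */
    iss = digest[0] + (uint32_t)(nsec_since_boot >> 10);
    iss += (uint32_t)sketch_random();   /* small random increment */
    return (iss);
}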
/*
* tcp_{set,clr}qfull() functions are used to either set or clear QFULL
* on the specified backing STREAMS q. Note, the caller may make the
* decision to call based on the tcp_t.tcp_flow_stopped value which
* when checked outside the q's lock is only an advisory check ...
*/
void
{
if (tcp->tcp_closed)
return;
if (tcp->tcp_flow_stopped)
}
void
{
if (tcp->tcp_closed)
return;
}
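/*
 * Illustrative sketch (not part of the original file): the advisory-check
 * pattern described above.  A caller may look at the flow-stopped flag
 * without the lock to decide whether to call at all; set/clear re-check
 * under the queue lock, so a stale advisory read only costs an extra call.
 * Types, fields and locking are hypothetical; q_lock is assumed to have
 * been initialized with pthread_mutex_init().
 */
#include <pthread.h>

typedef struct queue_sketch {
    pthread_mutex_t q_lock;
    int             q_full;             /* protected by q_lock */
} queue_sketch_t;

typedef struct flow_sketch {
    queue_sketch_t  *f_q;
    volatile int    f_flow_stopped;     /* advisory when read unlocked */
    int             f_closed;
} flow_sketch_t;

static void
sketch_setqfull(flow_sketch_t *f)
{
    if (f->f_closed)
        return;
    (void) pthread_mutex_lock(&f->f_q->q_lock);
    if (!f->f_flow_stopped) {           /* re-check under the lock */
        f->f_q->q_full = 1;
        f->f_flow_stopped = 1;
    }
    (void) pthread_mutex_unlock(&f->f_q->q_lock);
}

static void
sketch_clrqfull(flow_sketch_t *f)
{
    if (f->f_closed)
        return;
    (void) pthread_mutex_lock(&f->f_q->q_lock);
    if (f->f_flow_stopped) {
        f->f_q->q_full = 0;
        f->f_flow_stopped = 0;
    }
    (void) pthread_mutex_unlock(&f->f_q->q_lock);
}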
static int
tcp_squeue_switch(int val)
{
switch (val) {
case 1:
rval = SQ_NODRAIN;
break;
case 2:
rval = SQ_PROCESS;
break;
default:
break;
}
return (rval);
}
/*
* This is called once for each squeue - globally for all stack
* instances.
*/
static void
{
sizeof (tcp_squeue_priv_t), KM_SLEEP);
/* Kick start the periodic TIME WAIT collector. */
if (tcp_free_list_max_cnt == 0) {
/*
* Limit number of entries to 1% of available memory / tcp_ncpus
*/
}
}
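/*
 * Illustrative sketch (not part of the original file): the free-list sizing
 * arithmetic described above.  When no explicit limit is configured, cap
 * each per-CPU free list at 1% of available memory divided across the CPUs,
 * expressed as a number of connection-sized entries.  Names are
 * hypothetical.  For example, with 4 GiB free, 8 CPUs and ~2 KiB entries
 * this yields roughly 2,600 entries per list.
 */
#include <stddef.h>

static size_t
sketch_free_list_max(size_t free_pages, size_t page_size, int ncpus,
    size_t entry_size)
{
    /* 1% of available memory, split across ncpus free lists. */
    return ((free_pages * page_size) / (100 * (size_t)ncpus * entry_size));
}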
/*
* Return a Unix error if the TLI error is TSYSERR; otherwise return a negative
* TLI error.
*/
int
{
int error;
if (connp->conn_debug) {
}
return (-TOUTSTATE);
}
if (error != 0)
return (error);
tcp->tcp_conn_req_max = 0;
return (0);
}
/*
* If the return value from this function is positive, it's a UNIX error.
* Otherwise, if it's negative, then the absolute value is a TLI error.
* The TPI routine tcp_tpi_connect() is a wrapper function for this.
*/
int
{
int error;
switch (len) {
default:
/*
* Should never happen
*/
return (EINVAL);
case sizeof (sin_t):
return (-TBADADDR);
}
if (connp->conn_ipv6_v6only) {
return (EAFNOSUPPORT);
}
break;
case sizeof (sin6_t):
return (-TBADADDR);
}
break;
}
/*
* If we're connecting to an IPv4-mapped IPv6 address, we need to
* make sure that the conn_ipversion is IPV4_VERSION. We
* need to do this before we call tcp_bindi() so that the port lookup
* code will look for ports in the correct port space (IPv4 and
* IPv6 have separate port spaces).
*/
if (connp->conn_ipv6_v6only)
return (EADDRNOTAVAIL);
}
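/*
 * Illustrative sketch (not part of the original file): handling an
 * IPv4-mapped IPv6 destination as described above.  If the socket is
 * v6-only the connect fails; otherwise the connection is switched to IPv4
 * before any port lookups so they happen in the IPv4 port space.  The conn
 * structure and its fields are hypothetical; IN6_IS_ADDR_V4MAPPED() is the
 * standard <netinet/in.h> test.
 */
#include <netinet/in.h>
#include <string.h>
#include <errno.h>

typedef struct conn_addr_sketch {
    int             ca_v6only;      /* IPV6_V6ONLY was set */
    int             ca_ipversion;   /* 4 or 6 */
    struct in_addr  ca_faddr_v4;    /* IPv4 form of the destination */
} conn_addr_sketch_t;

static int
sketch_connect_check_v4mapped(conn_addr_sketch_t *ca,
    const struct in6_addr *dst)
{
    if (!IN6_IS_ADDR_V4MAPPED(dst))
        return (0);                 /* genuine IPv6 destination */
    if (ca->ca_v6only)
        return (EADDRNOTAVAIL);     /* mapped dst not allowed on v6-only */

    /* Switch to IPv4 before any port-space lookups are done. */
    ca->ca_ipversion = 4;
    (void) memcpy(&ca->ca_faddr_v4, &dst->s6_addr[12],
        sizeof (ca->ca_faddr_v4));
    return (0);
}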
case TCPS_LISTEN:
/*
* Listening sockets are not allowed to issue connect().
*/
if (IPCL_IS_NONSTR(connp))
return (EOPNOTSUPP);
/* FALLTHRU */
case TCPS_IDLE:
/*
* We support quick connect, refer to comments in
* tcp_connect_*()
*/
/* FALLTHRU */
case TCPS_BOUND:
break;
default:
return (-TOUTSTATE);
}
/*
*/
}
/* Cache things in the ixa without any refhold */
if (is_system_labeled()) {
/* We need to restart with a label based on the cred */
}
} else {
/*
* Destination address is a mapped IPv6 address.
* Source bound address should be unspecified or
* IPv6 mapped address as well.
*/
if (!IN6_IS_ADDR_UNSPECIFIED(
&connp->conn_bound_addr_v6) &&
return (EADDRNOTAVAIL);
}
srcid);
}
} else {
srcid = 0;
}
if (error != 0)
goto connect_failed;
if (error != 0)
goto connect_failed;
/* connect succeeded */
/*
*/
/*
* Just make sure our rwnd is at least rcvbuf * MSS large, and round up
* to the nearest MSS.
*
* We do the round up here because we need to get the interface MTU
* first before we can do the round up.
*/
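/*
 * Illustrative sketch (not part of the original file): rounding the receive
 * window up to a whole number of MSS-sized segments once the interface MTU
 * (and hence the MSS) is known.  A hypothetical standalone version of the
 * MSS_ROUNDUP-style arithmetic; for example, a 65535-byte buffer with a
 * 1460-byte MSS rounds up to 45 * 1460 = 65700 bytes.
 */
#include <stdint.h>

static uint32_t
sketch_mss_roundup(uint32_t value, uint32_t mss)
{
    /* Round value up to the next multiple of mss (value > 0, mss > 0). */
    return (((value - 1) / mss + 1) * mss);
}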
/*
* Set tcp_snd_ts_ok to true
* so that tcp_xmit_mp will
* include the timestamp
* option in the SYN segment.
*/
if (tcps->tcps_tstamp_always ||
}
/*
* tcp_snd_sack_ok can be set in
* tcp_set_destination() if the sack metric
* is set. So check it here also.
*/
tcp->tcp_snd_sack_ok) {
}
}
/*
* Should we use ECN? Note that the current
* default value (SunOS 5.9) of tcp_ecn_permitted
* is 1. The reason for doing this is that there
* is equipment out there that will drop ECN-enabled
* IP packets. Setting it to 1 avoids
* compatibility problems.
*/
/*
* We must bump the generation before sending the syn
* to ensure that we use the right generation in case
* this thread issues a "connected" up call.
*/
}
return (0);
connp->conn_fport = 0;
return (error);
}
int
{
int error = 0;
/* All Solaris components should pass a cred for this operation. */
/*
* Handle listen() increasing backlog.
* This is more "liberal" than what the TPI spec
* requires but is needed to avoid a t_unbind
* when handling listen() since the port number
* might be "stolen" between the unbind and bind.
*/
goto do_listen;
}
if (connp->conn_debug) {
}
return (-TOUTSTATE);
} else {
/* Do an implicit bind: Request for a generic port. */
} else {
}
}
if (error)
return (error);
/* Fall through and do the fanout insertion */
}
if (tcp->tcp_conn_req_max) {
/*
* If this is a listener, do not reset the eager list
* and other state. Note that we don't check if the
* existing eager list meets the new tcp_conn_req_max
* requirement.
*/
/* Initialize the chain. Don't need the eager_lock */
}
}
/*
* We need to make sure that the conn_recv is set to a non-null
* value before we insert the conn into the classifier table.
* This is to avoid a race with an incoming packet which does an
* ipcl_classify().
* We initially set it to tcp_input_listener_unbound to try to
* pick a good squeue for the listener when the first SYN arrives.
* tcp_input_listener_unbound sets it to tcp_input_listener on that
* first SYN.
*/
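/*
 * Illustrative sketch (not part of the original file): the publish-after-init
 * pattern described above.  The receive callback is set before the object
 * becomes reachable through the lookup table, so a concurrent lookup can
 * never find a listener with a NULL handler.  C11 atomics provide the
 * release ordering for the publish step; names are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct listener_sketch {
    void            (*ls_recv)(struct listener_sketch *, void *pkt);
    unsigned short  ls_port;
} listener_sketch_t;

/* One-slot "classifier table" for the sketch. */
static _Atomic(listener_sketch_t *) sketch_classifier_slot;

static void
sketch_publish_listener(listener_sketch_t *ls,
    void (*first_syn_handler)(listener_sketch_t *, void *))
{
    /* Initialize everything a classifier hit might touch... */
    ls->ls_recv = first_syn_handler;

    /* ...then make it visible to concurrent lookups. */
    atomic_store_explicit(&sketch_classifier_slot, ls, memory_order_release);
}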
/* Insert the listener in the classifier table */
if (error != 0) {
/* Undo the bind - release the port number */
connp->conn_ports = 0;
if (connp->conn_anon_port) {
}
return (error);
} else {
/*
* If there is a connection limit, allocate and initialize
* the counter struct. Note that since listen can be called
* multiple times, the struct may have been already allocated.
*/
if (ratio != 0) {
KM_SLEEP);
/*
* Calculate the connection limit based on
* the configured ratio and maxusers. Maxusers
* is calculated based on memory size,
* ~ 1 user per MB. Note that the conn_rcvbuf
* and conn_sndbuf may change after a
* connection is accepted. So what we have
* is only an approximation.
*/
} else {
}
/* At least we should allow two connections! */
}
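/*
 * Illustrative sketch (not part of the original file): the per-listener
 * connection-limit arithmetic described above.  maxusers is derived from
 * memory size (roughly one user per MB) and divided by the configured
 * ratio; the result is clamped so at least two connections are always
 * allowed.  Names are hypothetical.
 */
#include <stdint.h>

static uint32_t
sketch_conn_limit(uint64_t phys_mem_bytes, uint32_t ratio)
{
    uint32_t    maxusers = (uint32_t)(phys_mem_bytes / (1024 * 1024));
    uint32_t    limit;

    if (ratio == 0)
        return (0);             /* 0 means "no limit" in this sketch */
    limit = maxusers / ratio;
    if (limit < 2)
        limit = 2;              /* at least allow two connections */
    return (limit);
}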
}
}
return (error);
}