tcp.c revision 8c0bf40606925ed935ffe66e78665e0a32791e48
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990 Mentat Inc. */
#pragma ident "%Z%%M% %I% %E% SMI"
const char tcp_version[] = "%Z%%M% %I% %E% SMI";
#define _SUN_TPI_VERSION 2
#include <sys/xti_inet.h>
#include <sys/ethernet.h>
#include <sys/multidata.h>
#include <sys/multidata_impl.h>
#include <sys/isa_defs.h>
#include <sys/sodirect.h>
#include <inet/ipsec_impl.h>
#include <inet/kstatcom.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_info.h>
#include <inet/tcp_trace.h>
#include <inet/ipclassifier.h>
#include <inet/ip_ftable.h>
#include <inet/ipp_common.h>
#include <inet/ip_netinfo.h>
#include <rpc/pmap_prot.h>
/*
* TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
*
* (Read the detailed design doc in PSARC case directory)
*
* The entire tcp state is contained in tcp_t and conn_t structures
* which are allocated in tandem using ipcl_conn_create() and passing
* IPCL_CONNTCP as a flag. We use 'conn_ref' and 'conn_lock' to protect
* the references on the tcp_t. The tcp_t structure is never compressed
* and packets always land on the correct TCP perimeter from the time the
* eager is created till the time the tcp_t dies (as such the old mentat
* TCP global queue is not used for detached state and no IPSEC checking
* is required). The global queue is still allocated to send out resets
* for connections which have no listeners and IP directly calls
* tcp_xmit_listeners_reset() which does any policy check.
*
* Protection and Synchronisation mechanism:
*
* The tcp data structure does not use any kind of lock for protecting
* its state but instead uses 'squeues' for mutual exclusion from various
* read and write side threads. To access a tcp member, the thread should
* always be behind the squeue (via squeue_enter, squeue_enter_nodrain, or
* squeue_fill). Since the squeues allow a direct function call, a caller
* can pass any tcp function having a prototype of edesc_t as the argument
* (different from the traditional STREAMS model where packets come in only
* at designated entry points). The list of functions that can be directly
* called via squeue is listed before the usual function prototypes.
*
* Referencing:
*
* TCP is MT-Hot and we use a reference based scheme to make sure that the
* tcp structure doesn't disappear while it is needed. When the application
* creates an outgoing connection or accepts an incoming connection, we
* start out with 2 references on 'conn_ref'. One for TCP and one for IP.
* The IP reference is just a symbolic reference since ip_tcpclose()
* looks at the tcp structure after tcp_close_output() returns, which could
* have dropped the last TCP reference. So as long as the connection is
* in the attached state, i.e. !TCP_IS_DETACHED, we have 2 references on the
* conn_t. The classifier puts its own reference when the connection is
* inserted in the listen or connected hash. Any time a thread needs to
* enter on the write side, or classifies a packet on the read side, it
* puts a reference on the conn before doing squeue_enter/tryenter/fill.
* For the read side, the classifier itself puts the reference under the
* fanout lock to make sure that the tcp can't disappear before it gets
* processed. The squeue will drop this reference automatically so the
* called function doesn't have to do a DEC_REF.
*
* Opening a new connection:
*
* The outgoing connection open is pretty simple. tcp_open() does the
* squeue assignment based on the CPU the application
* is running on. So for outbound connections, processing is always done
* on application CPU which might be different from the incoming CPU
* being interrupted by the NIC. An optimal way would be to figure out
* the NIC <-> CPU binding at listen time, and assign the outgoing
* connection to the squeue attached to the CPU that will be interrupted
* for incoming packets (we know the NIC based on the bind IP address).
* This might seem like a problem if more data is going out but the
* fact is that in most cases the transmit is ACK driven transmit where
* the outgoing data normally sits on TCP's xmit queue waiting to be
* transmitted.
*
* Accepting a connection:
*
* This is a more interesting case because of various races involved in
* establishing an eager in its own perimeter. Read the meta comment on
* top of tcp_conn_request(). But briefly, the squeue is picked by
* ip_tcp_input()/ip_fanout_tcp_v6() based on the interrupted CPU.
*
* Closing a connection:
*
* The close is fairly straightforward. tcp_close() calls tcp_close_output()
* via the squeue to do the close and mark the tcp as detached if the
* connection was in state TCPS_ESTABLISHED or greater. In the latter case,
* TCP keeps its reference but tcp_close() always drops IP's reference. So
* if the tcp was not killed, it is sitting in the time_wait list with 2
* references - 1 for TCP and 1 because it is in the classifier's connected
* hash. This is the condition we use to determine that it is OK to clean up
* the tcp outside of the squeue when the time wait expires (check the ref
* under fanout and conn_lock and if it is 2, remove it from the fanout hash
* and kill it).
*
* Although close just drops the necessary references and marks the
* tcp_detached state, tcp_close needs to know the tcp_detached has been
* set (under the squeue) before letting the STREAM go away (because an
* inbound packet might attempt to go up the STREAM while the close
* has happened and tcp_detached is not set). So a special lock and
* flag is used along with a condition variable (tcp_closelock, tcp_closed,
* and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
* tcp_detached.
*
* Special provisions and fast paths:
*
* We make special provision for (AF_INET, SOCK_STREAM) sockets which
* can't have 'ipv6_recvpktinfo' set and for these type of sockets, IP
* will never send an M_CTL to TCP. As such, ip_tcp_input() which handles
* all TCP packets from the wire makes an IPCL_IS_TCP4_CONNECTED_NO_POLICY
* check to send packets directly to tcp_rput_data via squeue. Everyone
* else comes through tcp_input() on the read side.
*
* We also make special provisions for sockfs by marking tcp_issocket
* whenever we have only sockfs on top of TCP. This allows us to skip
* putting the tcp in the acceptor hash since a sockfs listener can never
* become an acceptor, and also to avoid allocating a tcp_t for the acceptor
* STREAM since the eager has already been allocated and the accept now
* happens on the acceptor STREAM. There is a big blob of comment on top of
* tcp_conn_request explaining the new accept. When the socket is POP'd,
* sockfs sends us an ioctl to mark the fact and we go back to the old
* behaviour. Once tcp_issocket is unset, it is never set for the
* life of that connection.
*
* Two consolidation private KAPIs are used to enqueue M_DATA mblk_t's
* directly to the socket (sodirect) and start an asynchronous copyout
* to a user-land receive-side buffer (uioa) when a blocking socket read
* (e.g. read, recv, ...) is pending.
*
* This is accomplished when tcp_issocket is set and tcp_sodirect is not
* NULL, i.e. it points to an sodirect_t; if that sodirect_t is marked
* enabled then we enqueue all mblk_t's directly to the socket.
*
* Further, if the sodirect_t's sod_uioa is marked enabled (due to a
* blocking socket read, e.g. user-land read, recv, ...) then an asynchronous
* copyout will be started directly to the user-land uio buffer. Also, as we
* have a pending read, TCP's push logic can take into account the number of
* bytes to be received and only wake the blocked read()er when the uioa_t
* byte count has been satisfied.
*
* IPsec notes :
*
* Since a packet is always executed on the correct TCP perimeter
* all IPsec processing is deferred to IP including checking new
* connections and setting IPSEC policies for new connections. The
* only exception is tcp_xmit_listeners_reset() which is called
* directly from IP and needs to do a policy check to see if TH_RST
* can be sent out.
*
* PFHooks notes :
*
* For the MDT case, one meta buffer contains multiple packets. Mblks for
* every packet are assembled and passed to the hooks. When packets are
* blocked, or the boundary of any packet is changed, the MDT processing is
* stopped, and packets of the meta buffer are sent to the IP path one by
* one.
*/
/*
* Values for squeue switch:
* 1: squeue_enter_nodrain
* 2: squeue_enter
* 3: squeue_fill
*/
int tcp_squeue_wput = 2;
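/*
 * Minimal sketch, for illustration only, of how the switch value above
 * could be mapped onto the squeue entry functions named in the preceding
 * comment. The "_sketch" name is hypothetical; the real mapping is done
 * by tcp_squeue_switch().
 */
static squeue_func_t
tcp_squeue_switch_sketch(int val)
{
	squeue_func_t rval = squeue_fill;

	switch (val) {
	case 1:
		rval = squeue_enter_nodrain;
		break;
	case 2:
		rval = squeue_enter;
		break;
	default:
		rval = squeue_fill;
		break;
	}
	return (rval);
}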
/*
* Macros for sodirect:
*
* SOD_PTR_ENTER(tcp, sodp) - for the tcp_t pointer "tcp", set "sodp" to the
* tcp's sodirect_t if it exists and is enabled, else to NULL. Note, in the
* current sodirect implementation the sod_lock must not be held across any
* STREAMS call (e.g. putnext) else a "recursive mutex_enter" PANIC
* will result as sod_lock is the streamhead stdata.sd_lock.
*
* SOD_NOT_ENABLED(tcp) - return true if not a sodirect tcp_t or the
* sodirect_t isn't enabled, useful for ASSERT()ing that a receive
* side tcp code path dealing with a tcp_rcv_list or putnext() isn't
* being used when sodirect code paths should be.
*/
\
} \
}
#define SOD_NOT_ENABLED(tcp) \
/*
* This controls how tiny a write must be before we try to copy it
* into the mblk on the tail of the transmit queue. Not much
* speedup is observed for values larger than sixteen. Zero will
* disable the optimisation.
*/
int tcp_tx_pull_len = 16;
/*
* TCP Statistics.
*
* How TCP statistics work.
*
* There are two types of statistics invoked by two macros.
*
* TCP_STAT(name) does non-atomic increment of a named stat counter. It is
* supposed to be used in non MT-hot paths of the code.
*
* TCP_DBGSTAT(name) does atomic increment of a named stat counter. It is
* supposed to be used for DEBUG purposes and may be used on a hot path.
*
* Both TCP_STAT and TCP_DBGSTAT counters are available using kstat
* (use "kstat tcp" to get them).
*
* There is also an additional debugging facility that marks tcp_clean_death()
* instances and saves them in tcp_t structure. It is triggered by
* TCP_TAG_CLEAN_DEATH define. Also, there is a global array of counters for
* tcp_clean_death() calls that counts the number of times each tag was hit. It
* is triggered by TCP_CLD_COUNTERS define.
*
* How to add new counters.
*
* 1) Add a field in the tcp_stat structure describing your counter.
* 2) Add a line in the template in tcp_kstat2_init() with the name
* of the counter.
*
* IMPORTANT!! - make sure that both are in sync !!
* 3) Use either TCP_STAT or TCP_DBGSTAT with the name.
*
* Please avoid using private counters which are not kstat-exported.
*
* TCP_TAG_CLEAN_DEATH set to 1 enables tagging of tcp_clean_death() instances
* in tcp_t structure.
*
* TCP_MAX_CLEAN_DEATH_TAG is the maximum number of possible clean death tags.
*/
#ifndef TCP_DEBUG_COUNTER
#ifdef DEBUG
#define TCP_DEBUG_COUNTER 1
#else
#define TCP_DEBUG_COUNTER 0
#endif
#endif
#define TCP_CLD_COUNTERS 0
#define TCP_TAG_CLEAN_DEATH 1
#define TCP_MAX_CLEAN_DEATH_TAG 32
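/*
 * Hypothetical example of the counter-addition procedure described above,
 * using an invented counter name purely for illustration:
 *
 *	1) in the tcp_stat structure:
 *		kstat_named_t	tcp_example_counter;
 *	2) in the tcp_kstat2_init() template:
 *		{ "tcp_example_counter",	KSTAT_DATA_UINT64 },
 *	3) at the point of interest:
 *		TCP_STAT(tcps, tcp_example_counter);
 */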
#ifdef lint
static int _lint_dummy_;
#endif
#if TCP_CLD_COUNTERS
#define TCP_CLD_STAT(x) tcp_clean_death_stat[x]++
#else
#define TCP_CLD_STAT(x)
#endif
#if TCP_DEBUG_COUNTER
#define TCP_DBGSTAT(tcps, x) \
#define TCP_G_DBGSTAT(x) \
#else
#define TCP_DBGSTAT(tcps, x)
#define TCP_G_DBGSTAT(x)
#endif
/*
* Call either ip_output or ip_output_v6. This replaces putnext() calls on the
* tcp write side.
*/
tcp_stack_t *tcps; \
\
}
/* Macros for timestamp comparisons */
/*
* Parameters for TCP Initial Send Sequence number (ISS) generation. When
* tcp_strong_iss is set to 1, which is the default, the ISS is calculated
* by adding three components: a time component which grows by 1 every 4096
* nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
* a per-connection component which grows by 125000 for every new connection;
* and an "extra" component that grows by a random amount centered
* approximately on 64000. This causes the ISS generator to cycle every
* 4.89 hours if no TCP connections are made, and faster if connections are
* made.
*
* When tcp_strong_iss is set to 0, ISS is calculated by adding two
* components: a time component which grows by 250000 every second; and
* a per-connection component which grows by 125000 for every new connection.
*
* A third method, when tcp_strong_iss is set to 2, for generating ISS is
* prescribed by Steve Bellovin. This involves adding time, the 125000 per
* connection, and a one-way hash (MD5) of the connection ID <sport, dport,
* src, dst>, a "truly" random (per RFC 1750) number, and a console-entered
* password.
*/
#define ISS_INCR 250000
#define ISS_NSEC_SHT 12
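/*
 * Minimal sketch, for illustration only, of the tcp_strong_iss == 0 case
 * described above: ISS = a time component that grows by ISS_INCR per
 * second plus a per-connection component that grows by ISS_INCR / 2 per
 * connection. The function and counter names here are hypothetical.
 */
static uint32_t tcp_iss_extra_sketch;

static uint32_t
tcp_iss_weak_sketch(void)
{
	tcp_iss_extra_sketch += ISS_INCR / 2;
	return ((uint32_t)gethrestime_sec() * ISS_INCR +
	    tcp_iss_extra_sketch);
}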
/*
* This implementation follows the 4.3BSD interpretation of the urgent
* pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
* incompatible changes in protocols like telnet and rlogin.
*/
#define TCP_OLD_URP_INTERPRETATION 1
#define TCP_IS_DETACHED_NONEAGER(tcp) \
(TCP_IS_DETACHED(tcp) && \
(!(tcp)->tcp_hard_binding))
/*
* TCP reassembly macros. We hide starting and ending sequence numbers in
* b_next and b_prev of messages on the reassembly queue. The messages are
* chained using b_cont. These macros are used in tcp_reass() so we don't
* have to see the ugly casts and assignments.
*/
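/*
 * Illustrative sketch only (hypothetical macro names) of the technique
 * described above: the starting and ending sequence numbers are stashed
 * in b_next and b_prev of the mblks sitting on the reassembly queue.
 */
#define	TCP_REASS_START_SKETCH(mp)	((uint32_t)(uintptr_t)((mp)->b_next))
#define	TCP_REASS_SET_START_SKETCH(mp, u) \
	((mp)->b_next = (mblk_t *)(uintptr_t)(u))
#define	TCP_REASS_END_SKETCH(mp)	((uint32_t)(uintptr_t)((mp)->b_prev))
#define	TCP_REASS_SET_END_SKETCH(mp, u) \
	((mp)->b_prev = (mblk_t *)(uintptr_t)(u))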
/*
* Implementation of TCP Timers.
* =============================
*
* INTERFACE:
*
* There are two basic functions dealing with tcp timers:
*
* timeout_id_t tcp_timeout(connp, func, time)
* clock_t tcp_timeout_cancel(connp, timeout_id)
* TCP_TIMER_RESTART(tcp, intvl)
*
* tcp_timeout() starts a timer for the 'tcp' instance arranging to call 'func'
* after 'time' ticks have passed. The function called by timeout() must adhere
* the same restrictions as a driver soft interrupt handler - it must not sleep
* or call other functions that might sleep. The value returned is the opaque
* non-zero timeout identifier that can be passed to tcp_timeout_cancel() to
* cancel the request. The call to tcp_timeout() may fail in which case it
* returns zero. This is different from the timeout(9F) function which never
* fails.
*
* The call-back function 'func' always receives 'connp' as its single
* argument. It is always executed in the squeue corresponding to the tcp
* structure. The tcp structure is guaranteed to be present at the time the
* call-back is called.
*
* NOTE: The call-back function 'func' is never called if tcp is in
* the TCPS_CLOSED state.
*
* tcp_timeout_cancel() attempts to cancel a pending tcp_timeout()
* request. Locks acquired by the call-back routine should not be held across
* the call to tcp_timeout_cancel() or a deadlock may result.
*
* tcp_timeout_cancel() returns -1 if it cannot cancel the timeout request.
* Otherwise, it returns an integer value greater than or equal to 0. In
* particular, if the call-back function is already placed on the squeue, it
* cannot be canceled.
*
* NOTE: both tcp_timeout() and tcp_timeout_cancel() should always be called
* within squeue context corresponding to the tcp instance. Since the
* call-back is also called via the same squeue, there are no race
* conditions described in untimeout(9F) manual page since all calls are
* strictly serialized.
*
* TCP_TIMER_RESTART() is a macro that attempts to cancel a pending timeout
* stored in tcp_timer_tid and starts a new one using
* MSEC_TO_TICK(intvl). It always uses tcp_timer() function as a call-back
* and stores the return value of tcp_timeout() in the tcp->tcp_timer_tid
* field.
*
* NOTE: since the timeout cancellation is not guaranteed, the cancelled
* call-back may still be called, so it is possible tcp_timer() will be
* called several times. This should not be a problem since tcp_timer()
* should always check the tcp instance state.
*
*
* IMPLEMENTATION:
*
* TCP timers are implemented using a three-stage process. The call to
* tcp_timeout() uses the timeout(9F) function to call tcp_timer_callback()
* when the timer expires. The tcp_timer_callback() arranges the call of the
* tcp_timer_handler() function via the squeue corresponding to the tcp
* instance. The tcp_timer_handler() calls the actual requested timeout
* call-back and passes the tcp instance as an argument to it. Information is
* passed between stages using the tcp_timer_t structure which contains the
* connp pointer, the tcp call-back to call and the timeout id returned by
* timeout(9F).
*
* The tcp_timer_t structure is not used directly, it is embedded in an mblk_t -
* like structure that is used to enter an squeue. The mp->b_rptr of this pseudo
* mblk points to the beginning of tcp_timer_t structure. The tcp_timeout()
* returns the pointer to this mblk.
*
* The pseudo mblk is allocated from a special tcp_timer_cache kmem cache. It
* looks like a normal mblk without actual dblk attached to it.
*
* To optimize performance each tcp instance holds a small cache of timer
* mblocks. In the current implementation it caches up to two timer mblocks per
* tcp instance. The cache is preserved over tcp frees and is only freed when
* the whole tcp structure is destroyed by its kmem destructor. Since all tcp
* timer processing happens on a corresponding squeue, the cache manipulation
* does not require any locks. Experiments show that the majority of timer
* mblock allocations are satisfied from the tcp cache and do not involve
* kmem calls.
*
* The tcp_timeout() places a refhold on the connp instance which guarantees
* that it will be present at the time the call-back function fires. The
* tcp_timer_handler() drops the reference after calling the call-back, so the
* call-back function does not need to manipulate the references explicitly.
*/
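/*
 * Minimal sketch of the TCP_TIMER_RESTART() behaviour described above,
 * written against the tcp_timeout()/tcp_timeout_cancel() interface
 * documented in this comment. The macro name is hypothetical and the
 * conn_t is assumed to be reachable as tcp->tcp_connp.
 */
#define	TCP_TIMER_RESTART_SKETCH(tcp, intvl) {				\
	if ((tcp)->tcp_timer_tid != 0) {				\
		(void) tcp_timeout_cancel((tcp)->tcp_connp,		\
		    (tcp)->tcp_timer_tid);				\
	}								\
	(tcp)->tcp_timer_tid = tcp_timeout((tcp)->tcp_connp, tcp_timer, \
	    MSEC_TO_TICK(intvl));					\
}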
typedef struct tcp_timer_s {
void (*tcpt_proc)(void *);
} tcp_timer_t;
static kmem_cache_t *tcp_timercache;
/*
* For scalability, we must not run a timer for every TCP connection
* in TIME_WAIT state. To see why, consider (for time wait interval of
* 4 minutes):
* 1000 connections/sec * 240 seconds/time wait = 240,000 active conn's
*
* This list is ordered by time, so you need only delete from the head
* until you get to entries which aren't old enough to delete yet.
* The list consists of only the detached TIME_WAIT connections.
*
* Note that the timer (tcp_time_wait_expire) is started when the tcp_t
* becomes detached TIME_WAIT (either by changing the state and already
* being detached or the other way around). This means that the TIME_WAIT
* state can be extended (up to doubled) if the connection doesn't become
* detached for a long time.
*
* The list manipulations (including tcp_time_wait_next/prev)
* are protected by the tcp_time_wait_lock. The content of the
* detached TIME_WAIT connections is protected by the normal perimeters.
*
* This list is per squeue and squeues are shared across the tcp_stack_t's.
* Things on tcp_time_wait_head remain associated with the tcp_stack_t
* and conn_netstack.
* The tcp_t's that are added to tcp_free_list are disassociated and
* have NULL tcp_tcps and conn_netstack pointers.
*/
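/*
 * Illustrative sketch of the "delete from the head until the entries are
 * not old enough" walk described above. It is simplified (singly linked,
 * no locking, no cleanup) and the helper name is hypothetical; see
 * tcp_time_wait_collector() below for the real thing.
 */
static void
tcp_time_wait_reap_sketch(tcp_t **headp, clock_t now)
{
	tcp_t *tcp;

	while ((tcp = *headp) != NULL &&
	    (clock_t)(now - tcp->tcp_time_wait_expire) >= 0) {
		*headp = tcp->tcp_time_wait_next;
		tcp->tcp_time_wait_next = NULL;
		tcp->tcp_time_wait_expire = 0;
		/* The real collector would now tear the connection down. */
	}
}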
typedef struct tcp_squeue_priv_s {
/*
* TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
* Running it every 5 seconds seems to give the best results.
*/
/*
* To prevent a memory hog, limit the number of entries in tcp_free_list
* to 1% of available memory / number of cpus.
*/
#define TCP_XMIT_LOWATER 4096
#define TCP_XMIT_HIWATER 49152
#define TCP_RECV_LOWATER 2048
#define TCP_RECV_HIWATER 49152
/*
* PAWS needs a timer for 24 days. This is the number of ticks in 24 days
*/
/*
* Bind hash list size and hash function. It has to be a power of 2 for
* hashing.
*/
#define TCP_BIND_FANOUT_SIZE 512
/*
* Size of listen and acceptor hash list. It has to be a power of 2 for
* hashing.
*/
#define TCP_FANOUT_SIZE 256
#ifdef _ILP32
#define TCP_ACCEPTOR_HASH(accid) \
#else
#define TCP_ACCEPTOR_HASH(accid) \
#endif /* _ILP32 */
#define IP_ADDR_CACHE_SIZE 2048
#define IP_ADDR_CACHE_HASH(faddr) \
/* Hash for HSPs uses all 32 bits, since both networks and hosts are in the table */
#define TCP_HSP_HASH_SIZE 256
#define TCP_HSP_HASH(addr) \
/*
* TCP options struct returned from tcp_parse_options.
*/
typedef struct tcp_opt_s {
} tcp_opt_t;
/*
* RFC1323-recommended phrasing of TSTAMP option, for easier parsing
*/
#ifdef _BIG_ENDIAN
#else
#endif
/*
* Flags returned from tcp_parse_options.
*/
#define TCP_OPT_MSS_PRESENT 1
#define TCP_OPT_WSCALE_PRESENT 2
#define TCP_OPT_TSTAMP_PRESENT 4
#define TCP_OPT_SACK_OK_PRESENT 8
#define TCP_OPT_SACK_PRESENT 16
/* TCP option length */
#define TCPOPT_NOP_LEN 1
#define TCPOPT_MAXSEG_LEN 4
#define TCPOPT_WS_LEN 3
#define TCPOPT_TSTAMP_LEN 10
#define TCPOPT_SACK_OK_LEN 2
#define TCPOPT_REAL_SACK_LEN 4
#define TCPOPT_MAX_SACK_LEN 36
#define TCPOPT_HEADER_LEN 2
/* TCP cwnd burst factor. */
#define TCP_CWND_INFINITE 65535
#define TCP_CWND_SS 3
#define TCP_CWND_NORMAL 5
#define TCP_MAX_INIT_CWND 8
/*
* Initialize cwnd according to RFC 3390. def_max_init_cwnd is
* either tcp_slow_start_initial or tcp_slow_start_after_idle
* depending on the caller. If the upper layer has not used the
* TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
* should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
* If the upper layer has set tcp_init_cwnd, just use
* it to calculate the tcp_cwnd.
*/
{ \
if ((tcp)->tcp_init_cwnd == 0) { \
} else { \
} \
tcp->tcp_cwnd_cnt = 0; \
}
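/*
 * Minimal sketch (hypothetical helper, not the original macro body) of
 * the initial cwnd policy described above: with tcp_init_cwnd == 0 the
 * RFC 3390 value min(4*MSS, max(2*MSS, 4380 bytes)) is used, capped by
 * def_max_init_cwnd; otherwise tcp_init_cwnd is used as given.
 */
static uint32_t
tcp_init_cwnd_sketch(uint32_t mss, uint32_t init_cwnd,
    uint32_t def_max_init_cwnd)
{
	uint32_t segs;

	if (init_cwnd == 0)
		segs = MIN(MIN(4, MAX(2, 4380 / mss)), def_max_init_cwnd);
	else
		segs = init_cwnd;
	return (segs * mss);
}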
/* TCP Timer control structure */
typedef struct tcpt_s {
} tcpt_t;
/* Host Specific Parameter structure */
typedef struct tcp_hsp {
struct tcp_hsp *tcp_hsp_next;
} tcp_hsp_t;
/*
* Functions called directly via squeue having a prototype of edesc_t.
*/
/* Prototype for TCP functions */
static void tcp_random_init(void);
int tcp_random(void);
int unixerr);
int caller_holds_lock);
void tcp_g_q_setup(tcp_stack_t *);
void tcp_g_q_create(tcp_stack_t *);
void tcp_g_q_destroy(tcp_stack_t *);
static void tcp_keepalive_killer(void *arg);
tcp_stack_t *);
static void tcp_timer_callback(void *);
const int tcp_hdr_len, const int tcp_tcp_hdr_len,
const int mdt_thres);
const int tcp_hdr_len, const int tcp_tcp_hdr_len,
const int mdt_thres);
int num_sack_blk);
static void tcp_ack_timer(void *arg);
extern mblk_t *tcp_timermp_alloc(int);
extern void tcp_timermp_free(tcp_t *);
static void tcp_close_linger_timeout(void *arg);
static void *tcp_g_kstat_init(tcp_g_stat_t *);
static void tcp_g_kstat_fini(kstat_t *);
static squeue_func_t tcp_squeue_switch(int);
static int tcpclose_accept(queue_t *);
static void tcp_squeue_add(squeue_t *);
static void tcp_zcopy_notify(tcp_t *);
/*
* Routines related to the TCP_IOC_ABORT_CONN ioctl command.
*
* TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
* TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
* (defined in tcp.h) needs to be filled in and passed into the kernel
* via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
* structure contains the four-tuple of a TCP connection and a range of TCP
* states (specified by ac_start and ac_end). The use of wildcard addresses
* and ports is allowed. Connections with a matching four tuple and a state
* within the specified range will be aborted. The valid states for the
* ac_start and ac_end fields are in the range TCPS_SYN_SENT to TCPS_TIME_WAIT,
* inclusive.
*
* An application which has its connection aborted by this ioctl will receive
* an error that is dependent on the connection state at the time of the abort.
* If the connection state is < TCPS_TIME_WAIT, an application should behave as
* though a RST packet has been received. If the connection state is equal to
* TCPS_TIME_WAIT, the 2MSL timeout will immediately be canceled by the kernel
* and all resources associated with the connection will be freed.
*/
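/*
 * Illustrative user-land sketch (not part of tcp itself, hence under
 * "#if 0") of issuing TCP_IOC_ABORT_CONN through an I_STR ioctl as
 * described above. The caller is assumed to have filled in the
 * connection four-tuple and the ac_start/ac_end state range beforehand.
 */
#if 0
#include <stropts.h>
#include <unistd.h>

static int
tcp_abort_conn_example(int fd, tcp_ioc_abort_conn_t *acp)
{
	struct strioctl si;

	si.ic_cmd = TCP_IOC_ABORT_CONN;
	si.ic_timout = 0;		/* default streamio(7I) timeout */
	si.ic_len = sizeof (*acp);
	si.ic_dp = (char *)acp;
	return (ioctl(fd, I_STR, &si));
}
#endif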
static void tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
static int tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
boolean_t, tcp_stack_t *);
static struct module_info tcp_rinfo = {
};
static struct module_info tcp_winfo = {
};
/*
* Entry points for TCP as a device. The normal case which supports
* the TCP functionality.
*/
struct qinit tcp_rinitv4 = {
};
struct qinit tcp_rinitv6 = {
};
};
/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
};
/*
* Entry points for TCP as an acceptor STREAM opened by sockfs when doing
* an accept. Avoid allocating data structures since eager has already
* been created.
*/
struct qinit tcp_acceptor_rinit = {
};
struct qinit tcp_acceptor_winit = {
};
/*
* Entry points for TCP loopback (read side only)
* The open routine is only used for reopens, thus no need to
* have a separate one for tcp_openv6.
*/
struct qinit tcp_loopback_rinit = {
};
};
};
/*
* Have to ensure that tcp_g_q_close is not done by an
* interrupt thread.
*/
/*
* TCP has a private interface for other kernel modules to reserve a
* port range for them to use. Once reserved, TCP will not use any ports
* in the range. This interface relies on the TCP_EXCLBIND feature. If
* the semantics of TCP_EXCLBIND is changed, implementation of this interface
* has to be verified.
*
* There can be TCP_RESERVED_PORTS_ARRAY_MAX_SIZE port ranges. Each port
* range can cover at most TCP_RESERVED_PORTS_RANGE_MAX ports. A port
* range is [port a, port b] inclusive. And each port range is between
* TCP_SMALLEST_RESERVED_PORT and TCP_LARGEST_RESERVED_PORT inclusive.
*
* Note that the default anonymous port range starts from 32768. There is
* no port "collision" between that and the reserved port range. If there
* is a port collision (because the default smallest anonymous port is
* lowered or some apps specifically bind to ports in the reserved port
* range), the system may not be able to reserve a port range even if there
* are enough unbound ports, as a reserved port range contains consecutive
* ports.
*/
#define TCP_RESERVED_PORTS_ARRAY_MAX_SIZE 5
#define TCP_RESERVED_PORTS_RANGE_MAX 1000
#define TCP_SMALLEST_RESERVED_PORT 10240
#define TCP_LARGEST_RESERVED_PORT 20480
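/*
 * Minimal sketch (hypothetical helper) of the constraints described
 * above for a requested reserved port range: how many ranges may exist,
 * how many ports one range may span, and where in the port space the
 * range must lie.
 */
static boolean_t
tcp_reserved_range_ok_sketch(in_port_t lo, in_port_t hi, int nranges)
{
	if (nranges >= TCP_RESERVED_PORTS_ARRAY_MAX_SIZE)
		return (B_FALSE);
	if (lo > hi || (hi - lo + 1) > TCP_RESERVED_PORTS_RANGE_MAX)
		return (B_FALSE);
	if (lo < TCP_SMALLEST_RESERVED_PORT || hi > TCP_LARGEST_RESERVED_PORT)
		return (B_FALSE);
	return (B_TRUE);
}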
/* Structure to represent those reserved port ranges. */
typedef struct tcp_rport_s {
} tcp_rport_t;
/*
* Following assumes TPI alignment requirements stay along 32 bit
* boundaries
*/
#define ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
/* Template for response to info request. */
static struct T_info_ack tcp_g_t_info_ack = {
T_INFO_ACK, /* PRIM_type */
0, /* TSDU_size */
T_INFINITE, /* ETSDU_size */
T_INVALID, /* CDATA_size */
T_INVALID, /* DDATA_size */
sizeof (sin_t), /* ADDR_size */
0, /* OPT_size - not initialized here */
TIDUSZ, /* TIDU_size */
T_COTS_ORD, /* SERV_type */
TCPS_IDLE, /* CURRENT_state */
};
static struct T_info_ack tcp_g_t_info_ack_v6 = {
T_INFO_ACK, /* PRIM_type */
0, /* TSDU_size */
T_INFINITE, /* ETSDU_size */
T_INVALID, /* CDATA_size */
T_INVALID, /* DDATA_size */
sizeof (sin6_t), /* ADDR_size */
0, /* OPT_size - not initialized here */
TIDUSZ, /* TIDU_size */
T_COTS_ORD, /* SERV_type */
TCPS_IDLE, /* CURRENT_state */
};
#define MS 1L
/* Max size IP datagram is 64k - 1 */
/* Max of the above */
#define TCP_MSS_MAX TCP_MSS_MAX_IPV4
/* Largest TCP port number */
/*
* layer header. It has to be a multiple of 4.
*/
/*
* Note that the default value of "tcp_time_wait_interval" is four minutes,
* per the TCP spec.
*/
/* BEGIN CSTYLED */
static tcpparam_t lcl_tcp_param_arr[] = {
/*min max value name */
{ 1, 1024, 1, "tcp_conn_req_min" },
{ 128, (1<<30), 1024*1024, "tcp_cwnd_max" },
{ 0, 10, 0, "tcp_debug" },
{ 1024, (32*1024), 1024, "tcp_smallest_nonpriv_port"},
{ 1, 255, 64, "tcp_ipv4_ttl"},
{ 0, 100, 10, "tcp_maxpsz_multiplier" },
{ 1, (64*1024)-1, (4*1024)-1, "tcp_naglim_def"},
{ 0, 16, 0, "tcp_snd_lowat_fraction" },
{ 0, 128000, 0, "tcp_sth_rcv_hiwat" },
{ 0, 128000, 0, "tcp_sth_rcv_lowat" },
{ 1, 10000, 3, "tcp_dupack_fast_retransmit" },
{ 0, 1, 0, "tcp_ignore_path_mtu" },
{ 1, 65536, 4, "tcp_recv_hiwat_minmss"},
{ 8192, (1<<30), 1024*1024, "tcp_max_buf"},
/*
* Question: What default value should I set for tcp_strong_iss?
*/
{ 0, 2, 1, "tcp_strong_iss"},
{ 0, 65536, 20, "tcp_rtt_updates"},
{ 0, 1, 1, "tcp_wscale_always"},
{ 0, 1, 0, "tcp_tstamp_always"},
{ 0, 1, 1, "tcp_tstamp_if_wscale"},
{ 0, 16, 2, "tcp_deferred_acks_max"},
{ 1, 16384, 4, "tcp_slow_start_after_idle"},
{ 1, 4, 4, "tcp_slow_start_initial"},
{ 0, 2, 2, "tcp_sack_permitted"},
{ 0, 1, 0, "tcp_trace"},
{ 0, 1, 1, "tcp_compression_enabled"},
{ 0, 1, 0, "tcp_rev_src_routes"},
{ 0, 16, 8, "tcp_local_dacks_max"},
{ 0, 2, 1, "tcp_ecn_permitted"},
{ 0, 1, 1, "tcp_rst_sent_rate_enabled"},
{ 0, 1, 0, "tcp_use_smss_as_mss_opt"},
};
/* END CSTYLED */
/*
* tcp_mdt_hdr_{head,tail}_min are the leading and trailing spaces of
* each header fragment in the header buffer. Each parameter value has
* to be a multiple of 4 (32-bit aligned).
*/
static tcpparam_t lcl_tcp_mdt_head_param =
{ 32, 256, 32, "tcp_mdt_hdr_head_min" };
static tcpparam_t lcl_tcp_mdt_tail_param =
{ 0, 256, 32, "tcp_mdt_hdr_tail_min" };
/*
* tcp_mdt_max_pbufs is the upper limit value that tcp uses to figure out
* the maximum number of payload buffers associated per Multidata.
*/
static tcpparam_t lcl_tcp_mdt_max_pbufs_param =
/* Round up the value to the nearest mss. */
/*
* Set ECN capable transport (ECT) code point in IP header.
*
* Note that there are 2 ECT code points '01' and '10', which are called
* ECT(1) and ECT(0) respectively. Here we follow the original ECT code
* point ECT(0) for TCP as described in RFC 2481.
*/
/* We need to clear the code point first. */ \
} else { \
}
/*
* The format argument to pass to tcp_display().
* DISP_PORT_ONLY means that the returned string has only port info.
* DISP_ADDR_AND_PORT means that the returned string also contains the
* remote and local IP address.
*/
#define DISP_PORT_ONLY 1
#define DISP_ADDR_AND_PORT 2
#define NDD_TOO_QUICK_MSG \
"ndd get info rate too high for non-privileged users, try again " \
"later.\n"
#define NDD_OUT_OF_BUF_MSG "<< Out of buffer >>\n"
#define IS_VMLOANED_MBLK(mp) \
/* Enable or disable b_cont M_MULTIDATA chaining for MDT. */
/*
* MDT threshold in the form of effective send MSS multiplier; we take
* the MDT path if the amount of unsent data exceeds the threshold value
* (default threshold is 1*SMSS).
*/
/*
* Forces all connections to obey the value of the tcps_maxpsz_multiplier
* tunable settable via NDD. Otherwise, the per-connection behavior is
* determined dynamically during tcp_adapt_ire(), which is the default.
*/
/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
/*
* To reach an eager in Q0 which can be dropped due to an incoming
* new SYN request when Q0 is full, a new doubly linked list is
* introduced. This list allows us to select an eager from Q0 in O(1) time.
* This is needed to avoid spending too much time walking through the
* long list of eagers in Q0 when tcp_drop_q0() is called. Each member of
* this new list has to be a member of Q0.
* This list is headed by the listener's tcp_t. When the list is empty,
* both the pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0 -
* of the listener's tcp_t point to the listener's tcp_t itself.
*
* Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
* in the list. MAKE_UNDROPPABLE() takes the eager out of the list.
* These macros do not affect the eager's membership in Q0; an illustrative
* sketch of the list manipulation follows the macros below.
*/
= (eager); \
(eager)->tcp_eager_next_drop_q0 = \
}
#define MAKE_UNDROPPABLE(eager) \
= (eager)->tcp_eager_prev_drop_q0; \
= (eager)->tcp_eager_next_drop_q0; \
}
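/*
 * Illustrative sketch of the list manipulation performed by the macros
 * above, using the tcp_eager_next_drop_q0/tcp_eager_prev_drop_q0 links
 * described in the preceding comment. The helper names are hypothetical
 * and the real macros may differ in detail.
 */
static void
tcp_make_droppable_sketch(tcp_t *listener, tcp_t *eager)
{
	/* Insert the eager right after the listener, i.e. the list head. */
	eager->tcp_eager_next_drop_q0 = listener->tcp_eager_next_drop_q0;
	eager->tcp_eager_prev_drop_q0 = listener;
	listener->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0 = eager;
	listener->tcp_eager_next_drop_q0 = eager;
}

static void
tcp_make_undroppable_sketch(tcp_t *eager)
{
	/* Unlink the eager; its membership in Q0 itself is unaffected. */
	eager->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0 =
	    eager->tcp_eager_next_drop_q0;
	eager->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0 =
	    eager->tcp_eager_prev_drop_q0;
	eager->tcp_eager_next_drop_q0 = eager;
	eager->tcp_eager_prev_drop_q0 = eager;
}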
/*
* If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
* than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent
* data, TCP will not respond with an ACK. RFC 793 requires that
* TCP respond with an ACK for such a bogus ACK. By not following
* the RFC, we prevent TCP from getting into an ACK storm if somehow
* an attacker successfully spoofs an acceptable segment to our
* peer; or when our peer is "confused."
*/
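/*
 * Minimal sketch (hypothetical helper and counter field) of the throttle
 * described above: once more than tcp_drop_ack_unsent_cnt ACKs for
 * unsent data have been seen on a connection, the RFC 793 ACK reply is
 * suppressed.
 */
static boolean_t
tcp_ack_bogus_ack_sketch(tcp_t *tcp, uint32_t drop_ack_unsent_cnt)
{
	if (drop_ack_unsent_cnt > 0 &&
	    ++tcp->tcp_in_ack_unsent > drop_ack_unsent_cnt)
		return (B_FALSE);	/* do not send the ACK */
	return (B_TRUE);		/* respond per RFC 793 */
}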
/*
* Hook functions to enable cluster networking
* On non-clustered systems these vectors must always be NULL.
*/
/*
* The following are defined in ip.c
*/
#define CL_INET_CONNECT(tcp) { \
if (cl_inet_connect != NULL) { \
/* \
* Running in cluster mode - register active connection \
* information \
*/ \
} \
} else { \
if (!IN6_IS_ADDR_UNSPECIFIED( \
} \
} \
} \
}
#define CL_INET_DISCONNECT(tcp) { \
if (cl_inet_disconnect != NULL) { \
/* \
* Running in cluster mode - deregister active \
* connection information \
*/ \
if ((tcp)->tcp_ip_src != 0) { \
AF_INET, \
(uint8_t *) \
} \
} else { \
if (!IN6_IS_ADDR_UNSPECIFIED( \
&(tcp)->tcp_ip_src_v6)) { \
(uint8_t *) \
} \
} \
} \
}
/*
* Cluster networking hook for traversing current connection list.
* This routine is used to extract the current list of live connections
* which must continue to be dispatched to this node.
*/
/*
* Figure out the value of the window scale option. Note that the rwnd is
* ASSUMED to be rounded up to the nearest MSS before the calculation.
* We cannot find the scale value and then do a round up of tcp_rwnd
* because the scale value may not be correct after that.
*
* Set the compiler flag to make this function inline.
*/
static void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	/* Count the shifts needed to fit rwnd into the 16-bit window field. */
	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	tcp->tcp_rcv_ws = i;
}
/*
* Remove a connection from the list of detached TIME_WAIT connections.
* It returns B_FALSE if it can't remove the connection from the list
* as the connection has already been removed from the list due to an
* earlier call to tcp_time_wait_remove(); otherwise it returns B_TRUE.
*/
static boolean_t
{
if (tcp_time_wait == NULL) {
tcp_time_wait = *((tcp_squeue_priv_t **)
} else {
}
if (tcp->tcp_time_wait_expire == 0) {
if (locked)
return (B_FALSE);
}
NULL;
} else {
}
} else {
}
tcp->tcp_time_wait_expire = 0;
if (locked)
return (B_TRUE);
}
/*
* Add a connection to the list of detached TIME_WAIT connections
* and set its time to expire.
*/
static void
{
/* Freed above */
/* must have happened at the time of detaching the tcp */
/*
* The value computed below in tcp->tcp_time_wait_expire may
* appear negative or wrap around. That is ok since our
* interest is only in the difference between the current lbolt
* value and tcp->tcp_time_wait_expire. But the value should not
* be zero, since it means the tcp is not in the TIME_WAIT list.
* The corresponding comparison in tcp_time_wait_collector() uses
* modular arithmetic.
*/
if (tcp->tcp_time_wait_expire == 0)
} else {
}
}
/* ARGSUSED */
void
{
return;
}
/*
* Because they have no upstream client to rebind or tcp_close()
* them later, we axe the connection here and now.
*/
}
/*
*/
void
{
}
}
}
/*
* Cleanup before placing on the free list.
* Disassociate from the netstack/tcp_stack_t since the freelist
* is per squeue and not per netstack.
*/
void
{
char *tcp_iphc;
int tcp_iphc_len;
int tcp_hdr_grown;
/* Cleanup that which needs the netstack first */
/* Release any SSL context */
}
}
/*
* Since we will bzero the entire structure, we need to
* remove it and reinsert it in global hash list. We
* know the walkers can't get to this conn because we
* had set CONDEMNED flag earlier and checked reference
* under conn_lock so walker won't pick it and when we
* go the ipcl_globalhash_remove() below, no walker
* can get to it.
*/
/*
* Now it is safe to decrement the reference counts.
* This might be the last reference on the netstack and TCPS
* in which case it will cause the tcp_g_q_close and
* the freeing of the IP Instance.
*/
/* Save some state */
}
}
/* restore the state */
}
/*
* Blows away all tcps whose TIME_WAIT has expired. List traversal
* is done forwards from the head.
* This walks all stack instances since
* tcp_time_wait remains global across all stacks.
*/
/* ARGSUSED */
void
tcp_time_wait_collector(void *arg)
{
}
}
/*
* In order to reap time waits reliably, we should use a
* source of time that is not adjustable by the user -- hence
* the call to ddi_get_lbolt().
*/
now = ddi_get_lbolt();
/*
* Compare times using modular arithmetic, since
* lbolt can wrapover.
*/
break;
}
/*
* This is essentially a TW reclaim fast path optimization for
* performance where the timewait collector checks under the
* fanout lock (so that no one else can get access to the
* conn_t) that the refcnt is 2 i.e. one for TCP and one for
* the classifier hash list. If ref count is indeed 2, we can
* just remove the conn under the fanout lock and avoid
* cleaning up the conn under the squeue, provided that
* clustering callbacks are not enabled. If clustering is
* enabled, we need to make the clustering callback before
* setting the CONDEMNED flag and after dropping all locks and
* so we forego this optimization and fall back to the slow
* path. Also please see the comments in tcp_closei_local
* regarding the refcnt logic.
*
* Since we are holding the tcp_time_wait_lock, it's better
* not to block on the fanout_lock because other connections
* can't add themselves to the time_wait list. So we do a
* tryenter instead of mutex_enter.
*/
if (mutex_tryenter(lock)) {
(cl_inet_disconnect == NULL)) {
connp->conn_fanout);
/*
* Set the CONDEMNED flag now itself so that
* the refcnt cannot increase due to any
* walker. But we have still not cleaned up
* conn_ire_cache. This is still ok since
* we are going to clean it up in tcp_cleanup
* immediately and any interface unplumb
* thread will wait till the ire is blown away
*/
if (tcp_time_wait->tcp_free_list_cnt <
/* Add to head of tcp_free_list */
continue;
} else {
/* Do not add to tcp_free_list */
}
} else {
/*
* We can reuse the closemp here since conn has
* detached (otherwise we wouldn't even be in
* time_wait list). tcp_closemp_used can safely
* be changed without taking a lock as no other
* thread can concurrently access it at this
* point in the connection lifecycle.
*/
else
"tcp_timewait_collector: "
"concurrent use of tcp_closemp: "
"connp %p tcp %p\n", (void *)connp,
(void *)tcp);
}
} else {
/*
* We can reuse the closemp here since conn has
* detached (otherwise we wouldn't even be in
* time_wait list). tcp_closemp_used can safely
* be changed without taking a lock as no other
* thread can concurrently access it at this
* point in the connection lifecycle.
*/
else
"concurrent use of tcp_closemp: "
"connp %p tcp %p\n", (void *)connp,
(void *)tcp);
tcp_timewait_output, connp, 0);
}
}
}
/*
* Reply to a clients T_CONN_RES TPI message. This function
* on the acceptor STREAM and processed in tcp_wput_accept().
* Read the block comment on top of tcp_conn_request().
*/
static void
{
struct T_conn_res *tcr;
return;
}
/*
* Under ILP32 the stream head points tcr->ACCEPTOR_id at the
* read side queue of the streams device underneath us i.e. the
* read side queue of 'ip'. Since we can't dereference QUEUE_ptr we
* look it up in the queue_hash. Under LP64 it sends down the
* minor_t of the accepting endpoint.
*
* fanout hash lock is held.
* This prevents any thread from entering the acceptor queue from
* below (since it has not been hard bound yet i.e. any inbound
* packets will arrive on the listener or default tcp queue and
* go through tcp_lookup).
* The CONN_INC_REF will prevent the acceptor from closing.
*
* XXX It is still possible for a tli application to send down data
* on the accepting stream while another thread calls t_accept.
* This should not be a problem for well-behaved applications since
* the T_OK_ACK is sent after the queue swapping is completed.
*
* If the accepting fd is the same as the listening fd, avoid
* queue hash lookup since that will return an eager listener in an
* already established state.
*/
/* only count how many T_CONN_INDs so don't count q0 */
return;
}
if (listener->tcp_conn_req_cnt_q0 != 0) {
/* Throw away all the eagers on q0. */
}
if (listener->tcp_syn_defense) {
IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
}
}
/*
* Transfer tcp_conn_req_max to the eager so that when
* a disconnect occurs we can revert the endpoint to the
* listen state.
*/
/*
* Get a reference on the acceptor just like the
* tcp_acceptor_hash_lookup below.
*/
} else {
"tcp_accept: did not find acceptor 0x%x\n",
}
return;
}
/*
* Verify acceptor state. The acceptable states for an acceptor
* include TCPS_IDLE and TCPS_BOUND.
*/
case TCPS_IDLE:
/* FALLTHRU */
case TCPS_BOUND:
break;
default:
return;
}
}
/* The listener must be in TCPS_LISTEN */
return;
}
/*
* Rendezvous with an eager connection request packet hanging off
* 'tcp' that has the 'seqnum' tag. We tagged the detached open
* tcp structure when the connection packet arrived in
* tcp_conn_request().
*/
do {
return;
}
/*
* At this point, both acceptor and listener have 2 ref
* that they begin with. Acceptor has one additional ref
* we placed in lookup while listener has 3 additional
* ref for being behind the squeue (tcp_accept() is
* done on listener's squeue); being in classifier hash;
* and eager's ref on listener.
*/
/*
* The eager at this point is set in its own squeue and
* could easily have been killed (tcp_accept_finish will
* deal with that) because of a TH_RST so we can only
* ASSERT for a single ref.
*/
/* Pre allocate the stroptions mblk also */
return;
}
/*
* Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO
* from listener to acceptor. The message is chained on opt_mp
* which will be sent onto eager's squeue.
*/
if (listener->tcp_bound_if != 0) {
/* allocate optmgmt req */
sizeof (int));
}
/* allocate optmgmt req */
}
/* Re-use mp1 to hold a copy of mp, in case reallocb fails */
return;
}
/*
* This is an expanded version of mi_tpi_ok_ack_alloc()
* which allocates a larger mblk and appends the new
* local address to the ok_ack. The address is copied by
* soaccept() for getsockname().
*/
{
int extra;
/*
* Try to re-use mp, if possible. Otherwise, allocate
* an mblk and return it as ok_mp. In any case, mp
* is no longer usable upon return.
*/
/* Original mp has been freed by now, so use mp1 */
return;
}
switch (extra) {
case sizeof (sin_t): {
break;
}
case sizeof (sin6_t): {
sin6->sin6_flowinfo = 0;
} else {
}
sin6->sin6_scope_id = 0;
sin6->__sin6_src_id = 0;
break;
}
default:
break;
}
}
/*
* If there are no options we know that the T_CONN_RES will
* succeed. However, we can't send the T_OK_ACK upstream until
* the tcp_accept_swap is done since it would be dangerous to
* let the application start using the new fd prior to the swap.
*/
/*
* tcp_accept_swap unlinks eager from listener but does not drop
* the eager's reference on the listener.
*/
/*
* The eager is now associated with its own queue. Insert in
* the hash so that the connection can be reused for a future
* T_CONN_RES.
*/
/*
* We now do the processing of options with T_CONN_RES.
* We delay till now since we wanted to have queue to pass to
* option processing routines that points back to the right
* instance structure which does not happen until after
* tcp_accept_swap().
*
* Note:
* The sanity of the logic here assumes that whatever options
* are appropriate to inherit from listener=>eager are done
* before this point, and whatever were to be overridden (or not)
* in transfer logic from eager=>acceptor in tcp_accept_swap().
* [ Warning: acceptor endpoint can have T_OPTMGMT_REQ done to it
* before its ACCEPTOR_id comes down in T_CONN_RES ]
* This may not be true at this point in time but can be fixed
* independently. This option processing code starts with
* the instantiated acceptor instance and the final queue at
* this point.
*/
if (tcr->OPT_length != 0) {
/* Options to process */
int t_error = 0;
int sys_error = 0;
int do_disconnect = 0;
if (do_disconnect) {
/*
* An option failed which does not allow
* connection to be accepted.
*
* We allow T_CONN_RES to succeed and
* put a T_DISCON_IND on the eager queue.
*/
} else {
/*
* Original mp was either freed or set
* to ok_mp above, so use mp1 instead.
*/
goto finish;
}
}
/*
* Most likely success in setting options (except if
* eager->tcp_send_discon_ind set).
* mp1 option buffer represented by OPT_length/offset
* potentially modified and contains results of setting
* options at this point
*/
}
/* We no longer need mp1, since all options processing has passed */
/*
* This path should not be executed if listener and
* acceptor streams are the same.
*/
/*
* listener->tcp_eager_prev_q0 points to the TAIL of the
* deferred T_conn_ind queue. We need to get to the head of
* the queue in order to send up T_conn_ind the same order as
* how the 3WHS is completed.
*/
break;
else
}
/* Move from q0 to q */
/* Make sure the tcp isn't in the list of droppables */
/*
* Insert at end of the queue because sockfs sends
* down T_CONN_RES in chronological order. Leaving
* the older conn indications at the front of the queue
* helps reduce search time.
*/
else
} else {
}
/*
* Done with the acceptor - free it
*
* Note: from this point on, no access to listener should be made
* as listener can be equal to acceptor.
*/
/*
* In case we already received a FIN we have to make tcp_rput send
* the ordrel_ind. This will also send up a window update if the window
* has opened up.
*
* In the normal case of a successful connection acceptance
* we give the O_T_BIND_REQ to the read side put procedure as an
* indication that this was just accepted. This tells tcp_rput to
* pass up any data queued in tcp_rcv_list.
*
* In the fringe case where options sent with T_CONN_RES failed and
* a disconnect is required, we would be indicating a T_DISCON_IND to blow
* away this connection.
*/
/*
* XXX: we currently have a problem if XTI application closes the
* acceptor stream in between. This problem exists in on10-gate also
* and is well known but nothing can be done short of a major rewrite
* eager same squeue as listener (we can distinguish non socket
* listeners at the time of handling a SYN in tcp_conn_request)
* and do most of the work that tcp_accept_finish does here itself
* and then get behind the acceptor squeue to access the acceptor
* queue.
*/
/*
* We already have a ref on tcp so no need to do one before squeue_fill
*/
}
/*
* The sockfs accept is done on the acceptor stream and control goes
* through tcp_wput_accept() and tcp_accept()/tcp_accept_swap() is not
* called. In either case, both the eager and listener are in their own
* perimeter (squeue) and the code has to deal with potential race.
*
* See the block comment on top of tcp_accept() and tcp_wput_accept().
*/
static void
{
/*
* the acceptor id.
*/
/* remove eager from listen list... */
/*
* which might be a different squeue from our peer TCP instance.
* For TCP Fusion, the peer expects that whenever tcp_detached is
* clear, our TCP queues point to the acceptor's queues. Thus, the queue
* assignments above must reach global visibility prior to the clearing of
* tcp_detached.
*/
/* Do the IPC initialization */
/* Done with old IPC. Drop its ref on its connp */
}
/*
* Adapt to the information, such as rtt and rtt_sd, provided from the
* ire cached in conn_cache_ire. If no ire cached, do a ire lookup.
*
* Checks for multicast and broadcast destination address.
* Returns zero on failure; non-zero if ok.
*
* Note that the MSS calculation here is based on the info given in
* the IRE. We do not do any calculation based on TCP options. They
* will be handled in tcp_rput_other() and tcp_rput_data() when TCP
* knows which options to use.
*
* Note on how TCP gets its parameters for a connection.
*
* When a tcp_t structure is allocated, it gets all the default parameters.
* In tcp_adapt_ire(), it gets those metric parameters, like rtt, rtt_sd,
* spipe, rpipe, ... from the route metrics. Route metric overrides the
* default. But if there is an associated tcp_host_param, it will override
* the metrics.
*
* An incoming SYN with a multicast or broadcast destination address is dropped
* in 1 of 2 places.
*
* 1. If the packet was received over the wire it is dropped in
* ip_rput_process_broadcast()
*
* 2. If the packet was received through internal IP loopback, i.e. the packet
* was generated and received on the same machine, it is dropped in
* ip_wput_local()
*
* An incoming SYN with a multicast or broadcast source address is always
* dropped in tcp_adapt_ire. The same logic in tcp_adapt_ire also serves to
* reject an attempt to connect to a broadcast or multicast (destination)
* address.
*/
static int
{
return (0);
}
/*
* If IP_NEXTHOP is set, then look for an IRE_CACHE
* for the destination with the nexthop as gateway.
* ire_ctable_lookup() is used because this particular
* ire, if it exists, will be marked private.
* If that is not available, use the interface ire
* for the nexthop.
*
* TSol: tcp_update_label will detect label mismatches based
* only on the destination's label, but that would not
* detect label mismatches based on the security attributes
* of routes or next hop gateway. Hence we need to pass the
* label to ire_ftable_lookup below in order to locate the
* right ire that matches on the label, and similarly to
* pass the label to the ire_cache_lookup below to locate
* the right ire that also matches on the label.
*/
ipst);
return (0);
} else {
}
} else {
} else {
return (0);
} else {
&((ire_t *)
}
}
}
/*
* ire->ire_mp is non null when ire_mp passed in is used
* ire->ire_mp is set in ip_bind_insert_ire[_v6]().
*/
return (0);
}
/*
* ip_bind_connected() has stored the correct source
* address in conn_src.
*/
/*
* Copy of the src addr. in tcp_t is needed
* for the lookup funcs.
*/
}
/*
* Set the fragment bit so that IP will tell us if the MTU
* should change. IP tells us the latest setting of
* ip_path_mtu_discovery through ire_frag_flag.
*/
if (ipst->ips_ip_path_mtu_discovery) {
}
/*
* If ire_uinfo is NULL, this is the IRE_INTERFACE case
* for IP_NEXTHOP. No cache ire has been found for the
* destination and we are working with the nexthop's
* interface ire. Since we need to forward all packets
* to the nexthop first, we "blindly" set tcp_localnet
* to false, even though the destination may also be
* onlink.
*/
tcp->tcp_localnet = 0;
else
} else {
/*
* For incoming connection ire_mp = NULL
* For outgoing connection ire_mp != NULL
* Technically we should check conn_incoming_ill
* when ire_mp is NULL and conn_outgoing_ill when
* ire_mp is non-NULL. But this is performance
* critical path and for IPV*_BOUND_IF, outgoing
* and incoming ill are always set to the same value.
*/
/* Outgoing or incoming path */
int err;
ip1dbg(("tcp_adapt_ire: ill_lookup failed\n"));
return (0);
}
}
} else {
return (0);
}
} else {
}
}
/*
* ire->ire_mp is non null when ire_mp passed in is used
* ire->ire_mp is set in ip_bind_insert_ire[_v6]().
*/
return (0);
}
/*
* ip_bind_connected_v6() has stored the correct source
* address per IPv6 addr. selection policy in
* conn_src_v6.
*/
/*
* Copy of the src addr. in tcp_t is needed
* for the lookup funcs.
*/
&connp->conn_srcv6));
}
tcp->tcp_localnet =
}
/*
* This allows applications to fail quickly when connections are made
* to dead hosts. Hosts can be labeled dead by adding a reject route
* with both the RTF_REJECT and RTF_PRIVATE flags set.
*/
goto error;
/*
* Make use of the cached rtt and rtt_sd values to calculate the
* initial RTO. Note that they are already initialized in
* tcp_init_values().
* If ire_uinfo is NULL, i.e., we do not have a cache ire for
* IP_NEXTHOP, but instead are using the interface ire for the
* nexthop, then we do not use the ire_uinfo from that ire to
* do any initializations.
*/
} else {
}
}
if (ire_uinfo->iulp_ssthresh != 0)
else
if (ire_uinfo->iulp_spipe > 0) {
tcps->tcps_max_buf);
if (tcps->tcps_snd_lowat_fraction != 0)
}
/*
* Note that up till now, acceptor always inherits receive
* window from the listener. But if there is a metric
* associated with a host, we should use that instead of
* inheriting it from listener. Thus we need to pass this
* info back to the caller.
*/
if (ire_uinfo->iulp_rpipe > 0) {
tcps->tcps_max_buf);
}
if (ire_uinfo->iulp_rtomax > 0) {
}
/*
* Use the metric option settings, iulp_tstamp_ok and
* iulp_wscale_ok, only for active open. What this means
* is that if the other side uses timestamp or window
* scale option, TCP will also use those options. That
* is for passive open. If the application sets a
* large window, window scale is enabled regardless of
* the value in iulp_wscale_ok. This is the behavior
* since 2.6. So we keep it.
* The only case left in passive open processing is the
* check for SACK.
* For ECN, it should probably be like SACK. But the
* current value is binary, so we treat it like the other
* cases. The metric only controls active open. For passive
* open, the ndd param, tcp_ecn_permitted, controls the
* behavior.
*/
if (!tcp_detached) {
/*
* The if check means that the following can only
* be turned on by the metrics only IRE, but not off.
*/
if (ire_uinfo->iulp_tstamp_ok)
if (ire_uinfo->iulp_wscale_ok)
if (ire_uinfo->iulp_ecn_ok)
} else {
/*
* Passive open.
*
* As above, the if check means that SACK can only be
* turned on by the metric only IRE.
*/
}
}
}
/*
* XXX: Note that currently, ire_max_frag can be as small as 68
* because of PMTUd. So tcp_mss may go to negative if combined
* length of all those options exceeds 28 bytes. But because
* of the tcp_mss_min check below, we may not have a problem if
* tcp_mss_min is of a reasonable value. The default is 1 so
* the negative problem still exists. And the check defeats PMTUd.
* In fact, if PMTUd finds that the MSS should be smaller than
* tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
* value.
*
* We do not deal with that now. All those problems related to
* PMTUd will be fixed later.
*/
}
}
/* Sanity check for MSS value. */
else
/*
* After receiving an ICMPv6 "packet too big" message with an
* MTU < 1280, and for multirouted IPv6 packets, the IP layer
* will insert an 8-byte fragment header in every packet; we
* reduce the MSS by that amount here.
*/
mss -= sizeof (ip6_frag_t);
}
if (tcp->tcp_ipsec_overhead == 0)
/* Note that this is the maximum MSS, excluding all options. */
/*
* Initialize the ISS here now that we have the full connection ID.
* The RFC 1948 method of initial sequence number generation requires
* knowledge of the full connection ID before setting the ISS.
*/
} else {
}
/* Only modify if we're going to make them bigger */
if (tcps->tcps_snd_lowat_fraction != 0)
}
}
/* Copy timestamp flag only for active open */
if (!tcp_detached)
}
/*
* If we got an IRE_CACHE and an ILL, go through their properties;
* otherwise, this is deferred until later when we have an IRE_CACHE.
*/
if (tcp->tcp_loopback ||
/*
* For incoming, see if this tcp may be MDT-capable. For
* outgoing, this process has been taken care of through
* tcp_rput_other.
*/
}
/*
* Make sure that conn is not marked incipient
* for incoming connections. A blind
* removal of incipient flag is cheaper than
* check and removal.
*/
/*
* Must not cache forwarding table routes
* or recache an IRE after the conn_t has
* had conn_ire_cache cleared and is flagged
* unusable, (see the CONN_CACHE_IRE() macro).
*/
return (1);
}
}
return (1);
return (0);
}
/*
* tcp_bind is called (holding the writer lock) by tcp_wput_proto to process a
* O_T_BIND_REQ/T_BIND_REQ message.
*/
static void
{
struct T_bind_req *tbr;
int err;
"tcp_bind: bad req, len %u",
}
return;
}
/* Make sure the largest address fits */
return;
}
tbr->CONIND_number > 0) {
/*
* Handle listen() increasing CONIND_number.
* This is more "liberal" then what the TPI spec
* requires but is needed to avoid a t_unbind
* when handling listen() since the port number
* might be "stolen" between the unbind and bind.
*/
goto do_bind;
}
}
return;
}
switch (tbr->ADDR_length) {
case 0: /* request for a generic port */
} else {
}
requested_port = 0;
break;
case sizeof (sin_t): /* Complete IPv4 address */
sizeof (sin_t));
"tcp_bind: bad address parameter, "
"offset %d, len %d",
}
return;
}
/*
* With sockets sockfs will accept bogus sin_family in
* bind() and replace it with the family used in the socket
* call.
*/
return;
}
break;
case sizeof (sin6_t): /* Complete IPv6 address */
"tcp_bind: bad IPv6 address parameter, "
tbr->ADDR_length);
}
return;
}
return;
}
break;
default:
"tcp_bind: bad address length, %d",
tbr->ADDR_length);
}
return;
}
/* Check for change in ipversion */
if (err) {
return;
}
}
/*
* Initialize family specific fields. Copy of the src addr.
* in tcp_t is needed for the lookup funcs.
*/
} else {
}
/*
* For O_T_BIND_REQ:
* another.
* For T_BIND_REQ:
* In both cases when it succeeds the tcp is inserted in the
* bind hash table. This ensures that the operation is atomic
* under the lock on the hash bucket.
*/
bind_to_req_port_only = requested_port != 0 &&
/*
* Get a valid port (within the anonymous range and should not
* be a privileged one) to use if the user has not given a port.
* If multiple threads are here, they may all start with
* the same initial port. But, it should be fine as long as
* tcp_bindi will ensure that no two threads will be assigned
* the same port.
*
* NOTE: XXX If a privileged process asks for an anonymous port, we
* still check for ports only in the range > tcp_smallest_non_priv_port,
* unless TCP_ANONPRIVBIND option is set.
*/
if (requested_port == 0) {
if (requested_port == 0) {
return;
}
/*
* If the user went through one of the RPC interfaces to create
* this socket and RPC is MLP in this zone, then give him an
* anonymous MLP.
*/
if (addrtype == mlptSingle) {
return;
}
}
} else {
int i;
/*
* If the requested_port is in the well-known privileged range,
* verify that the stream was opened by a privileged user.
* Note: No locks are held when inspecting tcp_g_*epriv_ports
* but instead the code relies on:
* - the fact that the address of the array and its size never
* changes
* - the atomic assignment of the elements of the array
*/
} else {
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
if (requested_port ==
tcps->tcps_g_epriv_ports[i]) {
break;
}
}
}
if (priv) {
IPPROTO_TCP) != 0) {
"tcp_bind: no priv for port %d",
}
return;
}
}
if (is_system_labeled()) {
if (addrtype == mlptSingle) {
return;
}
}
}
if (mlptype != mlptSingle) {
if (secpolicy_net_bindmlp(cr) != 0) {
"tcp_bind: no priv for multilevel port %d",
}
return;
}
/*
* If we're specifically binding a shared IP address and the
* port is MLP on shared addresses, then check to see if this
* zone actually owns the MLP. Reject if not.
*/
/*
* No need to handle exclusive-stack zones since
* ALL_ZONES only applies to the shared stack.
*/
"tcp_bind: attempt to bind port "
"%d on shared addr in zone %d "
"(should be %d)",
mlpzone);
}
return;
}
}
if (!user_specified) {
if (err != 0) {
"tcp_bind: cannot establish anon "
"MLP for port %d",
}
return;
}
}
}
if (allocated_port == 0) {
if (connp->conn_anon_port) {
}
if (bind_to_req_port_only) {
"tcp_bind: requested addr busy");
}
} else {
/* If we are out of ports, fail the bind. */
"tcp_bind: out of ports?");
}
}
return;
}
if (!backlog_update) {
else
}
if (tbr->CONIND_number != 0) {
sizeof (sin_t));
} else {
/* Just verify the local IP address */
}
} else {
if (tbr->CONIND_number != 0) {
sizeof (sin6_t));
} else {
/* Just verify the local IP address */
}
}
if (connp->conn_anon_port) {
}
return;
}
/* Chain in the reply mp for tcp_rput() */
if (tcp->tcp_conn_req_max) {
/*
* If this is a listener, do not reset the eager list
* and other stuffs. Note that we don't check if the
* existing eager list meets the new tcp_conn_req_max
* requirement.
*/
/* Initialize the chain. Don't need the eager_lock */
}
}
/*
* We can call ip_bind directly which returns a T_BIND_ACK mp. The
* processing continues in tcp_rput_other().
*
* We need to make sure that the conn_recv is set to a non-null
* value before we insert the conn into the classifier table.
* This is to avoid a race with an incoming packet which does an
* ipcl_classify().
*/
} else {
}
/*
* If the bind cannot complete immediately
* IP will arrange to call tcp_rput_other
* when the bind completes.
*/
} else {
/*
* Bind will be resumed later. Need to ensure
* that conn doesn't disappear when that happens.
* This will be decremented in ip_resume_tcp_bind().
*/
}
}
/*
* If the "bind_to_req_port_only" parameter is set, if the requested port
* number is available, return it, If not return 0
*
* If "bind_to_req_port_only" parameter is not set and
* If the requested port number is available, return it. If not, return
* the first anonymous port we happen across. If no anonymous ports are
* available, return 0. addr is the requested local address, if any.
*
* In either case, when succeeding update the tcp_t to record the port number
* and insert it in the bind hash table.
*
* Note that TCP over IPv4 and IPv6 sockets can use the same port number
* without setting SO_REUSEADDR. This is needed so that they
* can be viewed as two independent transport protocols.
*/
static in_port_t
{
/* number of times we have run around the loop */
int count = 0;
/* maximum number of times to run around the loop */
int loopmax;
/*
* Lookup for free addresses is done in a loop and "loopmax"
* influences how long we spin in the loop
*/
if (bind_to_req_port_only) {
/*
* If the requested port is busy, don't bother to look
* for a new one. Setting loop maximum count to 1 has
* that effect.
*/
loopmax = 1;
} else {
/*
* If the requested port is busy, look for a free one
* in the anonymous port range.
* Set loopmax appropriately so that one does not look
* forever in the case all of the anonymous ports are in use.
*/
if (tcp->tcp_anon_priv_bind) {
/*
* loopmax =
* (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1
*/
} else {
}
}
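/*
 * Illustrative sketch, not part of the original source: deriving a loop
 * bound such as "loopmax" from the size of the port range being searched,
 * per the comments above. Parameter names are placeholders for the real
 * tcps_* tunables.
 */
#if 0	/* sketch only; excluded from compilation */
static int
example_loopmax(int anon_priv_bind, int min_anonpriv_port,
    int ipport_reserved, int smallest_anon_port, int largest_anon_port)
{
	if (anon_priv_bind) {
		/* loopmax = (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1 */
		return ((ipport_reserved - 1) - min_anonpriv_port + 1);
	}
	/* Otherwise search the configured anonymous port range. */
	return (largest_anon_port - smallest_anon_port + 1);
}
#endif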
do {
/*
* Ensure that the tcp_t is not currently in the bind hash.
* Hold the lock on the hash bucket to ensure that
* the duplicate check plus the insertion is an atomic
* operation.
*
* This function does an inline lookup on the bind hash list.
* Make sure that we access only members of tcp_t
* and that we don't look at tcp_tcp, since we are not
* doing a CONN_INC_REF.
*/
continue;
/*
* On a labeled system, we must treat bindings to ports
* on shared IP addresses by sockets with MAC exemption
* privilege as being in all zones, as there's
* otherwise no way to identify the right receiver.
*/
!lconnp->conn_mac_exempt &&
continue;
/*
* If TCP_EXCLBIND is set for either the bound or
* binding endpoint, the semantics of bind
* is changed according to the following.
*
* spec = specified address (v4 or v6)
* unspec = unspecified address (v4 or v6)
* A = specified addresses are different for endpoints
*
* bound bind to allowed
* -------------------------------------
* unspec unspec no
* unspec spec no
* spec unspec no
* spec spec yes if A
*
* For labeled systems, SO_MAC_EXEMPT behaves the same
* as TCP_EXCLBIND, except that zoneid is ignored.
*
* Note:
*
* 1. Because of TLI semantics, an endpoint can go
* back from, say, TCPS_ESTABLISHED to TCPS_LISTEN or
* TCPS_BOUND, depending on whether it is originally
* a listener or not. That is why we need to check
* for states greater than or equal to TCPS_BOUND
* here.
*
* 2. Ideally, we should only check for state equal
* to TCPS_LISTEN, and the following check should be
* added.
*
* if (ltcp->tcp_state == TCPS_LISTEN ||
* !reuseaddr || !ltcp->tcp_reuseaddr) {
* ...
* }
*
* The semantics would then become: if the
* endpoint on the list is in a state other than
* TCPS_LISTEN and both endpoints have SO_REUSEADDR
* set, let the bind succeed.
*
* Because of (1), we cannot do that for TLI
* endpoints. But we can do that for socket endpoints.
* If in future, we can change this going back
* semantics, we can use the above check for TLI also.
*/
TCP_IS_SOCKET(tcp));
(exclbind && (not_socket ||
if (V6_OR_V4_INADDR_ANY(
ltcp->tcp_bound_source_v6) ||
V6_OR_V4_INADDR_ANY(*laddr) ||
&ltcp->tcp_bound_source_v6)) {
break;
}
continue;
}
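/*
 * Illustrative sketch, not part of the original source: the exclusive-bind
 * decision table above expressed as a predicate. The boolean parameters
 * are placeholders for the address comparisons done on the real tcp_t
 * fields; the function name is hypothetical.
 */
#if 0	/* sketch only; excluded from compilation */
#include <stdbool.h>

static bool
example_exclbind_allowed(bool bound_is_any, bool binding_is_any,
    bool addrs_differ)
{
	/* unspec on either side is never allowed to share the port */
	if (bound_is_any || binding_is_any)
		return (false);
	/* spec vs. spec: allowed only when the addresses differ (case A) */
	return (addrs_differ);
}
#endif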
/*
* Check ipversion to allow IPv4 and IPv6 sockets to
* have disjoint port number spaces, if *_EXCLBIND
* is not set and only if the application binds to a
* specific port. We use the same autoassigned port
* number space for IPv4 and IPv6 sockets.
*/
continue;
/*
* Ideally, we should make sure that the source
* address, remote address, and remote port in the
* four tuple for this tcp-connection is unique.
* However, trying to find out the local source
* address would require too much code duplication
* with IP, since IP needs to have that code
* to support userland TCP implementations.
*/
if (quick_connect &&
&ltcp->tcp_remote_v6)))
continue;
if (!reuseaddr) {
/*
* No socket option SO_REUSEADDR.
* If the existing port is bound to
* a non-wildcard IP address
* and the requesting stream is
* bound to a distinct,
* different IP address
* (also non-wildcard), keep
* going.
*/
if (!V6_OR_V4_INADDR_ANY(*laddr) &&
ltcp->tcp_bound_source_v6) &&
continue;
/*
* This port is being used and
* its state is >= TCPS_BOUND,
* so we can't bind to it.
*/
break;
}
} else {
/*
* socket option SO_REUSEADDR is set on the
* binding tcp_t.
*
* If two streams are bound to the
* same IP address, or both addr
* and bound source are wildcards
* (INADDR_ANY), we want to stop
* searching.
* We have found a match of IP source
* address and source port, which is
* refused regardless of the
* SO_REUSEADDR setting, so we break.
*/
if (IN6_ARE_ADDR_EQUAL(laddr,
&ltcp->tcp_bound_source_v6) &&
break;
}
}
/* The port number is busy */
} else {
/*
* This port is ours. Insert in fanout and mark as
* bound to prevent others from getting the port
* number.
*/
/*
* We don't want tcp_next_port_to_try to "inherit"
* a port number supplied by the user in a bind.
*/
if (user_specified)
return (port);
/*
* This is the only place where tcp_next_port_to_try
* is updated. After the update, it may or may not
* be in the valid range.
*/
if (!tcp->tcp_anon_priv_bind)
return (port);
}
if (tcp->tcp_anon_priv_bind) {
} else {
if (count == 0 && user_specified) {
/*
* We may have to return an anonymous port. So
* get one to start with.
*/
port =
} else {
B_FALSE);
}
}
if (port == 0)
break;
/*
* Don't let this loop run forever in the case where
* all of the anonymous ports are in use.
*/
return (0);
}
/*
* tcp_clean_death / tcp_close_detached must not be called more than once
* on a tcp. Thus every function that potentially calls tcp_clean_death
* must check for the tcp state before calling tcp_clean_death.
* E.g. tcp_input, tcp_rput_data, tcp_eager_kill, tcp_clean_death_wrapper,
* tcp_timer_handler, all check for the tcp state.
*/
/* ARGSUSED */
void
{
ETIMEDOUT, 5);
}
/*
* We are dying for some reason. Try to do it gracefully. (May be called
* as writer.)
*
* Return -1 if the structure was not cleaned up (if the cleanup had to be
* done by a service procedure).
* TBD - Should the return value distinguish between the tcp_t being
* freed and it being reinitialized?
*/
static int
{
queue_t *q;
#endif
if (tcp->tcp_linger_tid != 0 &&
}
if (TCP_IS_DETACHED(tcp)) {
if (tcp->tcp_hard_binding) {
/*
* It's an eager that we are dealing with. We close the
* eager but in case a conn_ind has already gone to the
* listener, let tcp_accept_finish() send a discon_ind
* to the listener and drop the last reference. If the
* listener doesn't even know about the eager i.e. the
* conn_ind hasn't gone up, blow away the eager and drop
* the last reference as well. If the conn_ind has gone
* up, state should be BOUND. tcp_accept_finish
* will figure out that the connection has received a
* RST and will send a DISCON_IND to the application.
*/
if (!tcp->tcp_tconnind_started) {
} else {
}
} else {
}
return (0);
}
/*
* If T_ORDREL_IND has not been sent yet (done when service routine
* is run) postpone cleaning up the endpoint until service routine
* has sent up the T_ORDREL_IND. Avoid clearing out an existing
* client_errno since tcp_close uses the client_errno field.
*/
if (err != 0)
return (-1);
}
/* If sodirect, not anymore */
}
/* Trash all inbound data */
/*
* If we are at least partway open and there is an error
* (err==0 implies no error),
* notify our client by a T_DISCON_IND.
*/
!TCP_IS_SOCKET(tcp)) {
/*
* Send M_FLUSH according to TPI. Because sockets will
* (and must) ignore FLUSHR we do that only for TPI
* endpoints and sockets in STREAMS mode.
*/
}
"tcp_clean_death: discon err %d", err);
}
} else {
"tcp_clean_death, sending M_ERROR");
}
}
/* SYN_SENT or SYN_RCVD */
/* ESTABLISHED or CLOSE_WAIT */
}
}
return (-1);
}
/*
* In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
* to expire, stop the wait and finish the close.
*/
static void
{
tcp->tcp_linger_tid = 0;
if (tcp->tcp_flow_stopped) {
}
if (tcp->tcp_timer_tid != 0) {
tcp->tcp_timer_tid = 0;
}
/*
* Need to cancel those timers which will not be used when
* TCP is detached. This has to be done before the tcp_wq
* is set to the global queue.
*/
goto finish;
}
/*
* If delta is zero the timer event wasn't executed and was
* successfully canceled. In this case we need to restart it
* with the minimal delta possible.
*/
if (delta >= 0) {
}
} else {
}
/* Signal closing thread that it can complete close */
}
/*
* Handle lingering timeouts. This function is called when the SO_LINGER timeout
* expires.
*/
static void
tcp_close_linger_timeout(void *arg)
{
}
static int
{
/*
*
* Mark the conn as closing. ill_pending_mp_add will not
* add any mp to the pending mp list, after this conn has
* started closing. Same for sq_pending_mp_add
*/
/*
* tcp_closemp_used is used below without the protection of a lock,
* as we don't expect anyone else to use it concurrently at this
* point; otherwise it would be a major defect.
*/
else
while (!tcp->tcp_closed) {
/*
* The cv_wait_sig() was interrupted. We now do the
* following:
*
* 1) If the endpoint was lingering, we allow this
* to be interrupted by cancelling the linger timeout
* and closing normally.
*
* 2) Revert to calling cv_wait()
*
* We revert to using cv_wait() to avoid an
* infinite loop which can occur if the calling
* thread is higher priority than the squeue worker
* thread and is bound to the same cpu.
*/
/* Entering squeue, bump ref count. */
}
break;
}
}
while (!tcp->tcp_closed)
/*
* In the case of listener streams that have eagers in the q or q0
* we wait for the eagers to drop their reference to us. tcp_rq and
* tcp_wq of the eagers point to our queues. By waiting for the
* refcnt to drop to 1, we are sure that the eagers have cleaned
* up their queue pointers and also dropped their references to us.
*/
if (tcp->tcp_wait_for_eagers) {
}
}
/*
* ioctl cleanup. The mp is queued in the
* ill_pending_mp or in the sq_pending_mp.
*/
qprocsoff(q);
/*
* Drop IP's reference on the conn. This is the last reference
* on the connp if the state was less than established. If the
* connection has gone into timewait state, then we will have
* one ref for the TCP and one more ref (total of two) for the
* classifier connected hash list (a timewait connections stays
* in connected hash till closed).
*
* We can't assert the references because there might be other
* transient reference places because of some walkers or queued
* packets in squeue for the timewait state.
*/
return (0);
}
static int
{
/*
* We had opened an acceptor STREAM for sockfs which is
* now being closed due to some error.
*/
qprocsoff(q);
return (0);
}
/*
* Called by tcp_close() routine via squeue when lingering is
* interrupted by a signal.
*/
/* ARGSUSED */
static void
{
if (tcp->tcp_linger_tid != 0 &&
}
}
/*
* Called by the streams close routine via squeues when our client blows off her
* descriptor; we take this to mean: "close the stream state NOW, close the tcp
* connection politely." When SO_LINGER is set (with a non-zero linger time and
* it is not a nonblocking socket), this routine sleeps until the FIN is
* acked.
*
* NOTE: tcp_close potentially returns error when lingering.
* However, the stream head currently does not pass these errors
* to the application. 4.4BSD only returns EINTR and EWOULDBLOCK
* errors to the application (from tsleep()) and not errors
* like ECONNRESET caused by receiving a reset packet.
*/
/* ARGSUSED */
static void
{
char *msg;
/* Cancel any pending timeout */
if (tcp->tcp_ordrelid != 0) {
if (tcp->tcp_timeout) {
}
tcp->tcp_ordrelid = 0;
}
/* Cleanup for listener */
tcp_eager_cleanup(tcp, 0);
}
case TCPS_CLOSED:
case TCPS_IDLE:
case TCPS_BOUND:
case TCPS_LISTEN:
break;
case TCPS_SYN_SENT:
msg = "tcp_close, during connect";
break;
case TCPS_SYN_RCVD:
/*
* Close during the connect 3-way handshake
* but here there may or may not be pending data
* already on queue. Process almost same as in
* the ESTABLISHED state.
*/
/* FALLTHRU */
default:
/* Ok, no more sodirect */
}
/*
* If SO_LINGER has set a zero linger time, abort the
* connection with a reset.
*/
msg = "tcp_close, zero lingertime";
break;
}
/*
* Abort connection if there is unread data queued.
*/
msg = "tcp_close, unread data";
break;
}
/*
* tcp_hard_bound is now cleared thus all packets go through
* tcp_lookup. This fact is used by tcp_detach below.
*
* We have done a qwait() above which could have possibly
* drained more messages in turn causing transition to a
* different state. Check whether we have to do the rest
* of the processing or not.
*/
break;
/*
* Transmit the FIN before detaching the tcp_t.
* no longer owns the tcp_t thus others can modify it.
*/
(void) tcp_xmit_end(tcp);
/*
* If lingering on close then wait until the fin is acked,
*/
!(tcp->tcp_fin_acked) &&
} else if (tcp->tcp_client_errno == 0) {
/* tcp_close_linger_timeout will finish close */
if (tcp->tcp_linger_tid == 0)
else
return;
}
/*
* Check if we need to detach or just close
* the instance.
*/
break;
}
/*
* Make sure that no other thread will access the tcp_rq of
* this instance (through lookups etc.) as tcp_rq will go
* away shortly.
*/
if (tcp->tcp_flow_stopped) {
}
if (tcp->tcp_timer_tid != 0) {
tcp->tcp_timer_tid = 0;
}
/*
* Need to cancel those timers which will not be used when
* TCP is detached. This has to be done before the tcp_wq
* is set to the global queue.
*/
goto finish;
}
/*
* If delta is zero the timer event wasn't executed and was
* successfully canceled. In this case we need to restart it
* with the minimal delta possible.
*/
if (delta >= 0)
goto finish;
}
/* Detach did not complete. Still need to remove q from stream. */
if (msg) {
}
/*
* Although packets are always processed on the correct
* tcp's perimeter and access is serialized via squeue's,
* IP still needs a queue when sending packets in time_wait
* state so use WR(tcps_g_q) till ip_output() can be
* changed to deal with just connp. For read side, we
* could have set tcp_rq to NULL but there are some cases
* in tcp_rput_data() from early days of this code which
* do a putnext without checking if tcp is closed. Those
* need to be identified before both tcp_rq and tcp_wq
* can be set to NULL and tcps_g_q can disappear forever.
*/
/*
* Don't change the queues in the case of a listener that has
* eagers in its q or q0. It could surprise the eagers.
* Instead wait for the eagers outside the squeue.
*/
if (!tcp->tcp_wait_for_eagers) {
/*
* When default queue is closing we set tcps_g_q to NULL
* after the close is done.
*/
}
/* Signal tcp_close() to finish closing. */
}
/*
* Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
* Some stream heads get upset if they see these later on as anything but NULL.
*/
static void
{
do {
}
}
/* Do detached close. */
static void
{
/*
* Clustering code serializes TCP disconnect callbacks and
* cluster tcp list walks by blocking a TCP disconnect callback
* if a cluster tcp list walk is in progress. This ensures
* accurate accounting of TCPs in the cluster code even though
* the TCP list walk itself is not atomic.
*/
}
/*
* Stop all TCP timers, and free the timer mblks if requested.
*/
void
{
if (tcp->tcp_timer_tid != 0) {
tcp->tcp_timer_tid = 0;
}
if (tcp->tcp_ka_tid != 0) {
tcp->tcp_ka_tid = 0;
}
if (tcp->tcp_ack_tid != 0) {
tcp->tcp_ack_tid = 0;
}
if (tcp->tcp_push_tid != 0) {
tcp->tcp_push_tid = 0;
}
}
/*
* The tcp_t is going away. Remove it from all lists and set it
* to TCPS_CLOSED. The freeing up of memory is deferred until
* tcp_inactive. This is needed since a thread in tcp_rput might have
* done a CONN_INC_REF on this structure before it was removed from the
* hashes.
*/
static void
{
if (!TCP_IS_SOCKET(tcp))
tcp->tcp_ibsegs = 0;
tcp->tcp_obsegs = 0;
/*
* If we are an eager connection hanging off a listener that
* hasn't formally accepted the connection yet, get off his
* list and blow off any data that we have accumulated.
*/
/*
* tcp_tconnind_started == B_TRUE means that the
* conn_ind has already gone to listener. At
* this point, eager will be closed but we
* leave it in the listener's eager list so that
* if listener decides to close without doing
* accept, we can clean this up. In tcp_wput_accept
* we take care of the case of accept on closed
* eager.
*/
if (!tcp->tcp_tconnind_started) {
/*
* We don't want to have any pointers to the
* listener queue, after we have released our
* reference on the listener
*/
} else {
}
}
/* Stop all the timers */
if (tcp->tcp_ip_addr_cache) {
IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
}
}
if (tcp->tcp_flow_stopped)
/*
* If the tcp_time_wait_collector (which runs outside the squeue)
* is trying to remove this tcp from the time wait list, we will
* block in tcp_time_wait_remove while trying to acquire the
* tcp_time_wait_lock. The logic in tcp_time_wait_collector also
* requires the ipcl_hash_remove to be ordered after the
* tcp_time_wait_remove for the refcnt checks to work correctly.
*/
/*
* Delete the cached ire in conn_ire_cache and also mark
* the conn as CONDEMNED
*/
/* Need to cleanup any pending ioctls */
/* Release any SSL context */
}
}
}
/*
* tcp is dying (called from ipcl_conn_destroy and error cases).
* Free the tcp_t in either case.
*/
void
{
/* Free b_next chain */
}
}
}
}
}
}
tcp->tcp_hopoptslen = 0;
}
tcp->tcp_dstoptslen = 0;
}
tcp->tcp_rtdstoptslen = 0;
}
tcp->tcp_rthdrlen = 0;
}
/*
*/
/*
* The following is really blowing away a union.
* Since it happens to have exactly two members of identical size,
* the following code is enough.
*/
}
}
/*
* Put a connection confirmation message upstream built from the
* address information within 'iph' and 'tcph'. Report our success or failure.
*/
static boolean_t
{
int optlen = 0;
/*
* Return in T_CONN_CON results of option negotiation through
* the T_CONN_REQ. Note: If there is a real end-to-end option
* negotiation, then what is received from remote end needs
* to be taken into account but there is no such thing (yet?)
* Note: We do not use mi_offset_param() here as
* tcp_opts_conn_req contents do not directly come from
* an application and are either generated in kernel or
* from user input that was already verified.
*/
optlen = (int)
}
/* packet is IPv4 */
} else {
}
} else {
}
if (!mp)
return (B_FALSE);
}
else
return (B_TRUE);
}
/*
* Defense for the SYN attack -
* 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
* one from the list of droppable eagers. This list is a subset of q0.
* see comments before the definition of MAKE_DROPPABLE().
* 2. Don't drop a SYN request before its first timeout. This gives every
* request until at least the first timeout to complete its 3-way handshake.
* 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
* requests currently on the queue have timed out. This will be used
* as an indicator of whether an attack is under way, so that appropriate
* actions can be taken. (It's incremented in tcp_timer() and decremented
* either when eager goes into ESTABLISHED, or gets freed up.)
* 4. The current thresholds are: # of timeouts > q0len/4 => SYN alert on;
* # of timeouts drops back to <= q0len/32 => SYN alert off
*/
static boolean_t
{
/* Pick oldest eager from the list of droppable eagers */
/* If the list is empty, return B_FALSE */
return (B_FALSE);
}
/* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
return (B_FALSE);
/*
* Take this eager out from the list of droppable eagers since we are
* going to drop it.
*/
"tcp_drop_q0: listen half-open queue (max=%d) overflow"
}
/* Put a reference on the conn as we are enqueueing it in the squeue */
/* Mark the IRE created for this SYN request temporary */
return (B_TRUE);
}
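/*
 * Illustrative sketch, not part of the original source: the hysteresis
 * described in point 4 of the comment above (alert on when timed-out
 * half-open requests exceed q0len/4, off again once they fall to
 * q0len/32 or below). Names are hypothetical.
 */
#if 0	/* sketch only; excluded from compilation */
#include <stdbool.h>
#include <stdint.h>

static bool
example_syn_alert(bool alert_on, uint32_t syn_rcvd_timeout, uint32_t q0len)
{
	if (!alert_on && syn_rcvd_timeout > q0len / 4)
		return (true);		/* too many timed-out half-opens */
	if (alert_on && syn_rcvd_timeout <= q0len / 32)
		return (false);		/* pressure has subsided */
	return (alert_on);		/* no threshold crossed; keep state */
}
#endif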
int
{
int err;
int ifindex = 0;
if (ipvers == IPV4_VERSION) {
if (tcp->tcp_recvdstaddr) {
(char *)&tcp,
(t_scalar_t)sizeof (intptr_t),
} else {
}
} else {
/* db_cksumstuff is set at ip_fanout_tcp_v6 */
DB_CKSUMSTUFF(mp) = 0;
/* Pass up the scope_id of remote addr */
} else {
sin6.sin6_scope_id = 0;
}
if (tcp->tcp_recvdstaddr) {
} else {
}
}
return (ENOMEM);
if (tcps->tcps_trace)
/* Inherit information from the "parent" */
return (err);
}
if (ipvers == IPV4_VERSION) {
return (err);
}
} else {
/* ifindex must be already set */
if (ltcp->tcp_bound_if != 0) {
/*
* Set newtcp's bound_if equal to
* listener's value. If ifindex is
* not the same as ltcp->tcp_bound_if,
* it must be a packet for the ipmp group
* of interfaces
*/
}
tcp->tcp_recvifindex = 0;
}
/*
* Listener had options of some sort; eager inherits.
* Free up the eager template and allocate one
* of the right size.
*/
if (tcp->tcp_hdr_grown) {
} else {
}
tcp->tcp_iphc_len = 0;
return (ENOMEM);
}
}
/*
* Copy the IP+TCP header template from listener to eager
*/
IPPROTO_RAW) {
sizeof (ip6i_t));
} else {
}
} else {
}
} else {
/*
* The only valid case where the ipversion of the listener and
* eager differ is when the listener is IPv6 and
* the eager is IPv4.
* Eager header template has been initialized to the
* maximum v4 header sizes, which includes space for
* TCP and IP options.
*/
/* copy IP header fields individually */
}
sizeof (in_port_t));
sizeof (in_port_t));
}
/* Source routing option copyover (reverse it) */
if (tcps->tcps_rev_src_routes)
} else {
}
/*
* If the SYN contains a credential, it's a loopback packet; attach
* the credential to the TPI message.
*/
}
/* Inherit the listener's SSL protection state */
}
return (0);
}
int
{
int err;
if (ltcp->tcp_recvdstaddr) {
} else {
}
return (ENOMEM);
}
if (tcps->tcps_trace) {
}
/* Inherit information from the "parent" */
return (err);
}
/*
* Let's make sure that eager tcp template has enough space to
* copy IPv4 listener's tcp template. Since the conn_t structure is
* preserved and tcp_iphc_len is also preserved, an eager conn_t may
* have a tcp_template of total len TCP_MAX_COMBINED_HEADER_LENGTH or
* more (in case of re-allocation of conn_t with tcp-IPv6 template with
* extension headers or with ip6i_t struct). Note that bcopy() below
* copies listener tcp's hdr_len which cannot be greater than TCP_MAX_
* COMBINED_HEADER_LENGTH as this listener must be an IPv4 listener.
*/
/* Copy the IP+TCP header template from listener to eager */
/* Initialize the IP addresses and Ports */
/* Source routing option copyover (reverse it) */
if (tcps->tcps_rev_src_routes)
/*
* If the SYN contains a credential, it's a loopback packet; attach
* the credential to the TPI message.
*/
}
/* Inherit the listener's SSL protection state */
}
return (0);
}
/*
* sets up conn for ipsec.
* if the first mblk is M_CTL it is consumed and mpp is updated.
* in case of error mpp is freed.
*/
conn_t *
{
return (NULL);
}
return (NULL);
}
return (NULL);
}
} else {
}
DB_CKSUMSTART(mp) = 0;
if (ipvers == IPV4_VERSION) {
} else {
return (NULL);
}
}
/*
* The caller already ensured that there is a sqp present.
*/
ipsec_in_t *ii;
return (NULL);
}
}
return (NULL);
}
/*
* If we know we have some policy, pass the "IPSEC"
* options size TCP uses this adjust the MSS.
*/
if (mctl_present) {
}
return (econnp);
}
/*
*
* tcp_get_conn is used to get a clean tcp connection structure.
* It tries to reuse the connections put on the freelist by the
* time_wait_collector failing which it goes to kmem_cache. This
* way has two benefits compared to just allocating from and
* freeing to kmem_cache.
* 1) The time_wait_collector can free (which includes the cleanup)
* outside the squeue. So when the interrupt comes, we have a clean
* connection sitting in the freelist. Obviously, this buys us
* performance.
*
* has multiple disadvantages - tying up the squeue during alloc, and the
* fact that IPSec policy initialization has to happen here which
* requires us sending a M_CTL and checking for it i.e. real ugliness.
* we can't check the 'q' and 'q0' which are protected by squeue and
* blindly allocate memory which might have to be freed here if we are
* not allowed to accept the connection. By using the freelist and
* accept the connection.
*
* Care should be taken to put the conn back in the same squeue's freelist
* from which it was allocated. Best results are obtained if conn is
* allocated from listener's squeue and freed to the same. Time wait
* collector will free up the freelist if the connection ends up sitting
* there for too long.
*/
void *
{
netstack_t *ns;
return ((void *)connp);
}
return (NULL);
return ((void *)connp);
}
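/*
 * Illustrative sketch, not part of the original source: the general
 * "try the per-squeue freelist first, fall back to the allocator" pattern
 * that the tcp_get_conn() comment describes. It uses a hypothetical
 * singly linked node type and plain calloc() instead of the real
 * conn_t/kmem_cache machinery, and omits the protection that the squeue
 * provides in the real code.
 */
#if 0	/* sketch only; excluded from compilation */
#include <stdlib.h>

typedef struct ex_conn {
	struct ex_conn *ec_next;	/* stand-in for the freelist link */
} ex_conn_t;

static ex_conn_t *
example_get_conn(ex_conn_t **freelist)
{
	ex_conn_t *c = *freelist;

	if (c != NULL) {
		*freelist = c->ec_next;		/* reuse a pre-cleaned entry */
		c->ec_next = NULL;
		return (c);
	}
	return (calloc(1, sizeof (ex_conn_t)));	/* slow path: fresh alloc */
}
#endif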
/*
* Update the cached label for the given tcp_t. This should be called once per
* connection, and before any packets are sent or tcp_process_options is
* invoked. Returns B_FALSE if the correct label could not be constructed.
*/
static boolean_t
{
int added;
return (B_FALSE);
if (added == -1)
return (B_FALSE);
tcp->tcp_hdr_len);
if (added == -1)
return (B_FALSE);
}
} else {
return (B_FALSE);
return (B_FALSE);
return (B_FALSE);
}
return (B_TRUE);
}
/* BEGIN CSTYLED */
/*
*
* The sockfs ACCEPT path:
* =======================
*
* The eager is now established in its own perimeter as soon as SYN is
* received in tcp_conn_request(). When sockfs receives conn_ind, it
* completes the accept processing on the acceptor STREAM. The sending
* on the listener perimeter.
*
* Common control flow for 3 way handshake:
* ----------------------------------------
*
* incoming SYN (listener perimeter) -> tcp_rput_data()
* -> tcp_conn_request()
*
* incoming SYN-ACK-ACK (eager perim) -> tcp_rput_data()
* send T_CONN_IND (listener perim) -> tcp_send_conn_ind()
*
* Sockfs ACCEPT Path:
* -------------------
*
* open acceptor stream (tcp_open allocates tcp_wput_accept()
* as STREAM entry point)
*
* soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_wput_accept()
*
* tcp_wput_accept() extracts the eager and makes the q->q_ptr <-> eager
* association (we are not behind eager's squeue but sockfs is protecting us
* and no one knows about this stream yet). The STREAMS entry point q->q_info
* is changed to point at tcp_wput().
*
* tcp_wput_accept() sends any deferred eagers via tcp_send_pending() to
* listener (done on listener's perimeter).
*
* tcp_wput_accept() calls tcp_accept_finish() on eagers perimeter to finish
* accept.
*
* ---------------------------
*
* soaccept() sends T_CONN_RES on the listener STREAM.
*
* tcp_accept() -> tcp_accept_swap() complete the processing and send
* the bind_mp to eager perimeter to finish accept (tcp_rput_other()).
*
* Locks:
* ======
*
* listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0
* and listeners->tcp_eager_next_q.
*
* Referencing:
* ============
*
* 1) We start out in tcp_conn_request by eager placing a ref on
* listener and listener adding eager to listeners->tcp_eager_next_q0.
*
* 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before
* doing so we place a ref on the eager. This ref is finally dropped at the
* end of tcp_accept_finish() while unwinding from the squeue, i.e. the
* reference is dropped by the squeue framework.
*
* 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish
*
* The reference must be released by the same entity that added the reference.
* In the above scheme, the eager is the entity that adds and releases the
* references. Note that tcp_accept_finish executes in the squeue of the eager
* (albeit after it is attached to the acceptor stream). Though 1. executes
* in the listener's squeue, the eager is nascent at this point and the
* reference can be considered to have been added on behalf of the eager.
*
* Eager getting a Reset or listener closing:
* ==========================================
*
* Once the listener and eager are linked, the listener never does the unlink.
* If the listener needs to close, tcp_eager_cleanup() is called which queues
* a message on all eager perimeters. The eager then does the unlink, clears
* any pointers to the listener's queue and drops the reference to the
* listener. The listener waits in tcp_close outside the squeue until its
* refcount has dropped to 1. This ensures that the listener has waited for
* all eagers to clear their association with the listener.
*
* Similarly, if eager decides to go away, it can unlink itself and close.
* When the T_CONN_RES comes down, we check if eager has closed. Note that
* the reference to eager is still valid because of the extra ref we put
* in tcp_send_conn_ind.
*
* Listener can always locate the eager under the protection
* of the listener->tcp_eager_lock, and then do a refhold
* on the eager during the accept processing.
*
* The acceptor stream accesses the eager in the accept processing
* based on the ref placed on eager before sending T_conn_ind.
* The only entity that can negate this refhold is a listener close
* which is mutually exclusive with an active acceptor stream.
*
* Eager's reference on the listener
* ===================================
*
* If the accept happens (even on a closed eager) the eager drops its
* reference on the listener at the start of tcp_accept_finish. If the
* eager is killed due to an incoming RST before the T_conn_ind is sent up,
* the reference is dropped in tcp_closei_local. If the listener closes,
* the reference is dropped in tcp_eager_kill. In all cases the reference
* is dropped while executing in the eager's context (squeue).
*/
/* END CSTYLED */
/* Process the SYN packet, mp, directed at the listener 'tcp' */
/*
* THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
* tcp_rput_data will not see any SYN packets.
*/
/* ARGSUSED */
void
{
int err;
goto error2;
"tcp_conn_request: listen backlog (max=%d) "
"overflow (%d pending) on %s",
}
goto error2;
}
if (tcp->tcp_conn_req_cnt_q0 >=
/*
* Q0 is full. Drop a pending half-open req from the queue
* to make room for the new SYN req. Also mark the time we
* drop a SYN.
*
* A more aggressive defense against SYN attack will
* be to set the "tcp_syn_defense" flag now.
*/
if (!tcp_drop_q0(tcp)) {
"tcp_conn_request: listen half-open queue "
"(max=%d) full (%d pending) on %s",
}
goto error2;
}
}
/*
* IP adds STRUIO_EAGER and ensures that the received packet is
* M_DATA even if conn_ipv6_recvpktinfo is enabled or for ip6
* link local address. If IPSec is enabled, db_struioflag has
* STRUIO_POLICY set (mutually exclusive with STRUIO_EAGER);
* it is an error if neither of them is set.
*/
DB_CKSUMSTART(mp) = 0;
goto error2;
/*
* mp is updated in tcp_get_ipsec_conn().
*/
/*
* mp freed by tcp_get_ipsec_conn.
*/
return;
}
} else {
goto error2;
}
if (ipvers == IPV4_VERSION) {
} else {
}
} else {
}
if (err)
goto error3;
/* Inherit various TCP parameters from the listener */
/*
* tcp_adapt_ire() may change tcp_rwnd according to the ire metrics.
* If it does not, the eager's receive window will be set to the
* listener's receive window later in this function.
*/
/*
* Inherit listener's tcp_init_cwnd. Need to do this before
* calling tcp_process_options() where tcp_mss_set() is called
* to set the initial cwnd.
*/
/*
* Zones: tcp_adapt_ire() and tcp_send_data() both need the
* zone id before the accept is completed in tcp_wput_accept().
*/
/* Copy nexthop information from listener to eager */
if (connp->conn_nexthop_set) {
}
/*
* TSOL: tsol_input_proc() needs the eager's cred before the
* eager is accepted
*/
/*
* If the caller has the process-wide flag set, then default to MAC
* exempt mode. This allows read-down to unlabeled hosts.
*/
if (is_system_labeled()) {
else
} else {
}
char *, "eager connp(1) label on SYN mp(2) failed",
goto error3;
}
}
/*
* No need to check for multicast destination since ip will only pass
* up multicasts to those that have expressed interest.
* TODO: what about rejecting broadcasts?
* Also check that source is not a multicast or broadcast address.
*/
/*
* There should be no ire in the mp as we are being called after
* receiving the SYN.
*/
/*
* Adapt our mss, ttl, ... according to information provided in IRE.
*/
/* Undo the bind_hash_insert */
goto error3;
}
/* Process all TCP options. */
/* Is the other end ECN capable? */
}
/*
* listener->tcp_rq->q_hiwat should be the default window size or a
* window size changed via SO_RCVBUF option. First round up the
* eager's tcp_rwnd to the nearest MSS. Then find out the window
* scale option value if needed. Call tcp_rwnd_set() to finish the
* setting.
*
* Note if there is a rpipe metric associated with the remote host,
* we should not inherit receive window size from listener.
*/
if (eager->tcp_snd_ws_ok)
/*
* Note that this is the only place tcp_rwnd_set() is called for
* accepting a connection. We need to call it here instead of
* after the 3-way handshake because we need to tell the other
* side our rwnd in the SYN-ACK segment.
*/
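/*
 * Illustrative sketch, not part of the original source: rounding the
 * receive window up to a multiple of the MSS and deriving the window
 * scale needed to advertise it, as the comments above describe. The
 * names are hypothetical; 14 is the RFC 1323 limit on the shift
 * (TCP_MAX_WINSHIFT). Assumes mss > 0.
 */
#if 0	/* sketch only; excluded from compilation */
#include <stdint.h>

static uint32_t
example_round_rwnd(uint32_t rwnd, uint32_t mss, int *ws)
{
	const int max_ws = 14;

	rwnd = ((rwnd + mss - 1) / mss) * mss;	/* round up to nearest MSS */
	*ws = 0;
	while (*ws < max_ws && (rwnd >> *ws) > 65535u)
		(*ws)++;	/* smallest shift that fits the 16-bit field */
	return (rwnd);
}
#endif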
/*
* We eliminate the need for sockfs to send down a T_SVR4_OPTMGMT_REQ
* via soaccept()->soinheritoptions() which essentially applies
* all the listener options to the new STREAM. The options that we
* need to take care of are:
* SO_DEBUG, SO_REUSEADDR, SO_KEEPALIVE, SO_DONTROUTE, SO_BROADCAST,
* SO_USELOOPBACK, SO_OOBINLINE, SO_DGRAM_ERRIND, SO_LINGER,
* SO_SNDBUF, SO_RCVBUF.
*
* SO_RCVBUF: tcp_rwnd_set() above takes care of it.
* SO_SNDBUF: Set the tcp_xmit_hiwater for the eager. When
* tcp_maxpsz_set() gets called later from
* tcp_accept_finish(), the option takes effect.
*
*/
/* Set the TCP options */
if (tcp->tcp_ka_enabled)
/* Set the IP options */
/* Put a ref on the listener for the eager. */
/* Set tcp_listener before adding it to tcp_conn_fanout */
/*
* Tag this detached tcp vector for later retrieval
* by our listener client in tcp_accept().
*/
/*
* -1 is "special" and defined in TPI as something
* that should never be used in T_CONN_IND
*/
}
if (tcp->tcp_syn_defense) {
/* Don't drop the SYN that comes from a good IP source */
}
}
/*
* We need to insert the eager in its own perimeter but as soon
* as we do that, we expose the eager to the classifier and
* should not touch any field outside the eager's perimeter.
* So do all the work necessary before inserting the eager
* in its own perimeter. Be optimistic that ipcl_conn_insert()
* will succeed but undo everything if it fails.
*/
/*
* Increment the ref count as we are going to
* enqueue an mp in the squeue
*/
goto error;
}
/*
* We need to start the rto timer. In normal case, we start
* the timer after sending the packet on the wire (or at
* least believing that packet was sent by waiting for
* CALL_IP_WPUT() to return). Since this is the first packet
* being sent on the wire for the eager, our initial tcp_rto
* is at least tcp_rexmit_interval_min which is a fairly
* large value to allow the algorithm to adjust slowly to large
* fluctuations of RTT during first few transmissions.
*
* Starting the timer first and then sending the packet in this
* case shouldn't make much difference since tcp_rexmit_interval_min
* is of the order of several 100ms and starting the timer
* first and then sending the packet will result in a difference
* of a few microseconds.
*
* Without this optimization, we are forced to hold the fanout
* lock across the ipcl_bind_insert() and sending the packet
* so that we don't race against an incoming packet (maybe RST)
* for this eager.
*
* It is necessary to acquire an extra reference on the eager
* at this point and hold it until after tcp_send_data() to
* ensure against an eager close race.
*/
/*
* Insert the eager in its own perimeter now. We are ready to deal
* with any packets on eager.
*/
goto error;
}
} else {
goto error;
}
}
/* mark conn as fully-bound */
/* Send the SYN-ACK */
return;
/*
* If a connection already exists, send the mp to that connections so
* that it can be appropriately dealt with.
*/
if (!IPCL_IS_CONNECTED(econnp)) {
/*
* Something bad happened. ipcl_conn_insert()
* failed because a connection already existed
* in connected hash but we can't find it
* anymore (someone blew it away). Just
* free this message and hopefully remote
* will retransmit at which time the SYN can be
* treated as a new connection or dealt with
* a TH_RST if a connection already exists.
*/
} else {
}
} else {
/* Nobody wants this packet */
}
return;
}
/*
* In an ideal case of vertical partition in a NUMA architecture, it's
* beneficial to have the listener and all the incoming connections
* tied to the same squeue. The other constraint is that incoming
* connections should be tied to the squeue attached to the interrupted
* CPU for obvious locality reasons, so this leaves the listener to
* be tied to the same squeue. Our only problem is that when the listener
* is binding, the CPU that will get interrupted by the NIC whose
* IP address the listener is binding to is not even known. So
* the code below allows us to change that binding at the time the
* CPU is interrupted by virtue of the incoming connection's squeue.
*
* This is useful only in the case of a listener bound to a specific IP
* address. Other kinds of listeners get bound the
* very first time and there is no attempt to rebind them.
*/
void
{
} else {
goto done;
}
goto done;
/*
* No one from read or write side can access us now
* except for already queued packets on this squeue.
* But since we haven't changed the squeue yet, they
* can't execute. If they are processed after we have
* changed the squeue, they are sent back to the
* correct squeue down below.
* But a listener close can race with processing of
* incoming SYN. If incoming SYN processing changes
* the squeue then the listener close which is waiting
* to enter the squeue would operate on the wrong
* squeue. Hence we don't change the squeue here unless
* the refcount is exactly the minimum refcount. The
* minimum refcount of 4 is counted as - 1 each for
* TCP and IP, 1 for being in the classifier hash, and
* 1 for the mblk being processed.
*/
goto done;
}
}
do {
}
done:
} else {
}
}
/*
* Successful connect request processing begins when our client passes
* a T_CONN_REQ message into tcp_wput() and ends when tcp_rput() passes
* our T_OK_ACK reply message upstream. The control flow looks like this:
* upstream -> tcp_wput() -> tcp_wput_proto() -> tcp_connect() -> IP
* upstream <- tcp_rput() <- IP
* After various error checks are completed, tcp_connect() lays
* the target address and port into the composite header template,
* preallocates the T_OK_ACK reply message, constructs a full 12-byte bind
* request followed by an IRE request, and passes the three mblk message
* down to IP looking like this:
* O_T_BIND_REQ for IP --> IRE req --> T_OK_ACK for our client
* Processing continues in tcp_rput() when we receive the following message:
* T_BIND_ACK from IP --> IRE ack --> T_OK_ACK for our client
* After consuming the first two mblks, tcp_rput() calls tcp_timer(),
* to fire off the connection request, and then passes the T_OK_ACK mblk
* upstream that we filled in below. There are, of course, numerous
* error conditions along the way which truncate the processing described
* above.
*/
static void
{
struct T_conn_req *tcr;
return;
}
/*
* Determine the packet type based on the type of address passed in;
* the request should contain an IPv4 or IPv6 address.
* Make sure that the address family matches the
* family of the address passed down.
*/
switch (tcr->DEST_length) {
default:
return;
/*
* XXX: The check for valid DEST_length was not there
* in earlier releases and some buggy
* TLI apps (e.g. Sybase) got away with not feeding
* in sin_zero part of address.
* We allow that bug to keep those buggy apps humming.
* Test suites require the check on DEST_length.
* We construct a new mblk with a valid DEST_length and
* free the original so the rest of the code does
* not have to keep track of this special shorter
* length address case.
*/
struct T_conn_req *ntcr;
return;
}
/* Get pointer to shorter address to copy from original mp */
return;
}
/* Note: nsin->sin_zero is zero-filled by the sin_null assignment above */
if (tcr->OPT_length != 0) {
tcr->OPT_length);
}
}
/* FALLTHRU */
case sizeof (sin_t):
sizeof (sin_t));
return;
}
return;
}
return;
}
return;
}
break;
case sizeof (sin6_t):
sizeof (sin6_t));
return;
}
return;
}
return;
}
break;
}
/*
* should key on their sequence number and cut them loose.
*/
/*
* If options passed in, feed it for verification and handling
*/
if (tcr->OPT_length != 0) {
conn_opts_mp = NULL;
if (do_disconnect) {
ECONNREFUSED, 0);
if (!discon_mp) {
return;
}
if (!ok_mp) {
return;
}
} else {
}
return;
}
/*
* Success in setting options, the mp option buffer represented
* by OPT_length/offset has been potentially modified and
* contains results of option processing. We copy it in
* another mp to save it for potentially influencing returning
* it in T_CONN_CONN.
*/
if (!conn_opts_mp) {
return;
}
/*
* Note:
* These resulting option negotiations can include any
* end-to-end negotiation options but there no such
*/
}
}
/*
* If we're connecting to an IPv4-mapped IPv6 address, we need to
* make sure that the template IP header in the tcp structure is an
* IPv4 header, and that the tcp_ipversion is IPV4_VERSION. We
* need to this before we call tcp_bindi() so that the port lookup
* code will look for ports in the correct port space (IPv4 and
* IPv6 have separate port spaces).
*/
int err = 0;
if (err != 0) {
goto connect_failed;
}
}
if (tcp->tcp_issocket) {
/*
* TCP is _D_SODIRECT and sockfs is directly above so save
* the shared sonode sodirect_t pointer (if any) to enable
* TCP sodirect.
*/
}
case TCPS_IDLE:
/*
* We support quick connect, refer to comments in
* tcp_connect_*()
*/
/* FALLTHRU */
case TCPS_BOUND:
case TCPS_LISTEN:
return;
}
/*
* Destination address is a mapped IPv6 address.
* Source bound address should be unspecified or
* IPv6 mapped address as well.
*/
if (!IN6_IS_ADDR_UNSPECIFIED(
&tcp->tcp_bound_source_v6) &&
break;
}
} else {
srcid = 0;
}
return;
default:
break;
}
/*
* Note: Code below is the "failure" case
*/
/* return error ack and blow away saved option results if any */
else {
}
}
/*
* Handle connect to IPv4 destinations, including connections for AF_INET6
* sockets connecting to IPv4 mapped IPv6 destinations.
*/
static void
{
/* Check for attempt to connect to INADDR_ANY */
if (dstaddr == INADDR_ANY) {
/*
* SunOS 4.x and 4.3 BSD allow an application
* to connect a TCP socket to INADDR_ANY.
* When they do this, the kernel picks the
* address of one interface and uses it
* instead. The kernel usually ends up
* picking the address of the loopback
* interface. This is an undocumented feature.
* However, we provide the same thing here
* in order to have source and binary
* compatibility with SunOS 4.x.
* generate the T_CONN_CON.
*/
}
/* Handle __sin6_src_id if socket not bound to an IP address */
}
/*
* Don't let an endpoint connect to itself. Note that
* the test here does not catch the case where the
* source IP addr was left unspecified by the user. In
* this case, the source addr is set in tcp_adapt_ire()
* using the reply to the T_BIND message that we send
* down to IP here and the check is repeated in tcp_rput_other.
*/
goto failed;
}
/*
* Massage a source route if any putting the first hop
* in iph_dst. Compute a starting value for the checksum which
* takes into account that the original iph_dst should be
* included in the checksum but that ip will include the
* first hop in the source route in the tcp checksum.
*/
/*
* At this point the remote destination address and remote port fields
* in the tcp-four-tuple have been filled in the tcp structure. Now we
* have to see which state tcp was in so we can take appropriate action.
*/
/*
* We support a quick connect capability here, allowing
* clients to transition directly from IDLE to SYN_SENT.
* tcp_bindi will pick an unused port, insert the connection
* in the bind hash and transition to BOUND state.
*/
if (lport == 0) {
goto failed;
}
}
/*
* TODO: allow data with connect requests
* by unlinking M_DATA trailers here and
* linking them in behind the T_OK_ACK mblk.
* The tcp_rput() bind ack handler would then
* feed them to tcp_wput_data() rather than call
* tcp_timer().
*/
if (!mp) {
goto failed;
}
sizeof (ipa_conn_t));
} else {
sizeof (ipa6_conn_t));
}
if (mp1) {
/*
* We need to make sure that the conn_recv is set to a non-null
* value before we insert the conn_t into the classifier table.
* This is to avoid a race with an incoming packet which does
* an ipcl_classify().
*/
/* Hang onto the T_OK_ACK for later. */
else {
&tcp->tcp_sticky_ipp);
}
/*
* If the bind cannot complete immediately
* IP will arrange to call tcp_rput_other
* when the bind completes.
*/
return;
}
/* Error case */
/* return error ack and blow away saved option results if any */
else {
}
}
/*
* Handle connect to IPv6 destinations.
*/
static void
{
/*
* If we're here, it means that the destination address is a native
* IPv6 address. Return an error if tcp_ipversion is not IPv6. A
* reason why it might not be IPv6 is if the socket was bound to an
* IPv4-mapped IPv6 address.
*/
goto failed;
}
/*
* Interpret a zero destination to mean loopback.
* generate the T_CONN_CON.
*/
if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp)) {
}
/* Handle __sin6_src_id if socket not bound to an IP address */
}
/*
* Take care of the scope_id now and add ip6i_t
* if ip6i_t is not already allocated through TCP
* sticky options. At this point tcp_ip6h does not
* have dst info, thus use dstaddrp.
*/
if (scope_id != 0 &&
/* Already allocated */
} else {
int reterr;
ip2dbg(("tcp_connect_v6: SCOPE_ID set\n"));
if (reterr != 0)
goto failed;
ip1dbg(("tcp_connect_ipv6: tcp_bld_hdrs returned\n"));
}
}
/*
* Don't let an endpoint connect to itself. Note that
* the test here does not catch the case where the
* source IP addr was left unspecified by the user. In
* this case, the source addr is set in tcp_adapt_ire()
* using the reply to the T_BIND message that we send
* down to IP here and the check is repeated in tcp_rput_other.
*/
goto failed;
}
/*
* Massage a routing header (if present) putting the first hop
* in ip6_dst. Compute a starting value for the checksum which
* takes into account that the original ip6_dst should be
* included in the checksum but that ip will include the
* first hop in the source route in the tcp checksum.
*/
} else {
}
/*
* At this point the remote destination address and remote port fields
* in the tcp-four-tuple have been filled in the tcp structure. Now we
* have to see which state tcp was in so we can take appropriate action.
*/
/*
* We support a quick connect capability here, allowing
* clients to transition directly from IDLE to SYN_SENT.
* tcp_bindi will pick an unused port, insert the connection
* in the bind hash and transition to BOUND state.
*/
if (lport == 0) {
goto failed;
}
}
/*
* TODO: allow data with connect requests
* by unlinking M_DATA trailers here and
* linking them in behind the T_OK_ACK mblk.
* The tcp_rput() bind ack handler would then
* feed them to tcp_wput_data() rather than call
* tcp_timer().
*/
if (!mp) {
goto failed;
}
if (mp1) {
/*
* We need to make sure that the conn_recv is set to a non-null
* value before we insert the conn_t into the classifier table.
* This is to avoid a race with an incoming packet which does
* an ipcl_classify().
*/
/* Hang onto the T_OK_ACK for later. */
&tcp->tcp_sticky_ipp);
/* ip_bind_v6() may return ACK or ERROR */
return;
}
/* Error case */
/* return error ack and blow away saved option results if any */
else {
}
}
/*
* We need a stream q for detached closing tcp connections
* to use. Our client hereby indicates that this q is the
* one to use.
*/
static void
{
#ifdef NS_DEBUG
(void) printf("TCP_IOC_DEFAULT_Q for stack %d\n",
#endif
} else {
} else {
/*
* We are passing tcp_sticky_ipp as NULL
* as it is not useful for tcp_default queue
*
* Set conn_recv just in case.
*/
}
}
}
/*
* Our client hereby directs us to reject the connection request
* that tcp_conn_request() marked with 'seqnum'. Rejection consists
* of sending the appropriate RST, not an ICMP error.
*/
static void
{
return;
}
/*
* Right now, upper modules pass down a T_DISCON_REQ to TCP,
* when the stream is in BOUND state. Do not send a reset,
* since the destination IP address is not valid, and it can
* be the initialized value of all zeros (broadcast address).
*
* If TCP has sent down a bind request to IP and has not
* received the reply, reject the request. Otherwise, TCP
* will be confused.
*/
}
return;
}
/*
* According to TPI, for non-listeners, ignore seqnum
* and disconnect.
* The following interpretation of a -1 seqnum is historical
* and arguably implied by TPI (TPI only states that for T_CONN_IND,
* a valid seqnum should not be -1).
*
* -1 means disconnect everything
* regardless even on a listener.
*/
/*
* The connection can't be on the tcp_time_wait_head list
* since it is not detached.
*/
/*
* If it used to be a listener, check to make sure no one else
* has taken the port before switching back to LISTEN state.
*/
} else {
/* Allow tcp_bound_if listeners? */
}
} else if (old_state > TCPS_BOUND) {
tcp->tcp_conn_req_max = 0;
}
} else if (old_state == TCPS_ESTABLISHED ||
old_state == TCPS_CLOSE_WAIT) {
}
if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
(tcp->tcp_conn_req_cnt_q != 0)) {
tcp_eager_cleanup(tcp, 0);
}
if (old_state >= TCPS_ESTABLISHED) {
/* Send M_FLUSH according to TPI */
}
if (mp)
return;
return;
}
/* Send M_FLUSH according to TPI */
}
if (mp)
}
/*
* Diagnostic routine used to return a string associated with the tcp state.
* Note that if the caller does not supply a buffer, it will use an internal
* static string. This means that if multiple threads call this function at
* the same time, output can be corrupted... Note also that this function
* does not check the size of the supplied buffer. The caller has to make
* sure that it is big enough.
*/
static char *
{
char buf1[30];
char *buf;
char *cp;
char local_addrbuf[INET6_ADDRSTRLEN];
char remote_addrbuf[INET6_ADDRSTRLEN];
else
return ("NULL_TCP");
case TCPS_CLOSED:
cp = "TCP_CLOSED";
break;
case TCPS_IDLE:
cp = "TCP_IDLE";
break;
case TCPS_BOUND:
cp = "TCP_BOUND";
break;
case TCPS_LISTEN:
cp = "TCP_LISTEN";
break;
case TCPS_SYN_SENT:
cp = "TCP_SYN_SENT";
break;
case TCPS_SYN_RCVD:
cp = "TCP_SYN_RCVD";
break;
case TCPS_ESTABLISHED:
cp = "TCP_ESTABLISHED";
break;
case TCPS_CLOSE_WAIT:
cp = "TCP_CLOSE_WAIT";
break;
case TCPS_FIN_WAIT_1:
cp = "TCP_FIN_WAIT_1";
break;
case TCPS_CLOSING:
cp = "TCP_CLOSING";
break;
case TCPS_LAST_ACK:
cp = "TCP_LAST_ACK";
break;
case TCPS_FIN_WAIT_2:
cp = "TCP_FIN_WAIT_2";
break;
case TCPS_TIME_WAIT:
cp = "TCP_TIME_WAIT";
break;
default:
break;
}
switch (format) {
case DISP_ADDR_AND_PORT:
/*
* Note that we use the remote address in the tcp_b
* structure. This means that it will print out
* the real destination address, not the next hop's
* address if source routing is used.
*/
} else {
}
sizeof (local_addrbuf));
sizeof (remote_addrbuf));
break;
case DISP_PORT_ONLY:
default:
break;
}
return (buf);
}
/*
* Called via squeue to get on to eager's perimeter. It sends a
* TH_RST if eager is in the fanout table. The listener wants the
* eager to disappear either by means of tcp_eager_blowoff() or
* tcp_eager_cleanup() being called. tcp_eager_kill() can also be
* called (via squeue) if the eager cannot be inserted in the
* fanout table in tcp_conn_request().
*/
/* ARGSUSED */
void
{
/*
* We could be called because listener is closing. Since
* the eager is using the listener's queues, it's not safe.
* Better use the default queue just to send the TH_RST
* out.
*/
/*
* An eager's conn_fanout will be NULL if it's a duplicate
* for an existing 4-tuple in the conn fanout table.
* We don't want to send an RST out in such case.
*/
tcp_xmit_ctl("tcp_eager_kill, can't wait",
}
/* We are here because listener wants this eager gone */
if (eager->tcp_tconnind_started) {
/*
* The eager has sent a conn_ind up to the
* listener but listener decides to close
* instead. We need to drop the extra ref
* placed on eager in tcp_rput_data() before
* sending the conn_ind to listener.
*/
}
}
}
/*
* Reset any eager connection hanging off this listener marked
* with 'seqnum' and then reclaim its resources.
*/
static boolean_t
{
do {
return (B_FALSE);
}
if (eager->tcp_closemp_used) {
return (B_TRUE);
}
return (B_TRUE);
}
/*
* Reset any eager connection hanging off this listener
* and then reclaim its resources.
*/
static void
{
if (!q0_only) {
/* First cleanup q */
if (!eager->tcp_closemp_used) {
}
}
}
/* Then cleanup q0 */
if (!eager->tcp_closemp_used) {
}
}
}
/*
* If we are an eager connection hanging off a listener that hasn't
* formally accepted the connection yet, get off his list and blow off
* any data that we have accumulated.
*/
static void
{
/* Remove the eager tcp from q0 */
/*
* Take the eager out, if it is in the list of droppable
* eagers.
*/
if (tcp->tcp_syn_rcvd_timeout != 0) {
/* we have timed out before */
}
} else {
/*
* If we are unlinking the last
* element on the list, adjust
* tail pointer. Set tail pointer
* to nil when list is empty.
*/
if (listener->tcp_eager_last_q ==
NULL;
} else {
/*
* We won't get here if there
* is only one eager in the
* list.
*/
prev;
}
}
break;
}
}
}
}
/* Shorthand to generate and send TPI error acks to our client */
static void
{
}
/* Shorthand to generate and send TPI error acks to our client */
static void
{
struct T_error_ack *teackp;
}
}
/*
* Note: No locks are held when inspecting tcp_g_*epriv_ports
* but instead the code relies on:
* - the fact that the address of the array and its size never changes
* - the atomic assignment of the elements of the array
*/
/* ARGSUSED */
static int
{
int i;
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
if (tcps->tcps_g_epriv_ports[i] != 0)
tcps->tcps_g_epriv_ports[i]);
}
return (0);
}
/*
* Hold a lock while changing tcp_g_epriv_ports to prevent multiple
* threads from changing it at the same time.
*/
/* ARGSUSED */
static int
{
long new_value;
int i;
/*
* Fail the request if the new value does not lie within the
* port number limits.
*/
return (EINVAL);
}
/* Check if the value is already in the list */
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
return (EEXIST);
}
}
/* Find an empty slot */
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
if (tcps->tcps_g_epriv_ports[i] == 0)
break;
}
if (i == tcps->tcps_g_num_epriv_ports) {
return (EOVERFLOW);
}
/* Set the new value */
return (0);
}
/*
* Hold a lock while changing tcp_g_epriv_ports to prevent multiple
* threads from changing it at the same time.
*/
/* ARGSUSED */
static int
{
long new_value;
int i;
/*
* Fail the request if the new value does not lie within the
* port number limits.
*/
new_value >= 65536) {
return (EINVAL);
}
/* Check that the value is already in the list */
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
break;
}
if (i == tcps->tcps_g_num_epriv_ports) {
return (ESRCH);
}
/* Clear the value */
tcps->tcps_g_epriv_ports[i] = 0;
return (0);
}
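/*
 * Illustrative sketch only (not part of the original code) of the
 * add/remove pattern described above for the extra privileged port
 * array: writers serialize on a lock, readers scan without one and
 * rely on the atomic assignment of each element.  The array, its
 * size and the lock name below are assumptions for the example.
 */
#if 0
static uint16_t	ex_ports[64];
static int	ex_nports = 64;
static kmutex_t	ex_ports_lock;

static int
ex_ports_add(uint16_t new_value)
{
	int i;

	mutex_enter(&ex_ports_lock);
	for (i = 0; i < ex_nports; i++) {
		if (ex_ports[i] == new_value) {
			mutex_exit(&ex_ports_lock);
			return (EEXIST);	/* already present */
		}
	}
	for (i = 0; i < ex_nports; i++) {
		if (ex_ports[i] == 0) {
			ex_ports[i] = new_value; /* one atomic store */
			mutex_exit(&ex_ports_lock);
			return (0);
		}
	}
	mutex_exit(&ex_ports_lock);
	return (EOVERFLOW);			/* no free slot */
}

static int
ex_ports_del(uint16_t old_value)
{
	int i;

	mutex_enter(&ex_ports_lock);
	for (i = 0; i < ex_nports; i++) {
		if (ex_ports[i] == old_value) {
			ex_ports[i] = 0;	/* one atomic store */
			mutex_exit(&ex_ports_lock);
			return (0);
		}
	}
	mutex_exit(&ex_ports_lock);
	return (ESRCH);				/* not in the list */
}
#endif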
static int
{
case TCPS_IDLE:
return (TS_UNBND);
case TCPS_LISTEN:
/*
* Return whether there are outstanding T_CONN_IND waiting
* for the matching T_CONN_RES. Therefore don't count q0.
*/
if (tcp->tcp_conn_req_cnt_q > 0)
return (TS_WRES_CIND);
else
return (TS_IDLE);
case TCPS_BOUND:
return (TS_IDLE);
case TCPS_SYN_SENT:
return (TS_WCON_CREQ);
case TCPS_SYN_RCVD:
/*
* Note: assumption: this has to be the active open SYN_RCVD.
* The passive instance is detached in SYN_RCVD stage of
* incoming connection processing so we cannot get request
* for T_info_ack on it.
*/
return (TS_WACK_CRES);
case TCPS_ESTABLISHED:
return (TS_DATA_XFER);
case TCPS_CLOSE_WAIT:
return (TS_WREQ_ORDREL);
case TCPS_FIN_WAIT_1:
return (TS_WIND_ORDREL);
case TCPS_FIN_WAIT_2:
return (TS_WIND_ORDREL);
case TCPS_CLOSING:
case TCPS_LAST_ACK:
case TCPS_TIME_WAIT:
case TCPS_CLOSED:
/*
* Following TS_WACK_DREQ7 is a rendition of "not
* yet TS_IDLE" TPI state. There is no best match to any
* TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT}, but we treat
* them as a TSTATECHNG ("state is in the process of changing") kind
* of state, which is what this dummy state represents.
*/
return (TS_WACK_DREQ7);
default:
return (TS_UNBND);
}
}
static void
{
else
*tia = tcp_g_t_info_ack;
/* Not yet set - tcp_open does not set mss */
else
} else {
}
/* TODO: Default ETSDU is 1. Is that correct for tcp? */
}
/*
* This routine responds to T_CAPABILITY_REQ messages. It is called by
* tcp_wput. Much of the T_CAPABILITY_ACK information is copied from
* tcp_g_t_info_ack. The current state of the stream is copied from
* tcp_state.
*/
static void
{
struct T_capability_ack *tcap;
return;
}
return;
}
if (cap_bits1 & TC1_ACCEPTOR_ID) {
}
}
/*
* This routine responds to T_INFO_REQ messages. It is called by tcp_wput.
* Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack.
* The current state of the stream is copied from tcp_state.
*/
static void
{
if (!mp) {
return;
}
}
/* Respond to the TPI addr request */
static void
{
struct T_addr_ack *taa;
/* Make it large enough for worst case */
return;
}
return;
}
/*
* Note: Following code assumes 32 bit alignment of basic
* data structures like sin_t and struct T_addr_ack.
*/
/*
* Fill in local address
*/
/* Fill zeroes and then initialize non-zero fields */
/*
* Fill in Remote address
*/
}
}
}
/* Assumes that tcp_addr_req gets enough space and alignment */
static void
{
struct T_addr_ack *taa;
2 * sizeof (sin6_t));
/*
* Note: Following code assumes 32 bit alignment of basic
* data structures like sin6_t and struct T_addr_ack.
*/
/*
* Fill in local address
*/
/*
* Fill in Remote address
*/
}
}
}
/*
* Handle reinitialization of a tcp structure.
* Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
*/
static void
{
int err;
/* tcp_reinit should never be called for detached tcp_t's */
/* Cancel outstanding timers */
/*
* Reset everything in the state vector, after updating global
* MIB data from instance counters.
*/
tcp->tcp_ibsegs = 0;
tcp->tcp_obsegs = 0;
if (tcp->tcp_snd_zcopy_aware)
if (tcp->tcp_flow_stopped &&
}
/* Free b_next chain */
tcp->tcp_rcv_cnt = 0;
}
}
}
}
/*
* Following is a union with two members which are
* identical in type and size, so the following cleanup
* is enough.
*/
/*
* The connection can't be on the tcp_time_wait_head list
* since it is not detached.
*/
if (tcp->tcp_kssl_pending) {
/* Don't reset if it was initialized by bind. */
}
}
}
/*
*/
if (tcp->tcp_conn_req_max != 0) {
/*
* This is the case when a TLI program uses the same
* transport end point to accept a connection. This
* makes the TCP both a listener and acceptor. When
* this connection is closed, we need to set the state
* back to TCPS_LISTEN. Make sure that the eager list
* is reinitialized.
*
* Note that this stream is still bound to the four
* tuples of the previous connection in IP. If a new
* SYN with different foreign address comes in, IP will
* not find it and will send it to the global queue. In
* the global queue, TCP will do a tcp_lookup_listener()
* to find this stream. This works because this stream
* is only removed from connected hash.
*
*/
} else {
}
} else {
}
/*
* Initialize to default values
* Can't fail since enough header template space already allocated
* at open().
*/
/* Restore state in tcp_tcph */
else
/*
* Copy of the src addr. in tcp_t is needed in tcp_t
* since the lookup funcs can only lookup on tcp_t
*/
}
/*
* Force values to zero that need to be zero.
* Do not touch values associated with the BOUND or LISTEN state
* since the connection will end up in that state after the reinit.
* NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
* structure!
*/
static void
{
#ifndef lint
#define DONTCARE(x)
#define PRESERVE(x)
#else
#define DONTCARE(x) ((x) = (x))
#define PRESERVE(x) ((x) = (x))
#endif /* lint */
/* Should be ASSERT NULL on these with new code! */
}
tcp->tcp_valid_bits = 0;
tcp->tcp_last_rcv_lbolt = 0;
tcp->tcp_init_cwnd = 0;
tcp->tcp_urp_last_valid = 0;
tcp->tcp_hard_binding = 0;
tcp->tcp_hard_bound = 0;
tcp->tcp_fin_acked = 0;
tcp->tcp_fin_rcvd = 0;
tcp->tcp_fin_sent = 0;
tcp->tcp_ordrel_done = 0;
tcp->tcp_dontroute = 0;
tcp->tcp_broadcast = 0;
tcp->tcp_useloopback = 0;
tcp->tcp_reuseaddr = 0;
tcp->tcp_oobinline = 0;
tcp->tcp_dgram_errind = 0;
tcp->tcp_detached = 0;
tcp->tcp_bind_pending = 0;
tcp->tcp_unbind_pending = 0;
tcp->tcp_deferred_clean_death = 0;
tcp->tcp_linger = 0;
tcp->tcp_ka_enabled = 0;
tcp->tcp_zero_win_probe = 0;
tcp->tcp_loopback = 0;
tcp->tcp_localnet = 0;
tcp->tcp_syn_defense = 0;
tcp->tcp_set_timer = 0;
tcp->tcp_active_open = 0;
tcp->tcp_mdt_hdr_head = 0;
tcp->tcp_mdt_hdr_tail = 0;
tcp->tcp_conn_def_q0 = 0;
tcp->tcp_anon_priv_bind = 0;
}
}
tcp->tcp_rcv_ws = 0;
tcp->tcp_snd_ws = 0;
tcp->tcp_ts_recent = 0;
tcp->tcp_if_mtu = 0;
tcp->tcp_cwnd_cnt = 0;
tcp->tcp_rtt_update = 0;
tcp->tcp_rack_cnt = 0;
tcp->tcp_rack_cur_max = 0;
tcp->tcp_rack_abs_max = 0;
tcp->tcp_max_swnd = 0;
tcp->tcp_lingertime = 0;
tcp->tcp_client_errno = 0;
tcp->tcp_last_sent_len = 0;
tcp->tcp_dupack_cnt = 0;
/*
* If tcp_tracing flag is ON (i.e. We have a trace buffer
* in tcp structure and now tracing), Re-initialize all
* members of tcp_traceinfo.
*/
}
} else {
}
tcp->tcp_bound_if = 0;
tcp->tcp_ipv6_recvancillary = 0;
tcp->tcp_recvifindex = 0;
tcp->tcp_recvhops = 0;
tcp->tcp_closed = 0;
tcp->tcp_cleandeathtag = 0;
tcp->tcp_hopoptslen = 0;
}
tcp->tcp_dstoptslen = 0;
}
tcp->tcp_rtdstoptslen = 0;
}
tcp->tcp_rthdrlen = 0;
}
/* Reset fusion-related fields */
tcp->tcp_fuse_rcv_hiwater = 0;
tcp->tcp_fuse_rcv_unread_cnt = 0;
tcp->tcp_in_ack_unsent = 0;
/* Sodirect */
#ifdef DEBUG
#endif
}
/*
* Allocate necessary resources and initialize state vector.
* Guaranteed not to fail so that when an error is returned,
* the caller doesn't need to do any additional cleanup.
*/
int
{
int err;
return (err);
}
static int
{
int err;
/*
* Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
* will be close to tcp_rexmit_interval_initial. By doing this, we
* allow the algorithm to adjust slowly to large fluctuations of RTT
* during first few transmissions of a connection as seen in slow
* links.
*/
tcp->tcp_timer_backoff = 0;
tcp->tcp_ms_we_have_waited = 0;
/*
* Fix it to tcp_ip_abort_linterval later if it turns out to be a
* passive open.
*/
/* NOTE: ISS is now set in tcp_adapt_ire(). */
tcp->tcp_mdt_hdr_head = 0;
tcp->tcp_mdt_hdr_tail = 0;
/* Reset fusion-related fields */
tcp->tcp_fuse_rcv_hiwater = 0;
tcp->tcp_fuse_rcv_unread_cnt = 0;
/* Sodirect */
/* Initialize the header template */
} else {
}
if (err)
return (err);
/*
* Init the window scale to the max so tcp_rwnd_set() won't pare
* down tcp_rwnd. tcp_adapt_ire() will set the right value later.
*/
/*
* Init the tcp_debug option. This value determines whether TCP
* calls strlog() to print out debug messages. Doing this
* initialization here means that this value is not inherited thru
* tcp_reinit().
*/
return (0);
}
/*
* Initialize the IPv4 header. Loses any record of any IP options.
*/
static int
{
/*
* This is a simple initialization. If there's
* already a template, it should never be too small,
* so reuse it. Otherwise, allocate space for the new one.
*/
tcp->tcp_iphc_len = 0;
return (ENOMEM);
}
}
/* options are gone; may need a new label */
/*
* IP wants our header length in the checksum field to
* allow it to perform a single pseudo-header+checksum
* calculation on behalf of TCP.
* Include the adjustment for a source route once IP_OPTIONS is set.
*/
return (0);
}
/*
* Initialize the IPv6 header. Loses any record of any IPv6 extension headers.
*/
static int
{
/*
* This is a simple initialization. If there's
* already a template, it should never be too small,
* so reuse it. Otherwise, allocate space for the new one.
* Ensure that there is enough space to "downgrade" the tcp_t
* to an IPv4 tcp_t. This requires having space for a full load
* of IPv4 options, as well as a full load of TCP options
* (TCP_MAX_COMBINED_HEADER_LENGTH, 120 bytes); this is more space
* than a v6 header and a TCP header with a full load of TCP options
* (IPV6_HDR_LEN is 40 bytes; TCP_MAX_HDR_LENGTH is 60 bytes).
* We want to avoid reallocation in the "downgraded" case when
* processing outbound IPv4 options.
*/
tcp->tcp_iphc_len = 0;
return (ENOMEM);
}
}
/* options are gone; may need a new label */
/* Initialize the header template */
/*
* IP wants our header length in the checksum field to
* allow it to perform a single pseudo-header+checksum
* calculation on behalf of TCP.
* Include the adjustment for a source route when IPV6_RTHDR is set.
*/
return (0);
}
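/*
 * Illustrative compile-time check (not in the original source) of the
 * sizing argument above, using the usual constants: an IPv4 header
 * with a full load of options (60 bytes) plus a maximal TCP header
 * (60 bytes) is 120 bytes, which also covers IPV6_HDR_LEN (40) plus
 * a maximal TCP header (60).  The EX_* names are for the example only.
 */
#if 0
#define	EX_IP_MAX_HDR		60	/* IPv4 header + full options */
#define	EX_TCP_MAX_HDR		60	/* TCP header + full options */
#define	EX_IPV6_HDR		40
#define	EX_MAX_COMBINED		(EX_IP_MAX_HDR + EX_TCP_MAX_HDR) /* 120 */

/* Fails to compile if the v6 + TCP case would not fit. */
typedef char ex_combined_big_enough[
	(EX_IPV6_HDR + EX_TCP_MAX_HDR <= EX_MAX_COMBINED) ? 1 : -1];
#endif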
/* At minimum we need 8 bytes in the TCP header for the lookup */
#define ICMP_MIN_TCP_HDR 8
/*
* tcp_icmp_error is called by tcp_rput_other to process ICMP error messages
* passed up by IP. The message is always received on the correct tcp_t.
* Assumes that IP has pulled up everything up to and including the ICMP header.
*/
void
{
int iph_hdr_length;
/* Assume IP provides aligned packets - otherwise toss */
return;
}
/*
* Since ICMP errors are normal data marked with M_CTL when sent
* to TCP or UDP, we have to look for an IPSEC_IN value to identify
* packets starting with an ipsec_info_t, see ipsec_info.h.
*/
if ((mp_size == sizeof (ipsec_info_t)) &&
/* IP should have done this */
ipsec_mctl = B_TRUE;
}
/*
* Verify that we have a complete outer IP header. If not, drop it.
*/
return;
}
/*
* Verify IP version. Anything other than IPv4 or IPv6 packet is sent
* upstream. ICMPv6 is handled in tcp_icmp_error_ipv6.
*/
switch (IPH_HDR_VERSION(ipha)) {
case IPV6_VERSION:
return;
case IPV4_VERSION:
break;
default:
goto noticmpv4;
}
/* Skip past the outer IP and ICMP headers */
/*
* If we don't have the correct outer IP header length or if the ULP
* is not IPPROTO_ICMP or if we don't have a complete inner IP header
* send it upstream.
*/
if (iph_hdr_length < sizeof (ipha_t) ||
goto noticmpv4;
}
/* Skip past the inner IP and find the ULP header */
/*
* If we don't have the correct inner IP header length or if the ULP
* is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR
* bytes of TCP header, drop it.
*/
if (iph_hdr_length < sizeof (ipha_t) ||
goto noticmpv4;
}
if (TCP_IS_DETACHED_NONEAGER(tcp)) {
if (ipsec_mctl) {
} else {
}
if (secure) {
/*
* If we are willing to accept this in clear
* we don't have to verify policy.
*/
/*
* tcp_check_policy called
* ip_drop_packet() on failure.
*/
return;
}
}
}
} else if (ipsec_mctl) {
/*
* This is a hard_bound connection. IP has already
* verified policy. We don't have to do it again.
*/
}
/*
* TCP SHOULD check that the TCP sequence number contained in
* payload of the ICMP error message is within the range
* SND.UNA <= SEG.SEQ < SND.NXT.
*/
/*
* If the ICMP message is bogus, should we kill the
* connection, or should we just drop the bogus ICMP
* message? It would probably make more sense to just
* drop the message so that if this one managed to get
* in, the real connection should not suffer.
*/
goto noticmpv4;
}
switch (icmph->icmph_type) {
case ICMP_DEST_UNREACHABLE:
switch (icmph->icmph_code) {
/*
* Reduce the MSS based on the new MTU. This will
* eliminate any fragmentation locally.
* N.B. There may well be some funny side-effects on
* the local send policy and the remote receive policy.
* Pending further research, we provide
* tcp_ignore_path_mtu just in case this proves
* disastrous somewhere.
*
* After updating the MSS, retransmit part of the
* dropped segment using the new mss by calling
* tcp_wput_data(). Need to adjust all those
* params to make sure tcp_wput_data() work properly.
*/
if (tcps->tcps_ignore_path_mtu)
break;
/*
* Decrease the MSS by time stamp options
* IP options and IPSEC options. tcp_hdr_len
* includes time stamp option and IP option
* length.
*/
/*
* Only update the MSS if the new one is
* smaller than the previous one. This is
* to avoid problems when getting multiple
* ICMP errors for the same MTU.
*/
break;
/*
* Stop doing PMTU if new_mss is less than 68
* or less than tcp_mss_min.
* The value 68 comes from rfc 1191.
*/
0;
/*
* Make sure we have something to
* send.
*/
/*
* Shrink tcp_cwnd in
*/
(tcp->tcp_unsent == 0)) {
} else {
}
tcp->tcp_dupack_cnt = 0;
}
break;
case ICMP_PORT_UNREACHABLE:
case TCPS_SYN_SENT:
case TCPS_SYN_RCVD:
/*
* ICMP can snipe away incipient
* TCP connections as long as
* seq number is same as initial
* send seq number.
*/
(void) tcp_clean_death(tcp,
ECONNREFUSED, 6);
}
break;
}
break;
case ICMP_HOST_UNREACHABLE:
case ICMP_NET_UNREACHABLE:
/* Record the error in case we finally time out. */
else
/*
* Ditch the half-open connection if we
* suspect a SYN attack is under way.
*/
(void) tcp_clean_death(tcp,
}
}
break;
default:
break;
}
break;
case ICMP_SOURCE_QUENCH: {
/*
* use a global boolean to control
* whether TCP should respond to ICMP_SOURCE_QUENCH.
* The default is false.
*/
if (tcp_icmp_source_quench) {
/*
* Reduce the sending rate as if we got a
* retransmit timeout
*/
tcp->tcp_cwnd_cnt = 0;
}
break;
}
}
}
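/*
 * Illustrative sketch (not part of the original code) of the sequence
 * range check described above: an ICMP error is only trusted if the
 * quoted TCP sequence number satisfies SND.UNA <= SEG.SEQ < SND.NXT.
 * The EX_SEQ_* macros are the usual modulo-2^32 sequence comparisons
 * and are assumptions for the example.
 */
#if 0
#define	EX_SEQ_LT(a, b)		((int32_t)((a) - (b)) < 0)
#define	EX_SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

static int
ex_icmp_seq_in_window(uint32_t seg_seq, uint32_t suna, uint32_t snxt)
{
	/* Accept only if SND.UNA <= SEG.SEQ < SND.NXT. */
	return (EX_SEQ_GEQ(seg_seq, suna) && EX_SEQ_LT(seg_seq, snxt));
}
#endif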
/*
* tcp_icmp_error_ipv6 is called by tcp_rput_other to process ICMPv6
* error messages passed up by IP.
* Assumes that IP has pulled up all the extension headers as well
* as the ICMPv6 header.
*/
static void
{
/*
* The caller has determined if this is an IPSEC_IN packet and
* set ipsec_mctl appropriately (see tcp_icmp_error).
*/
if (ipsec_mctl)
/*
* Verify that we have a complete IP header. If not, send it upstream.
*/
return;
}
/*
* Verify this is an ICMPV6 packet, else send it upstream.
*/
&nexthdrp) ||
*nexthdrp != IPPROTO_ICMPV6) {
goto noticmpv6;
}
/*
* Verify if we have a complete ICMP and inner IP header.
*/
goto noticmpv6;
goto noticmpv6;
/*
* Validate inner header. If the ULP is not IPPROTO_TCP or if we don't
* have at least ICMP_MIN_TCP_HDR bytes of TCP header drop the
* packet.
*/
if ((*nexthdrp != IPPROTO_TCP) ||
goto noticmpv6;
}
/*
* ICMP errors either come in on the right queue or
* get switched to the right queue. If it comes on the
* right queue, policy check has already been done by IP
* and thus free the first_mp without verifying the policy.
* If it has come for a non-hard bound connection, we need
* to verify policy as IP may not have done it.
*/
if (!tcp->tcp_hard_bound) {
if (ipsec_mctl) {
} else {
}
if (secure) {
/*
* If we are willing to accept this in clear
* we don't have to verify policy.
*/
/*
* tcp_check_policy called
* ip_drop_packet() on failure.
*/
return;
}
}
}
} else if (ipsec_mctl) {
/*
* This is a hard_bound connection. IP has already
* verified policy. We don't have to do it again.
*/
}
/*
* TCP SHOULD check that the TCP sequence number contained in
* payload of the ICMP error message is within the range
* SND.UNA <= SEG.SEQ < SND.NXT.
*/
/*
* If the ICMP message is bogus, should we kill the
* connection, or should we just drop the bogus ICMP
* message? It would probably make more sense to just
* drop the message so that if this one managed to get
* in, the real connection should not suffer.
*/
goto noticmpv6;
}
switch (icmp6->icmp6_type) {
case ICMP6_PACKET_TOO_BIG:
/*
* Reduce the MSS based on the new MTU. This will
* eliminate any fragmentation locally.
* N.B. There may well be some funny side-effects on
* the local send policy and the remote receive policy.
* Pending further research, we provide
* tcp_ignore_path_mtu just in case this proves
* disastrous somewhere.
*
* After updating the MSS, retransmit part of the
* dropped segment using the new mss by calling
* tcp_wput_data(). Need to adjust all those
* params to make sure tcp_wput_data() work properly.
*/
if (tcps->tcps_ignore_path_mtu)
break;
/*
* Decrease the MSS by time stamp options
* IP options and IPSEC options. tcp_hdr_len
* includes time stamp option and IP option
* length.
*/
/*
* Only update the MSS if the new one is
* smaller than the previous one. This is
* to avoid problems when getting multiple
* ICMP errors for the same MTU.
*/
break;
/*
* Make sure we have something to
* send.
*/
/*
* Shrink tcp_cwnd in
*/
(tcp->tcp_unsent == 0)) {
} else {
}
tcp->tcp_dupack_cnt = 0;
}
break;
case ICMP6_DST_UNREACH:
switch (icmp6->icmp6_code) {
case ICMP6_DST_UNREACH_NOPORT:
(void) tcp_clean_death(tcp,
ECONNREFUSED, 8);
}
break;
case ICMP6_DST_UNREACH_ADMIN:
case ICMP6_DST_UNREACH_ADDR:
/* Record the error in case we finally time out. */
/*
* Ditch the half-open connection if we
* suspect a SYN attack is under way.
*/
(void) tcp_clean_death(tcp,
}
}
break;
default:
break;
}
break;
case ICMP6_PARAM_PROB:
/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
(void) tcp_clean_death(tcp,
ECONNREFUSED, 10);
}
break;
}
break;
case ICMP6_TIME_EXCEEDED:
default:
break;
}
}
/*
* IP recognizes seven kinds of bind requests:
*
* - A zero-length address binds only to the protocol number.
*
* - A 4-byte address is treated as a request to
* validate that the address is a valid local IPv4
* address, appropriate for an application to bind to.
* IP does the verification, but does not make any note
* of the address at this time.
*
* - A 16-byte address is treated as a request
* to validate a local IPv6 address, as the 4-byte
* address case above.
*
* - A 16-byte sockaddr_in to validate the local IPv4 address and also
* use it for the inbound fanout of packets.
*
* - A 24-byte sockaddr_in6 to validate the local IPv6 address and also
* use it for the inbound fanout of packets.
*
* - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout
* information consisting of local and remote addresses
* and ports. In this case, the addresses are both
* validated as appropriate for this operation, and, if
* so, the information is retained for use in the
* inbound fanout.
*
* - A 36-byte address (ipa6_conn_t) containing complete IPv6
* fanout information, like the 12-byte case above.
*
* IP will also fill in the IRE request mblk with information
* regarding our peer. In all cases, we notify IP of our protocol
* type by appending a single protocol byte to the bind request.
*/
static mblk_t *
{
char *cp;
struct T_bind_req *tbr;
ipa_conn_t *ac;
if (!mp)
return (mp);
tbr->CONIND_number = 0;
switch (addr_length) {
case sizeof (ipa_conn_t):
return (NULL);
}
/* cp known to be 32 bit aligned */
break;
case sizeof (ipa6_conn_t):
return (NULL);
}
/* cp known to be 32 bit aligned */
} else {
}
break;
case sizeof (sin_t):
/*
* NOTE: IPV6_ADDR_LEN also has same size.
* Use family to discriminate.
*/
break;
} else {
}
break;
case sizeof (sin6_t):
break;
case IP_ADDR_LEN:
break;
}
/* Add protocol number to end */
return (mp);
}
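/*
 * Illustrative sketch (not in the original source) of dispatching on
 * the bind-address length as enumerated in the comment above.  The
 * byte counts come from that list; the function and its strings are
 * for the example only.
 */
#if 0
static const char *
ex_bind_kind(size_t addr_length)
{
	switch (addr_length) {
	case 0:
		return ("protocol number only");
	case 4:
		return ("validate local IPv4 address");
	case 16:
		/* sin_t and IPV6_ADDR_LEN are both 16; use family */
		return ("sockaddr_in or validate local IPv6 address");
	case 24:
		return ("sockaddr_in6 for inbound fanout");
	case 12:
		return ("ipa_conn_t: full IPv4 fanout info");
	case 36:
		return ("ipa6_conn_t: full IPv6 fanout info");
	default:
		return ("unknown bind request");
	}
}
#endif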
/*
* Notify IP that we are having trouble with this connection. IP should
* blow the IRE away and start over.
*/
static void
{
/* IPv6 has NUD thus notification to delete the IRE is not needed */
return;
return;
return;
}
/*
* Note: in the case of source routing we want to blow away the
* route to the first source route hop.
*/
}
/* Unlink and return any mblk that looks like it contains an ire */
static mblk_t *
{
for (;;) {
break;
case IRE_DB_TYPE:
case IRE_DB_REQ_TYPE:
return (mp);
default:
break;
}
}
return (mp);
}
/*
* Timer callback routine for keepalive probe. We do a fake resend of
* last ACKed byte. Then set a timer using RTO. When the timer expires,
* check to see if we have heard anything from the other end for the last
* RTO period. If we have, set the timer to expire for another
* tcp_keepalive_intrvl and check again. If we have not, set a timer using
* RTO << 1 and check again when it expires. Keep exponentially increasing
* the timeout if we have not heard from the other side. If for more than
* (tcp_ka_interval + tcp_ka_abort_thres) we have not heard anything,
* kill the connection unless the keepalive abort threshold is 0. In
* that case, we will probe "forever."
*/
static void
tcp_keepalive_killer(void *arg)
{
tcp->tcp_ka_tid = 0;
return;
/*
* Keepalive probe should only be sent if the application has not
* done a close on the connection.
*/
return;
}
/* Timer fired too early, restart it. */
return;
}
/*
* If we have not heard from the other side for a long
* time, kill the connection unless the keepalive abort
* threshold is 0. In that case, we will probe "forever."
*/
if (tcp->tcp_ka_abort_thres != 0 &&
return;
}
/* Fake resend of last ACKed byte. */
/*
* if allocation failed, fall through to start the
* timer back.
*/
if (tcp->tcp_ka_last_intrvl != 0) {
int max;
/*
* We should probe again at least
* in ka_intrvl, but not more than
* tcp_rexmit_interval_max.
*/
} else {
}
return;
}
}
} else {
tcp->tcp_ka_last_intrvl = 0;
}
/* firetime can be negative if (mp1 == NULL || mp == NULL) */
}
}
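/*
 * Illustrative sketch (not the original code) of the keepalive timing
 * policy described above, under simplified assumptions: kill the
 * connection once the peer has been silent for ka_intrvl +
 * ka_abort_thres (unless the abort threshold is zero), otherwise
 * back the probe interval off exponentially, starting from the RTO
 * and bounded by rexmit_max.  All parameter names are for the example.
 */
#if 0
static int
ex_keepalive_next(long idle, long ka_intrvl, long ka_abort_thres,
    long rto, long last_intrvl, long rexmit_max, long *next_intrvl)
{
	if (ka_abort_thres != 0 && idle >= ka_intrvl + ka_abort_thres)
		return (-1);			/* kill the connection */

	if (last_intrvl == 0)
		*next_intrvl = rto;		/* first unanswered probe */
	else if (last_intrvl >= rexmit_max / 2)
		*next_intrvl = rexmit_max;	/* cap the backoff */
	else
		*next_intrvl = last_intrvl << 1;
	return (0);				/* keep probing */
}
#endif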
int
{
int maxpsz;
if (TCP_IS_DETACHED(tcp))
return (mss);
/*
* Set the sd_qn_maxpsz according to the socket send buffer
* size, and sd_maxblk to INFPSZ (-1). This will essentially
* instruct the stream head to copyin user data into contiguous
* kernel-allocated buffers without breaking it up into smaller
* chunks. We round up the buffer size to the nearest SMSS.
*/
else
} else {
/*
* Set sd_qn_maxpsz to approx half the (receivers) buffer
* (and a multiple of the mss). This instructs the stream
* head to break down larger than SMSS writes into SMSS-
* size mblks, up to tcp_maxpsz_multiplier mblks at a time.
*/
/* Round up to nearest mss */
}
}
if (set_maxblk)
(void) mi_set_sth_maxblk(q, mss);
return (mss);
}
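/*
 * Illustrative helper (not in the original source): rounding a buffer
 * size up to the nearest multiple of the MSS, as described above for
 * the stream head maxpsz setting.
 */
#if 0
static int
ex_round_up_to_mss(int bufsize, int mss)
{
	/* Round bufsize up to the next multiple of mss. */
	return (((bufsize + mss - 1) / mss) * mss);
}
#endif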
/*
* Extract option values from a tcp header. We put any found values into the
* tcpopt struct and return a bitmask saying which options were found.
*/
static int
{
int len;
int found = 0;
switch (*up) {
case TCPOPT_EOL:
break;
case TCPOPT_NOP:
up++;
continue;
case TCPOPT_MAXSEG:
if (len < TCPOPT_MAXSEG_LEN ||
break;
/* Caller must handle tcp_mss_min and tcp_mss_max_* */
up += TCPOPT_MAXSEG_LEN;
continue;
case TCPOPT_WSCALE:
break;
else
up += TCPOPT_WS_LEN;
continue;
case TCPOPT_SACK_PERMITTED:
if (len < TCPOPT_SACK_OK_LEN ||
break;
up += TCPOPT_SACK_OK_LEN;
continue;
case TCPOPT_SACK:
break;
/* If TCP is not interested in SACK blks... */
continue;
}
up += TCPOPT_HEADER_LEN;
/*
* If the list is empty, allocate one and assume
* nothing is sack'ed.
*/
&(tcp->tcp_num_notsack_blk),
&(tcp->tcp_cnt_notsack_list));
/*
* Make sure tcp_notsack_list is not NULL.
* This happens when kmem_alloc(KM_NOSLEEP)
* returns NULL.
*/
continue;
}
}
while (sack_len > 0) {
break;
}
up += 4;
up += 4;
sack_len -= 8;
/*
* Bounds checking. Make sure the SACK
* info is within tcp_suna and tcp_snxt.
* If this SACK blk is out of bound, ignore
* it but continue to parse the following
* blks.
*/
continue;
}
&(tcp->tcp_num_notsack_blk),
&(tcp->tcp_cnt_notsack_list));
}
}
continue;
case TCPOPT_TSTAMP:
if (len < TCPOPT_TSTAMP_LEN ||
break;
up += TCPOPT_TSTAMP_LEN;
continue;
default:
break;
continue;
}
break;
}
return (found);
}
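/*
 * Illustrative skeleton (not the original code) of the kind/length
 * option walk used above: `up` points at the first TCP option byte
 * and `endp` just past the last.  Only the framing is shown; the
 * bookkeeping of `found` here is for the example only.
 */
#if 0
static int
ex_walk_tcp_options(const uint8_t *up, const uint8_t *endp)
{
	int found = 0;

	while (up < endp) {
		uint8_t kind = up[0];
		uint8_t len;

		if (kind == 0)			/* TCPOPT_EOL */
			break;
		if (kind == 1) {		/* TCPOPT_NOP: one byte */
			up++;
			continue;
		}
		if (endp - up < 2)
			break;			/* no room for a length */
		len = up[1];
		if (len < 2 || up + len > endp)
			break;			/* malformed option */
		found |= 1 << (kind & 0x1f);	/* example bookkeeping */
		up += len;
	}
	return (found);
}
#endif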
/*
* Set the mss associated with a particular tcp based on its current value,
* and a new one passed in. Observe minimums and maximums, and reset
* other state variables that we want to view as multiples of mss.
*
* This function is called mainly because values like tcp_mss, tcp_cwnd,
* highwater marks etc. need to be initialized or adjusted.
* 1) From tcp_process_options() when the other side's SYN/SYN-ACK
*    packet arrives.
* 2) We need to set a new MSS when ICMP_FRAGMENTATION_NEEDED or
* ICMP6_PACKET_TOO_BIG arrives.
* 3) From tcp_paws_check() if the other side stops sending the timestamp,
* to increase the MSS to use the extra bytes available.
*
* Callers except tcp_paws_check() ensure that they only reduce mss.
*/
static void
{
else
/*
* Unless naglim has been set by our client to
* a non-mss value, force naglim to track mss.
* This can help to aggregate small writes.
*/
/*
* TCP should be able to buffer at least 4 MSS data for obvious
* performance reasons.
*/
if (do_ss) {
/*
* Either tcp_cwnd is as yet uninitialized, or mss is
* changing due to a reduction in MTU, presumably as a
* result of a new path component; reset cwnd to its
* "initial" value, as a multiple of the new mss.
*/
} else {
/*
* Called by tcp_paws_check(), the mss increased
* marginally to allow use of space previously taken
* by the timestamp option. It would be inappropriate
* to apply slow start or tcp_init_cwnd values to
* tcp_cwnd, simply adjust to a multiple of the new mss.
*/
tcp->tcp_cwnd_cnt = 0;
}
}
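/*
 * Illustrative sketch (not the original code) of an RFC 3390 style
 * initial congestion window as a multiple of the (new) MSS:
 * min(4*MSS, max(2*MSS, 4380 bytes)).  This is the general formula
 * referenced elsewhere in this file, not necessarily the exact
 * computation used here.
 */
#if 0
static uint32_t
ex_rfc3390_init_cwnd(uint32_t mss)
{
	uint32_t cw = 2 * mss;

	if (cw < 4380)
		cw = 4380;		/* max(2*MSS, 4380) */
	if (cw > 4 * mss)
		cw = 4 * mss;		/* min(..., 4*MSS) */
	return (cw);
}
#endif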
static int
{
}
static int
{
}
static int
{
int err;
return (0);
return (EINVAL);
if (!(flag & SO_ACCEPTOR)) {
/*
* Special case for install: miniroot needs to be able to
* access files via NFS as though it were always in the
* global zone.
*/
} else {
netstack_t *ns;
/*
* For exclusive stacks we set the zoneid to zero
* to make TCP operate as if in the global zone.
*/
else
}
/*
* For stackid zero this is done from strplumb.c, but
* non-zero stackids are handled here.
*/
}
}
} else {
/*
* Either minor numbers in the large arena were exhausted
* or a non socket application is doing the open.
* Try to allocate from the small arena.
*/
return (EBUSY);
}
}
if (flag & SO_ACCEPTOR) {
/* No netstack_find_by_cred, hence no netstack_rele needed */
q->q_qinfo = &tcp_acceptor_rinit;
/*
* the conn_dev and minor_arena will be subsequently used by
* tcp_wput_accept() and tcpclose_accept() to figure out the
* minor device number for this connection from the q_ptr.
*/
qprocson(q);
return (0);
}
/*
* Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
* so we drop it by one.
*/
return (ENOSR);
}
if (isv6) {
} else {
}
/*
* TCP keeps a copy of cred for cache locality reasons but
* we put a reference only once. If connp->conn_cred
* becomes invalid, tcp_cred should also be set to NULL.
*/
/*
* If the caller has the process-wide flag set, then default to MAC
* exempt mode. This allows read-down to unlabeled hosts.
*/
if (flag & SO_SOCKSTR) {
/*
* No need to insert a socket in tcp acceptor hash.
* If it was a socket acceptor stream, we dealt with
* it above. A socket listener can never accept a
* connection and doesn't need acceptor_id.
*/
} else {
#ifdef _ILP32
#else
#endif /* _ILP32 */
}
if (tcps->tcps_trace)
if (err != 0) {
return (err);
}
/* Non-zero default values */
/*
* Put the ref for TCP. Ref for IP was already put
* by ipcl_conn_create. Also Make the conn_t globally
* visible to walkers
*/
qprocson(q);
return (0);
}
/*
* Some TCP options can be "set" by requesting them in the option
* buffer. This is needed for XTI feature test though we do not
* allow it in general. We interpret that this mechanism is more
* applicable to OSI protocols and need not be allowed in general.
* This routine filters out options for which it is not allowed (most)
* and lets through those (few) for which it is. [ The XTI interface
* test suite specifics will imply that any XTI_GENERIC level XTI_* if
* ever implemented will have to be allowed here ].
*/
static boolean_t
{
switch (level) {
case IPPROTO_TCP:
switch (name) {
case TCP_NODELAY:
return (B_TRUE);
default:
return (B_FALSE);
}
/*NOTREACHED*/
default:
return (B_FALSE);
}
/*NOTREACHED*/
}
/*
* This routine gets default values of certain options whose default
* values are maintained by protocol specific code
*/
/* ARGSUSED */
int
{
switch (level) {
case IPPROTO_TCP:
switch (name) {
case TCP_NOTIFY_THRESHOLD:
break;
case TCP_ABORT_THRESHOLD:
break;
break;
case TCP_CONN_ABORT_THRESHOLD:
break;
default:
return (-1);
}
break;
case IPPROTO_IP:
switch (name) {
case IP_TTL:
break;
default:
return (-1);
}
break;
case IPPROTO_IPV6:
switch (name) {
case IPV6_UNICAST_HOPS:
break;
default:
return (-1);
}
break;
default:
return (-1);
}
return (sizeof (int));
}
/*
* TCP routine to get the values of options.
*/
int
{
switch (level) {
case SOL_SOCKET:
switch (name) {
case SO_LINGER: {
}
return (sizeof (struct linger));
case SO_DEBUG:
break;
case SO_KEEPALIVE:
break;
case SO_DONTROUTE:
break;
case SO_USELOOPBACK:
break;
case SO_BROADCAST:
break;
case SO_REUSEADDR:
break;
case SO_OOBINLINE:
break;
case SO_DGRAM_ERRIND:
break;
case SO_TYPE:
*i1 = SOCK_STREAM;
break;
case SO_SNDBUF:
break;
case SO_RCVBUF:
break;
case SO_SND_COPYAVOID:
SO_SND_COPYAVOID : 0;
break;
case SO_ALLZONES:
break;
case SO_ANON_MLP:
break;
case SO_MAC_EXEMPT:
break;
case SO_EXCLBIND:
break;
case SO_PROTOTYPE:
*i1 = IPPROTO_TCP;
break;
case SO_DOMAIN:
break;
default:
return (-1);
}
break;
case IPPROTO_TCP:
switch (name) {
case TCP_NODELAY:
break;
case TCP_MAXSEG:
break;
case TCP_NOTIFY_THRESHOLD:
break;
case TCP_ABORT_THRESHOLD:
break;
break;
case TCP_CONN_ABORT_THRESHOLD:
break;
case TCP_RECVDSTADDR:
break;
case TCP_ANONPRIVBIND:
break;
case TCP_EXCLBIND:
break;
case TCP_INIT_CWND:
break;
case TCP_KEEPALIVE_THRESHOLD:
break;
break;
case TCP_CORK:
break;
default:
return (-1);
}
break;
case IPPROTO_IP:
return (-1);
switch (name) {
case IP_OPTIONS:
case T_IP_OPTIONS: {
/*
* This is compatible with BSD in that it only returns
* the reverse source route with the final destination
* as the last entry. The first 4 bytes of the option
* will contain the final destination.
*/
int opt_len;
/* Caller ensures enough space */
if (opt_len > 0) {
/*
* TODO: Do we have to handle getsockopt on an
* initiator as well?
*/
}
return (0);
}
case IP_TOS:
case T_IP_TOS:
break;
case IP_TTL:
break;
case IP_NEXTHOP:
/* Handled at IP level */
return (-EINVAL);
default:
return (-1);
}
break;
case IPPROTO_IPV6:
/*
* IPPROTO_IPV6 options are only supported for sockets
* that are using IPv6 on the wire.
*/
return (-1);
}
switch (name) {
case IPV6_UNICAST_HOPS:
break; /* goto sizeof (int) option return */
case IPV6_BOUND_IF:
/* Zero if not set */
break; /* goto sizeof (int) option return */
case IPV6_RECVPKTINFO:
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_RECVTCLASS:
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_RECVHOPLIMIT:
if (tcp->tcp_ipv6_recvancillary &
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_RECVHOPOPTS:
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_RECVDSTOPTS:
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case _OLD_IPV6_RECVDSTOPTS:
if (tcp->tcp_ipv6_recvancillary &
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_RECVRTHDR:
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_RECVRTHDRDSTOPTS:
if (tcp->tcp_ipv6_recvancillary &
*i1 = 1;
else
*i1 = 0;
break; /* goto sizeof (int) option return */
case IPV6_PKTINFO: {
/* XXX assumes that caller has room for max size! */
struct in6_pktinfo *pkti;
else
pkti->ipi6_ifindex = 0;
else
return (sizeof (struct in6_pktinfo));
}
case IPV6_TCLASS:
else
*i1 = IPV6_FLOW_TCLASS(
break; /* goto sizeof (int) option return */
case IPV6_NEXTHOP: {
return (0);
return (sizeof (sin6_t));
}
case IPV6_HOPOPTS:
return (0);
return (0);
if (tcp->tcp_label_len > 0) {
}
case IPV6_RTHDRDSTOPTS:
return (0);
return (ipp->ipp_rtdstoptslen);
case IPV6_RTHDR:
return (0);
return (ipp->ipp_rthdrlen);
case IPV6_DSTOPTS:
return (0);
return (ipp->ipp_dstoptslen);
case IPV6_SRC_PREFERENCES:
return (ip6_get_src_preferences(connp,
case IPV6_PATHMTU: {
return (-1);
connp->conn_netstack));
}
default:
return (-1);
}
break;
default:
return (-1);
}
return (sizeof (int));
}
/*
* We declare as 'int' rather than 'void' to satisfy pfi_t arg requirements.
* Parameters are assumed to be verified by the caller.
*/
/* ARGSUSED */
int
{
int reterr;
switch (optset_context) {
case SETFN_OPTCOM_CHECKONLY:
/*
* Note: Implies T_CHECK semantics for T_OPTCOM_REQ
* inlen != 0 implies value supplied and
* we have to "pretend" to set it.
* inlen == 0 implies that there is no
* value part in T_CHECK request and just validation
* done elsewhere should be enough, we just return here.
*/
if (inlen == 0) {
*outlenp = 0;
return (0);
}
break;
case SETFN_OPTCOM_NEGOTIATE:
break;
case SETFN_UD_NEGOTIATE: /* error on conn-oriented transports ? */
case SETFN_CONN_NEGOTIATE:
/*
* Negotiating local and "association-related" options
* from other (T_CONN_REQ, T_CONN_RES,T_UNITDATA_REQ)
* primitives is allowed by XTI, but we choose
* to not implement this style negotiation for Internet
* protocols (We interpret it is a must for OSI world but
* optional for Internet protocols) for all options.
* [ Will do only for the few options that enable test
* suites that our XTI implementation of this feature
* works for transports that do allow it ]
*/
*outlenp = 0;
return (EINVAL);
}
break;
default:
/*
* We should never get here
*/
*outlenp = 0;
return (EINVAL);
}
/*
* For TCP, we should have no ancillary data sent down
* (sendmsg isn't supported for SOCK_STREAM), so thisdg_attrs
* has to be zero.
*/
/*
* For fixed length options, no sanity check
* of passed in length is done. It is assumed *_optcom_req()
* routines do the right thing.
*/
switch (level) {
case SOL_SOCKET:
switch (name) {
case SO_LINGER: {
if (!checkonly) {
} else {
tcp->tcp_linger = 0;
tcp->tcp_lingertime = 0;
}
/* struct copy */
} else {
((struct linger *)
((struct linger *)
} else {
/* struct copy */
}
}
return (0);
}
case SO_DEBUG:
if (!checkonly)
break;
case SO_KEEPALIVE:
if (checkonly) {
/* T_CHECK case */
break;
}
if (!onoff) {
if (tcp->tcp_ka_enabled) {
if (tcp->tcp_ka_tid != 0) {
(void) TCP_TIMER_CANCEL(tcp,
tcp->tcp_ka_tid);
tcp->tcp_ka_tid = 0;
}
tcp->tcp_ka_enabled = 0;
}
break;
}
if (!tcp->tcp_ka_enabled) {
/* Crank up the keepalive timer */
tcp->tcp_ka_last_intrvl = 0;
}
break;
case SO_DONTROUTE:
/*
* SO_DONTROUTE, SO_USELOOPBACK, and SO_BROADCAST are
* only of interest to IP. We track them here only so
* that we can report their current value.
*/
if (!checkonly) {
}
break;
case SO_USELOOPBACK:
if (!checkonly) {
}
break;
case SO_BROADCAST:
if (!checkonly) {
}
break;
case SO_REUSEADDR:
if (!checkonly) {
}
break;
case SO_OOBINLINE:
if (!checkonly)
break;
case SO_DGRAM_ERRIND:
if (!checkonly)
break;
case SO_SNDBUF: {
*outlenp = 0;
return (ENOBUFS);
}
if (checkonly)
break;
if (tcps->tcps_snd_lowat_fraction != 0)
/*
* If we are flow-controlled, recheck the condition.
* There are apps that increase SO_SNDBUF size when
* flow-controlled (EWOULDBLOCK), and expect the flow
* control condition to be lifted right away.
*/
if (tcp->tcp_flow_stopped &&
}
break;
}
case SO_RCVBUF:
*outlenp = 0;
return (ENOBUFS);
}
/* Silently ignore zero */
}
/*
* XXX should we return the rwnd here
* and tcp_opt_get ?
*/
break;
case SO_SND_COPYAVOID:
if (!checkonly) {
/* we only allow enable at most once for now */
if (tcp->tcp_loopback ||
(!tcp->tcp_snd_zcopy_aware &&
*outlenp = 0;
return (EOPNOTSUPP);
}
}
break;
case SO_ALLZONES:
/* Pass option along to IP level for handling */
return (-EINVAL);
case SO_ANON_MLP:
/* Pass option along to IP level for handling */
return (-EINVAL);
case SO_MAC_EXEMPT:
/* Pass option along to IP level for handling */
return (-EINVAL);
case SO_EXCLBIND:
if (!checkonly)
break;
default:
*outlenp = 0;
return (EINVAL);
}
break;
case IPPROTO_TCP:
switch (name) {
case TCP_NODELAY:
if (!checkonly)
break;
case TCP_NOTIFY_THRESHOLD:
if (!checkonly)
break;
case TCP_ABORT_THRESHOLD:
if (!checkonly)
break;
if (!checkonly)
break;
case TCP_CONN_ABORT_THRESHOLD:
if (!checkonly)
break;
case TCP_RECVDSTADDR:
return (EOPNOTSUPP);
if (!checkonly)
break;
case TCP_ANONPRIVBIND:
IPPROTO_TCP)) != 0) {
*outlenp = 0;
return (reterr);
}
if (!checkonly) {
}
break;
case TCP_EXCLBIND:
if (!checkonly)
break; /* goto sizeof (int) option return */
case TCP_INIT_CWND: {
if (checkonly)
break;
/*
* Only allow socket with network configuration
* privilege to set the initial cwnd to be larger
* than allowed by RFC 3390.
*/
break;
}
*outlenp = 0;
return (reterr);
}
if (init_cwnd > TCP_MAX_INIT_CWND) {
*outlenp = 0;
return (EINVAL);
}
break;
}
case TCP_KEEPALIVE_THRESHOLD:
if (checkonly)
break;
*outlenp = 0;
return (EINVAL);
}
/*
* Check if we need to restart the
* keepalive timer.
*/
if (tcp->tcp_ka_tid != 0) {
(void) TCP_TIMER_CANCEL(tcp,
tcp->tcp_ka_tid);
tcp->tcp_ka_last_intrvl = 0;
}
}
break;
if (!checkonly) {
if (*i1 <
*i1 >
*outlenp = 0;
return (EINVAL);
}
}
break;
case TCP_CORK:
if (!checkonly) {
/*
* if tcp->tcp_cork was set and is now
* being unset, we have to make sure that
* the remaining data gets sent out. Also
* unset tcp->tcp_cork so that tcp_wput_data()
* can send data even if it is less than mss
*/
tcp->tcp_unsent > 0) {
}
}
break;
default:
*outlenp = 0;
return (EINVAL);
}
break;
case IPPROTO_IP:
*outlenp = 0;
return (ENOPROTOOPT);
}
switch (name) {
case IP_OPTIONS:
case T_IP_OPTIONS:
if (reterr) {
*outlenp = 0;
return (reterr);
}
/* OK return - copy input buffer into output buffer */
}
return (0);
case IP_TOS:
case T_IP_TOS:
if (!checkonly) {
}
break;
case IP_TTL:
if (!checkonly) {
}
break;
case IP_BOUND_IF:
case IP_NEXTHOP:
/* Handled at the IP level */
return (-EINVAL);
case IP_SEC_OPT:
/*
* We should not allow policy setting after
* we start listening for connections.
*/
return (EINVAL);
} else {
/* Handled at the IP level */
return (-EINVAL);
}
default:
*outlenp = 0;
return (EINVAL);
}
break;
case IPPROTO_IPV6: {
/*
* IPPROTO_IPV6 options are only supported for sockets
* that are using IPv6 on the wire.
*/
*outlenp = 0;
return (ENOPROTOOPT);
}
/*
* Only sticky options; no ancillary data
*/
switch (name) {
case IPV6_UNICAST_HOPS:
/* -1 means use default */
*outlenp = 0;
return (EINVAL);
}
if (!checkonly) {
if (*i1 == -1) {
/* Pass modified value to IP. */
} else {
}
if (reterr != 0)
return (reterr);
}
break;
case IPV6_BOUND_IF:
if (!checkonly) {
int error = 0;
if (error != 0) {
*outlenp = 0;
return (error);
}
}
break;
/*
* Set boolean switches for ancillary data delivery
*/
case IPV6_RECVPKTINFO:
if (!checkonly) {
if (onoff)
else
/* Force it to be sent up with the next msg */
tcp->tcp_recvifindex = 0;
}
break;
case IPV6_RECVTCLASS:
if (!checkonly) {
if (onoff)
else
}
break;
case IPV6_RECVHOPLIMIT:
if (!checkonly) {
if (onoff)
else
/* Force it to be sent up with the next msg */
}
break;
case IPV6_RECVHOPOPTS:
if (!checkonly) {
if (onoff)
else
}
break;
case IPV6_RECVDSTOPTS:
if (!checkonly) {
if (onoff)
else
}
break;
case _OLD_IPV6_RECVDSTOPTS:
if (!checkonly) {
if (onoff)
else
}
break;
case IPV6_RECVRTHDR:
if (!checkonly) {
if (onoff)
else
}
break;
case IPV6_RECVRTHDRDSTOPTS:
if (!checkonly) {
if (onoff)
else
}
break;
case IPV6_PKTINFO:
return (EINVAL);
if (checkonly)
break;
if (inlen == 0) {
} else {
struct in6_pktinfo *pkti;
/*
* RFC 3542 states that ipi6_addr must be
* the unspecified address when setting the
* IPV6_PKTINFO sticky socket option on a
* TCP socket.
*/
return (EINVAL);
/*
* ip6_set_pktinfo() validates the source
* address and interface index.
*/
if (reterr != 0)
return (reterr);
if (ipp->ipp_ifindex != 0)
else
else
}
if (reterr != 0)
return (reterr);
break;
case IPV6_TCLASS:
return (EINVAL);
if (checkonly)
break;
if (inlen == 0) {
} else {
return (EINVAL);
if (*i1 == -1) {
ipp->ipp_tclass = 0;
*i1 = 0;
} else {
}
}
if (reterr != 0)
return (reterr);
break;
case IPV6_NEXTHOP:
/*
* IP will verify that the nexthop is reachable
* and fail for sticky options.
*/
return (EINVAL);
if (checkonly)
break;
if (inlen == 0) {
} else {
return (EAFNOSUPPORT);
if (IN6_IS_ADDR_V4MAPPED(
return (EADDRNOTAVAIL);
if (!IN6_IS_ADDR_UNSPECIFIED(
&ipp->ipp_nexthop))
else
}
if (reterr != 0)
return (reterr);
break;
case IPV6_HOPOPTS: {
/*
* Sanity checks - minimum size, size a multiple of
* eight bytes, and matching size passed in.
*/
if (inlen != 0 &&
return (EINVAL);
if (checkonly)
break;
if (reterr != 0)
return (reterr);
if (ipp->ipp_hopoptslen == 0)
else
if (reterr != 0)
return (reterr);
break;
}
case IPV6_RTHDRDSTOPTS: {
/*
* Sanity checks - minimum size, size a multiple of
* eight bytes, and matching size passed in.
*/
if (inlen != 0 &&
return (EINVAL);
if (checkonly)
break;
&ipp->ipp_rtdstoptslen, 0);
if (reterr != 0)
return (reterr);
if (ipp->ipp_rtdstoptslen == 0)
else
if (reterr != 0)
return (reterr);
break;
}
case IPV6_DSTOPTS: {
/*
* Sanity checks - minimum size, size a multiple of
* eight bytes, and matching size passed in.
*/
if (inlen != 0 &&
return (EINVAL);
if (checkonly)
break;
&ipp->ipp_dstoptslen, 0);
if (reterr != 0)
return (reterr);
if (ipp->ipp_dstoptslen == 0)
else
if (reterr != 0)
return (reterr);
break;
}
case IPV6_RTHDR: {
/*
* Sanity checks - minimum size, size a multiple of
* eight bytes, and matching size passed in.
*/
if (inlen != 0 &&
return (EINVAL);
if (checkonly)
break;
&ipp->ipp_rthdrlen, 0);
if (reterr != 0)
return (reterr);
if (ipp->ipp_rthdrlen == 0)
else
if (reterr != 0)
return (reterr);
break;
}
case IPV6_V6ONLY:
if (!checkonly)
break;
case IPV6_USE_MIN_MTU:
if (inlen != sizeof (int))
return (EINVAL);
return (EINVAL);
if (checkonly)
break;
break;
case IPV6_BOUND_PIF:
/* Handled at the IP level */
return (-EINVAL);
case IPV6_SEC_OPT:
/*
* We should not allow policy setting after
* we start listening for connections.
*/
return (EINVAL);
} else {
/* Handled at the IP level */
return (-EINVAL);
}
case IPV6_SRC_PREFERENCES:
return (EINVAL);
if (reterr != 0) {
*outlenp = 0;
return (reterr);
}
break;
default:
*outlenp = 0;
return (EINVAL);
}
break;
} /* end IPPROTO_IPV6 */
default:
*outlenp = 0;
return (EINVAL);
}
/*
* Common case of OK return with outval same as inval
*/
}
return (0);
}
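/*
 * Illustrative sketch (not part of the original code) of the
 * "minimum size, multiple of eight bytes, matching size" sanity check
 * applied to the IPv6 extension-header options above.  `inlen` is the
 * option length supplied by the caller; `hdr_len_units` is the length
 * field encoded in the extension header (units of 8 bytes, excluding
 * the first 8).  Names are assumptions for the example.
 */
#if 0
static int
ex_ext_hdr_len_ok(uint_t inlen, uint_t hdr_len_units)
{
	if (inlen < 8 || (inlen & 0x7) != 0)
		return (0);		/* too small or not 8-aligned */
	return (inlen == (hdr_len_units + 1) * 8);
}
#endif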
/*
* Update tcp_sticky_hdrs based on tcp_sticky_ipp.
* The headers include ip6i_t (if needed), ip6_t, any sticky extension
* headers, and the maximum size tcp header (to avoid reallocation
* on the fly for additional tcp options).
* Returns failure if can't allocate memory.
*/
static int
{
char *hdrs;
char buf[TCP_MAX_HDR_LENGTH];
/*
*/
/* Need to reallocate */
return (ENOMEM);
if (tcp->tcp_hdr_grown) {
} else {
}
tcp->tcp_iphc_len = 0;
}
}
/* Set header fields not in ipp */
} else {
}
/*
* tcp->tcp_ip_hdr_len will include ip6i_t if there is one.
*
* tcp->tcp_tcp_hdr_len doesn't change here.
*/
/*
* If the hop limit was not set by ip_build_hdrs_v6(), set it to
* the default value for TCP.
*/
/*
* If we're setting extension headers after a connection
* has been established, and if we have a routing header
* among the extension headers, call ip_massage_options_v6 to
* account for the difference in the tcp header template.
* (This happens in tcp_connect_ipv6 if the routing header
* is set prior to the connect.)
* Set the tcp_sum to zero first in case we've cleared a
* routing header or don't have one at all.
*/
}
}
/* Try to get everything in a single mblk */
return (0);
}
/*
*/
static int
{
switch (optval) {
case IPOPT_SSRR:
case IPOPT_LSRR:
/* Reverse source route */
/*
* First entry should be the next to last one in the
* current source route (the last entry is our
* address.)
* The last entry should be the final destination.
*/
if (off2 < 0) {
/* No entries in source route */
break;
}
/*
* Note: use src since ipha has not had its src
* and dst reversed (it is in the state it was
* received).
*/
off2 -= IP_ADDR_LEN;
while (off2 > 0) {
off1 += IP_ADDR_LEN;
off2 -= IP_ADDR_LEN;
}
break;
}
}
done:
/* Pad the resulting options */
while (len & 0x3) {
len++;
}
return (len);
}
/*
* Extract and revert a source route from ipha (if any)
* and then update the relevant fields in both tcp_t and the standard header.
*/
static void
{
char buf[TCP_MAX_HDR_LENGTH];
int len;
if (len == IP_SIMPLE_HDR_LENGTH)
/* Nothing to do */
return;
(len & 0x3))
return;
}
/*
* Copy the standard header into its new location,
* lay in the new options and then update the relevant
* fields in both tcp_t and the standard header.
*/
static int
{
return (EINVAL);
return (EINVAL);
if (checkonly) {
/*
* do not really set, just pretend to - T_CHECK
*/
return (0);
}
if (tcp->tcp_label_len > 0) {
int padlen;
/* convert list termination to no-ops */
while (--padlen >= 0)
}
if (!TCP_IS_DETACHED(tcp)) {
/* Always allocate room for all options. */
}
return (0);
}
/* Get callback routine passed to nd_load by tcp_param_register */
/* ARGSUSED */
static int
{
return (0);
}
/*
* Walk through the param array specified registering each element with the
* named dispatch handler.
*/
static boolean_t
{
return (B_FALSE);
}
}
}
KM_SLEEP);
sizeof (tcpparam_t));
return (B_FALSE);
}
KM_SLEEP);
sizeof (tcpparam_t));
return (B_FALSE);
}
KM_SLEEP);
sizeof (tcpparam_t));
return (B_FALSE);
}
KM_SLEEP);
sizeof (tcpparam_t));
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
NULL)) {
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
tcp_host_param_set, NULL)) {
return (B_FALSE);
}
return (B_FALSE);
}
tcp_1948_phrase_set, NULL)) {
return (B_FALSE);
}
return (B_FALSE);
}
/*
* Dummy ndd variables - only to convey obsolescence information
* through printing of their name (no get or set routines)
* XXX Remove in future releases ?
*/
"tcp_close_wait_interval(obsoleted - "
return (B_FALSE);
}
return (B_TRUE);
}
/* ndd set routine for tcp_wroff_xtra, tcp_mdt_hdr_{head,tail}_min. */
/* ARGSUSED */
static int
{
long new_value;
return (EINVAL);
}
/*
* Need to make sure new_value is a multiple of 4. If it is not,
* round it up. For future 64 bit requirement, we actually make it
* a multiple of 8.
*/
if (new_value & 0x7) {
}
return (0);
}
/* Set callback routine passed to nd_load by tcp_param_register */
/* ARGSUSED */
static int
{
long new_value;
return (EINVAL);
}
return (0);
}
/*
* Add a new piece to the tcp reassembly queue. If the gap at the beginning
* is filled, return as much as we can. The message passed in may be
* multi-part, chained using b_cont. "start" is the starting sequence
* number for this piece.
*/
static mblk_t *
{
/* Walk through all the new pieces. */
do {
/* Empty. Blast it. */
continue;
}
if (!mp1) {
continue;
}
/* New stuff completely beyond tail? */
/* Link it on end. */
continue;
}
/* New stuff at the front? */
/* Yes... Check for overlap. */
continue;
}
/*
* The new piece fits somewhere between the head and tail.
* We find our slot, where mp1 precedes us and mp2 trails.
*/
break;
}
/* Link ourselves in */
/* Trim overlap with following mblk(s) first */
/* Trim overlap with preceding mblk */
/* Anything ready to go? */
return (NULL);
/* Eat what we can off the queue */
for (;;) {
TCP_REASS_SET_SEQ(mp1, 0);
TCP_REASS_SET_END(mp1, 0);
if (!mp) {
break;
}
break;
}
}
return (mp1);
}
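/*
 * Illustrative sketch (not the original code) of inserting a new
 * segment into a sequence-ordered reassembly list, the core step of
 * the routine above.  The ex_seg_t type, its fields and the sequence
 * comparison macro are assumptions for the example; overlap trimming
 * (as done above) is omitted.
 */
#if 0
typedef struct ex_seg {
	struct ex_seg	*next;
	uint32_t	seq;		/* starting sequence number */
	uint32_t	len;
} ex_seg_t;

#define	EX_RSEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)

static void
ex_reass_insert(ex_seg_t **headp, ex_seg_t *seg)
{
	ex_seg_t **pp = headp;

	/* Find the first element that starts at or after seg->seq. */
	while (*pp != NULL && EX_RSEQ_LT((*pp)->seq, seg->seq))
		pp = &(*pp)->next;
	seg->next = *pp;
	*pp = seg;
}
#endif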
/* Eliminate any overlap that mp may have over later mblks */
static void
{
break;
break;
}
TCP_REASS_SET_SEQ(mp1, 0);
TCP_REASS_SET_END(mp1, 0);
}
if (!mp1)
}
/*
* Send up all messages queued on tcp_rcv_list.
*/
static uint_t
{
#ifdef DEBUG
#endif
/* Can't drain on an eager connection */
return (ret);
/* Can't be sodirect enabled */
/* No need for the push timer now. */
if (tcp->tcp_push_tid != 0) {
tcp->tcp_push_tid = 0;
}
/*
* Handle two cases here: we are currently fused or we were
* previously fused and have some urgent data to be delivered
* upstream. The latter happens because we either ran out of
* memory or were detached and therefore sending the SIGURG was
* deferred until this point. In either case we pass control
* over to tcp_fuse_rcv_drain() since it may need to complete
* some work.
*/
return (ret);
}
#ifdef DEBUG
#endif
/* Does this need SSL processing first? */
continue;
}
}
tcp->tcp_rcv_cnt = 0;
/* Learn the latest rwnd information that we sent to the other side. */
<< tcp->tcp_rcv_ws;
/* This is peer's calculated send window (our receive window). */
/*
* Increase the receive window to max. But we need to do receiver
* SWS avoidance. This means that we need to check that the increase
* of the receive window is at least 1 MSS.
*/
/*
* If the window that the other side knows is less than max
* deferred acks segments, send an update immediately.
*/
ret = TH_ACK_NEEDED;
}
}
return (ret);
}
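/*
 * Illustrative sketch (not in the original source) of the receiver
 * SWS avoidance decision described above: only advertise a larger
 * window when the increase is at least one MSS, and request an
 * immediate ACK when the peer's view of the window has fallen behind
 * the deferred-ACK allowance.  Names and units are for the example.
 */
#if 0
static int
ex_rwnd_update_needed(uint32_t cur_adv_wnd, uint32_t max_wnd,
    uint32_t mss, uint32_t deferred_acks)
{
	if (max_wnd < cur_adv_wnd + mss)
		return (0);		/* increase smaller than 1 MSS */
	/* Peer's known window is behind; send the update now. */
	return (cur_adv_wnd < deferred_acks * mss);
}
#endif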
/*
* Queue data on tcp_rcv_list which is a b_next chain.
* tcp_rcv_last_head/tail is the last element of this chain.
* Each element of the chain is a b_cont chain.
*
* M_DATA messages are added to the current element.
* Other messages are added as new (b_next) elements.
*/
void
{
} else {
}
}
/*
* The tcp_rcv_sod_XXX() functions enqueue data directly to the socket
* above, in addition when uioa is enabled schedule an asynchronous uio
* prior to enqueuing. They implement the combined semantics of the
* tcp_rcv_XXX() functions, tcp_rcv_list push logic, and STREAMS putnext()
* canputnext(), i.e. flow-control with backenable.
*
* tcp_sod_wakeup() is called where tcp_rcv_drain() would be called in the
* non-sodirect case, but as there are no tcp_rcv_list mblk_t's we only deal
* with the rcv_wnd and push timer and call the sodirect wakeup function.
*
* Must be called with sodp->sod_lock held and will return with the lock
* released.
*/
static uint_t
{
/* Can't be an eager connection */
/* Caller must have lock held */
/* Sodirect mode so must not be a tcp_rcv_list */
/* Q is full, mark Q for need backenable */
}
/* Last advertised rwnd, i.e. rwnd last sent in a packet */
<< tcp->tcp_rcv_ws;
/* This is peer's calculated send window (our available rwnd). */
/*
* Increase the receive window to max. But we need to do receiver
* SWS avoidance. This means that we need to check that the increase
* of the receive window is at least 1 MSS.
*/
/*
* If the window that the other side knows is less than max
* deferred acks segments, send an update immediately.
*/
ret = TH_ACK_NEEDED;
}
}
if (!SOD_QEMPTY(sodp)) {
/* Wakeup to socket */
/* wakeup() does the mutex_ext() */
} else {
/* Q is empty, no need to wake */
}
/* No need for the push timer now. */
if (tcp->tcp_push_tid != 0) {
tcp->tcp_push_tid = 0;
}
return (ret);
}
/*
* Called where tcp_rcv_enqueue()/putnext(RD(q)) would be. For M_DATA
* mblk_t's, if uioa is enabled, start a uioa asynchronous copy directly
* to the user-land buffer and flag the mblk_t as such.
*
* Also, handle tcp_rwnd.
*/
{
/* Can't be an eager connection */
/* Caller must have lock held */
/* Sodirect mode so must not be a tcp_rcv_list */
/* Passed in segment length must be equal to mblk_t chain data size */
/* Only process M_DATA mblk_t's */
goto enq;
}
/* Uioa is enabled */
/*
* There isn't enough uio space for the mblk_t chain
* so disable uioa such that this and any additional
* mblk_t data is handled by the socket and schedule
* the socket for wakeup to finish this uioa.
*/
}
goto enq;
}
do {
/* Scheduled, mark dblk_t as such */
} else {
/* Error, turn off async processing */
break;
}
/*
* Not all mblk_t(s) uioamoved (error) or all uio
* space has been consumed so schedule the socket
* for wakeup to finish this uio.
*/
}
/*
* Post UIO_ENABLED waiting for socket to finish processing
* so just enqueue and update tcp_rwnd.
*/
/*
* Uioa isn't enabled but sodirect has a pending read().
*/
/* Schedule socket for wakeup */
}
}
/*
* No pending sodirect read() so used the default
* TCP push logic to guess that a push is needed.
*/
/* Schedule socket for wakeup */
}
} else {
/* Just update tcp_rwnd */
}
enq:
/* Wasn't QFULL, now QFULL, need back-enable */
}
/*
* Check to see if remote avail swnd < mss due to delayed ACK,
* first get advertised rwnd.
*/
/* Minus delayed ACK count */
/* Remote avail swnd < mss, need ACK now */
return (TH_ACK_NEEDED);
}
return (0);
}
/*
* DEFAULT TCP ENTRY POINT via squeue on READ side.
*
* This is the default entry function into TCP on the read side. TCP is
* always entered via squeue i.e. using squeue's for mutual exclusion.
* When classifier does a lookup to find the tcp, it also puts a reference
* on the conn structure associated so the tcp is guaranteed to exist
* when we come here. We still need to check the state because it might
* as well have been closed. The squeue processing function i.e. squeue_enter,
* squeue_enter_nodrain, or squeue_drain is responsible for doing the
* CONN_DEC_REF.
*
* Apart from the default entry point, IP also sends packets directly to
* tcp_rput_data for AF_INET fast path and tcp_conn_request for incoming
* connections.
*/
void
{
/* arg2 is the sqp */
/*
* Don't accept any input on a closed tcp as this TCP logically does
* not exist on the system. Don't proceed further with this TCP.
* For eg. this packet could trigger another close of this tcp
* which would be disastrous for tcp_refcnt. tcp_close_detached /
* tcp_clean_death / tcp_closei_local must be called at most once
* on a TCP. In this case we need to refeed the packet into the
* classifier and figure out where the packet should go. Need to
* preserve the recv_ill somehow. Until we figure that out, for
* now just drop the packet if we can't classify the packet.
*/
return;
}
/* We failed to classify. For now just drop the packet */
return;
}
else
}
/*
* The read side put procedure.
* The packets passed up by ip are assumed to be aligned according to
* OK_32PTR and the IP+TCP headers fitting in the first mblk.
*/
static void
{
/*
* tcp_rput_data() does not expect M_CTL except for the case
* where tcp_ipv6_recvancillary is set and we get a IN_PKTINFO
* type. Need to make sure that any other M_CTLs don't make
* it to tcp_rput_data since it is not expecting any and doesn't
* check for it.
*/
case TCP_IOC_ABORT_CONN:
/*
* Handle connection abort request.
*/
return;
case IPSEC_IN:
/*
* Only secure icmp arrive in TCP and they
* don't go through data path.
*/
return;
case IN_PKTINFO:
/*
* Handle IPV6_RECVPKTINFO socket option on AF_INET6
* sockets that are receiving IPv4 traffic. tcp
*/
return;
case MDT_IOC_INFO_UPDATE:
/*
* Handle Multidata information update; the
* following routine will free the message.
*/
B_FALSE);
}
return;
case LSO_IOC_INFO_UPDATE:
/*
* Handle LSO information update; the following
* routine will free the message.
*/
}
return;
default:
/*
* tcp_icmp_err() will process the M_CTL packets.
* Non-ICMP packets, if any, will be discarded in
* tcp_icmp_err(). We will process the ICMP packet
* even if we are TCP_IS_DETACHED_NONEAGER as the
* incoming ICMP packet may result in changing
* the tcp_mss, which we would need if we have
* packets to retransmit.
*/
return;
}
}
/* No point processing the message if tcp is already closed */
if (TCP_IS_DETACHED_NONEAGER(tcp)) {
return;
}
}
/* The minimum of smoothed mean deviation in RTO calculation. */
#define TCP_SD_MIN 400
/*
* Set RTO for this connection. The formula is from Jacobson and Karels'
* "Congestion Avoidance and Control" in SIGCOMM '88. The variable names
* are the same as those in Appendix A.2 of that paper.
*
* m = new measurement
* sa = smoothed RTT average (8 * average estimates).
* sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
*/
static void
{
long m = TICK_TO_MSEC(rtt);
tcp->tcp_rtt_update++;
/* tcp_rtt_sa is not 0 means this is a new sample. */
if (sa != 0) {
/*
* Update average estimator:
* new rtt = 7/8 old rtt + 1/8 Error
*/
/* m is now Error in estimate. */
m -= sa >> 3;
if ((sa += m) <= 0) {
/*
* Don't allow the smoothed average to be negative.
* We use 0 to denote reinitialization of the
* variables.
*/
sa = 1;
}
/*
* Update deviation estimator:
* new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev)
*/
if (m < 0)
m = -m;
m -= sv >> 2;
sv += m;
} else {
/*
* This follows BSD's implementation. So the reinitialized
* RTO is 3 * m. We cannot use a factor less than 2 because
* if the link is bandwidth dominated, doubling the window
* size during slow start means doubling the RTT. We want to
* be more conservative when we reinitialize our estimates.
* 3 is just a convenient number.
*/
sa = m << 3;
sv = m << 1;
}
if (sv < TCP_SD_MIN) {
/*
* We do not know whether sa captures the delayed ACK
* effect: in a long train of segments, a receiver does
* not delay its ACKs. So set the minimum of sv to be
* TCP_SD_MIN, which defaults to 400 ms, twice the BSD
* delayed ACK timeout (DATO). Since sv holds 4 times the
* mean deviation, the minimum mean deviation is 100 ms.
*
*/
sv = TCP_SD_MIN;
}
/*
* RTO = average estimates (sa / 8) + 4 * deviation estimates (sv)
*
* Add tcp_rexmit_interval extra in case of extreme environment
* where the algorithm fails to work. The default value of
* tcp_rexmit_interval_extra should be 0.
*
* As we use a finer grained clock than BSD and update
* RTO for every ACK, add in another .25 of RTT to the
* deviation of RTO to accommodate burstiness of 1/4 of
* window size.
*/
} else {
}
/* Now, we can reset tcp_timer_backoff to use the new RTO... */
tcp->tcp_timer_backoff = 0;
}
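/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the scaled Jacobson/Karels update described above, written as a
 * stand-alone helper with hypothetical names.  sa holds 8 * srtt and sv
 * holds 4 * mdev, so the returned RTO works out to srtt + 4 * mdev.
 */
static long
example_rto_update(long *sa, long *sv, long m)
{
	if (*sa != 0) {
		m -= *sa >> 3;			/* m becomes the error term */
		if ((*sa += m) <= 0)
			*sa = 1;		/* keep the average positive */
		if (m < 0)
			m = -m;
		m -= *sv >> 2;
		*sv += m;			/* new mdev = 3/4 mdev + 1/4 |err| */
	} else {
		*sa = m << 3;			/* reinitialize: srtt = m */
		*sv = m << 1;			/* mdev = m/2, so RTO ~= 3 * m */
	}
	return ((*sa >> 3) + *sv);		/* srtt + 4 * mdev */
}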
/*
* tcp_get_seg_mp() is called to get the pointer to a segment in the
* send queue which starts at the given seq. no.
*
* Parameters:
* tcp_t *tcp: the tcp instance pointer.
* uint32_t seq: the starting seq. no of the requested segment.
* int32_t *off: after execution, *off will be the offset into
* the returned mblk at which the requested seq no is found.
* It is the caller's responsibility to pass in a non-NULL off.
*
* Return:
* An mblk_t pointer pointing to the requested segment in the send queue.
*/
static mblk_t *
{
/* Defensive coding. Make sure we don't send incorrect data. */
return (NULL);
if (cnt < 0) {
break;
}
}
return (mp);
}
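/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a caller might walk a transmit chain to find the mblk covering a
 * given sequence number, assuming the chain's first byte corresponds to
 * the unacknowledged sequence number (tcp_suna in this file).
 */
static mblk_t *
example_find_seg(mblk_t *head, uint32_t chain_start_seq, uint32_t seq,
    int32_t *off)
{
	int32_t cnt = (int32_t)(seq - chain_start_seq);
	mblk_t *mp;

	if (cnt < 0)
		return (NULL);			/* seq precedes the chain */
	for (mp = head; mp != NULL; mp = mp->b_cont) {
		int32_t len = (int32_t)(mp->b_wptr - mp->b_rptr);

		if (cnt < len) {
			*off = cnt;		/* offset of seq in this mblk */
			return (mp);
		}
		cnt -= len;
	}
	return (NULL);				/* seq is beyond the queued data */
}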
/*
* This function handles all retransmissions if SACK is enabled for this
* connection. First it calculates how many segments can be retransmitted
* based on tcp_pipe. Then it goes through the notsack list to find eligible
* segments. A segment is eligible if sack_cnt for that segment is greater
* than or equal to tcp_dupack_fast_retransmit. After it has retransmitted
* all eligible segments, it checks to see if TCP can send some new segments
* (fast recovery). If it can, set the appropriate flag for tcp_rput_data().
*
* Parameters:
* tcp_t *tcp: the tcp structure of the connection.
* uint_t *flags: in return, appropriate value will be set for
* tcp_rput_data().
*/
static void
{
/* Defensive coding in case there is a bug... */
return;
}
/*
* Limit the num of outstanding data in the network to be
* tcp_cwnd_ssthresh, which is half of the original congestion wnd.
*/
/* At least retransmit 1 MSS of data. */
if (usable_swnd <= 0) {
usable_swnd = mss;
}
/* Make sure no new RTT samples will be taken. */
while (usable_swnd > 0) {
(notsack_blk->sack_cnt >=
}
break;
}
}
/*
* All holes are filled. Manipulate tcp_cwnd to send more
* if we can. Note that after the SACK recovery, tcp_cwnd is
* set to tcp_cwnd_ssthresh.
*/
if (notsack_blk == NULL) {
return;
} else {
*flags |= TH_XMIT_NEEDED;
return;
}
}
/*
* Note that we may send more than usable_swnd allows here
* because of round off, but no more than 1 MSS of data.
*/
/* This should not happen. Defensive coding again... */
return;
}
return;
usable_swnd -= seg_len;
/*
* Update the send timestamp to avoid false retransmission.
*/
/*
* Update tcp_rexmit_max to extend this SACK recovery phase.
* This happens when new data sent during fast recovery is
* also lost. If TCP retransmits that new data, it needs
* to extend the SACK recovery phase to avoid starting another
* fast retransmit/recovery unnecessarily.
*/
}
}
}
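/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the eligibility test described above, over a hypothetical not-SACKed
 * block list.  A hole is considered retransmittable only once its
 * sack_cnt has reached the fast retransmit threshold.
 */
struct example_notsack_blk {
	struct example_notsack_blk *next;
	uint32_t begin;			/* first not-yet-SACKed seq num */
	uint32_t end;			/* seq num just past the hole */
	uint32_t sack_cnt;		/* # of SACKs reporting data past this hole */
};

static uint32_t
example_count_eligible(struct example_notsack_blk *list, uint32_t threshold)
{
	uint32_t holes = 0;

	for (; list != NULL; list = list->next) {
		if (list->sack_cnt >= threshold)
			holes++;	/* this hole may be retransmitted */
	}
	return (holes);
}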
/*
* This function handles policy checking at TCP level for non-hard_bound/
* detached connections.
*/
static boolean_t
{
ipsec_in_t *ii;
const char *reason;
/*
* We don't necessarily have an ipsec_in_act action to verify
* policy because of asymmetrical policy where we have only
* outbound policy and no inbound policy (possible with global
* policy).
*/
if (!secure) {
return (B_TRUE);
&tcps->tcps_dropper);
return (B_FALSE);
}
/*
* We have a secure packet.
*/
&tcps->tcps_dropper);
return (B_FALSE);
}
/*
* XXX This whole routine is currently incorrect. ipl should
* be set to the latch pointer, but is currently not set, so
* we initialize it to NULL to avoid picking up random garbage.
*/
return (B_TRUE);
return (B_TRUE);
}
"tcp inbound policy mismatch: %s, packet dropped\n",
reason);
&tcps->tcps_dropper);
return (B_FALSE);
}
/*
* tcp_ss_rexmit() is called in tcp_rput_data() to do slow start
* retransmission after a timeout.
*
* To limit the number of duplicate segments, we limit the number of segments
* sent at one time to tcp_snd_burst, the burst variable.
*/
static void
{
/*
* Note that tcp_rexmit can be set even though TCP has retransmitted
* all unack'ed segments.
*/
}
}
}
return;
/*
* Update the send timestamp to avoid false
* retransmission.
*/
burst--;
}
/*
* If we have transmitted all we have at the time
* we started the retransmission, we can leave
* the rest of the job to tcp_wput_data(). But we
* need to check the send window first. If the
* win is not 0, go on with tcp_wput_data().
*/
return;
}
}
/* Only call tcp_wput_data() if there is data to be sent. */
if (tcp->tcp_unsent) {
}
}
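/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the burst limiting described above, using hypothetical names.  At most
 * "burst" segments are clocked out per call, each no larger than the MSS
 * and never beyond the retransmit high-water mark or the send window edge.
 */
static void
example_ss_rexmit(uint32_t *snxt, uint32_t rexmit_max, uint32_t swnd_edge,
    uint32_t mss, uint32_t burst)
{
	while (burst > 0 &&
	    (int32_t)(rexmit_max - *snxt) > 0 &&
	    (int32_t)(swnd_edge - *snxt) > 0) {
		uint32_t len = rexmit_max - *snxt;

		if (len > mss)
			len = mss;	/* at most one MSS per segment */
		/* a real implementation would build and transmit the segment here */
		*snxt += len;
		burst--;
	}
}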
/*
* Process all TCP options in the SYN segment. Note that this function should
* be called after tcp_adapt_ire() is called so that the necessary info
* from IRE is already set in the tcp structure.
*
* This function sets up the correct tcp_mss value according to the
* MSS option value and our header size. It also sets up the window scale
* and timestamp values, and initializes the SACK info blocks. But it does not
* change receive window size after setting the tcp_mss value. The caller
* should do the appropriate change.
*/
void
{
int options;
char *tmp_tcph;
/*
* Process MSS option. Note that MSS option value does not account
* for IP or TCP options. This means that it is equal to MTU - minimum
* IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
* IPv6.
*/
if (!(options & TCP_OPT_MSS_PRESENT)) {
else
} else {
else
}
/* Process Window Scale option. */
if (options & TCP_OPT_WSCALE_PRESENT) {
} else {
}
/* Process Timestamp option. */
if ((options & TCP_OPT_TSTAMP_PRESENT) &&
/* Fill in our template header with basic timestamp option. */
tmp_tcph[0] = TCPOPT_NOP;
} else {
}
/*
* Process SACK options. If SACK is enabled for this connection,
* then allocate the SACK info structure. Note the following places
* where tcp_snd_sack_ok is set to true.
*
* For active connection: in tcp_adapt_ire() called in
* tcp_rput_other(), or in tcp_rput_other() when tcp_sack_permitted
* is checked.
*
* For passive connection: in tcp_adapt_ire() called in
* tcp_accept_comm().
*
* That's the reason why the extra TCP_IS_DETACHED() check is there.
* That check makes sure that if we did not send a SACK OK option,
* we will not enable SACK for this connection even though the other
* side sends us SACK OK option. For active connection, the SACK
* info structure has already been allocated. So we need to free
* it if SACK is disabled.
*/
if ((options & TCP_OPT_SACK_OK_PRESENT) &&
(tcp->tcp_snd_sack_ok ||
/* This should be true only in the passive case. */
tcp->tcp_sack_info =
}
} else {
if (tcp->tcp_snd_ts_ok) {
} else {
}
}
} else {
/*
* Resetting tcp_snd_sack_ok to B_FALSE so that
* no SACK info will be used for this
* connection. This assumes that SACK usage
* permission is negotiated. This may need
* to be changed once this is clarified.
*/
tcp->tcp_sack_info);
}
}
/*
* Now that the full header length is known, subtract
* that from tcp_mss to get our side's MSS.
*/
/*
* Here we assume that the other side's header size will be equal to
* our header size. We calculate the real MSS accordingly. We need to
* take into account the additional overhead that IPsec puts in.
*
*/
/*
* Set MSS to the smaller one of both ends of the connection.
* We should not have called tcp_mss_set() before, but our
* side of the MSS should have been set to a proper value
* by tcp_adapt_ire(). tcp_mss_set() will also set up the
* STREAM head parameters properly.
*
* If we have a larger-than-16-bit window but the other side
* didn't want to do window scale, tcp_rwnd_set() will take
* care of that.
*/
}
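/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the MSS arithmetic described above.  The advertised MSS excludes IP and
 * TCP headers, so for a 1500-byte Ethernet MTU it is 1460 for IPv4
 * (1500 - 20 - 20) and 1440 for IPv6 (1500 - 40 - 20).
 */
static uint32_t
example_mss_from_mtu(uint32_t mtu, boolean_t is_v6)
{
	/* minimum IP header is 20 bytes for IPv4, 40 for IPv6; TCP is 20 */
	return (mtu - (is_v6 ? (40 + 20) : (20 + 20)));
}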
/*
* Sends the T_CONN_IND to the listener. The caller calls this
* function via squeue to get inside the listener's perimeter;
* once the 3-way handshake is done a T_CONN_IND needs to be
* sent. As an optimization, the caller can call this directly
* if the listener's perimeter is the same as the eager's.
*/
/* ARGSUSED */
void
{
struct T_conn_ind *conn_ind;
/* retrieve the eager */
/*
* A TLI/XTI listener would get confused by us
* sending the eager as an option since it violates
* the option semantics. So remove the eager as
* an option for non-socket listeners.
*/
if (!TCP_IS_SOCKET(listener)) {
conn_ind->OPT_length = 0;
conn_ind->OPT_offset = 0;
}
/*
* If listener has closed, it would have caused a
* just need to return.
*/
return;
}
/*
* If the conn_req_q is full, defer passing up the
* T_CONN_IND until space is available after t_accept()
* processing.
*/
/*
* Take the eager out, if it is in the list of droppable eagers
* as we are here because the 3W handshake is over.
*/
/*
* The eager already has an extra ref put in tcp_rput_data
* so that it stays till accept comes back even though it
* might get into TCPS_CLOSED as a result of a TH_RST etc.
*/
/* Move from SYN_RCVD to ESTABLISHED list */
/*
* Insert at end of the queue because sockfs
* sends down T_CONN_RES in chronological
* order. Leaving the older conn indications
* at the front of the queue helps reduce search
* time.
*/
else
/*
* Delay sending up the T_conn_ind until we are
* done with the eager. Once we have sent up
* the T_conn_ind, the accept can potentially complete
* any time and release the refhold we have on the eager.
*/
} else {
/*
* Defer connection on q0 and set deferred
* connection bit true
*/
/* take tcp out of q0 ... */
/* ... and place it at the end of q0 */
}
/* we have timed out before */
if (tcp->tcp_syn_rcvd_timeout != 0) {
tcp->tcp_syn_rcvd_timeout = 0;
if (listener->tcp_syn_defense &&
/*
* Turn off the defense mode if we
* believe the SYN attack is over.
*/
if (listener->tcp_ip_addr_cache) {
IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
}
}
}
if (addr_cache != NULL) {
/*
* We have finished a 3-way handshake with this
* remote host. This proves the IP addr is good.
* Cache it!
*/
}
if (need_send_conn_ind)
}
mblk_t *
{
ipp.ipp_fields = 0;
case M_CTL:
return (NULL);
}
return (NULL);
}
break;
case M_DATA:
break;
default:
return (NULL);
}
if (ipvers == IPV4_VERSION) {
goto done;
}
/*
* If we have IN_PKTINFO in an M_CTL and tcp_ipv6_recvancillary
* has TCP_IPV6_RECVPKTINFO set, pass I/F index along in ipp.
*/
mctl_present) {
}
}
} else {
/* Look for ifindex information */
return (NULL);
}
}
}
sizeof (tcph_t)) {
return (NULL);
}
}
/*
* Find any potentially interesting extension headers
* as well as the length of the IPv6 + extension
* headers.
*/
/* Verify if this is a TCP packet */
if (nexthdrp != IPPROTO_TCP) {
return (NULL);
}
} else {
}
}
done:
if (ip_hdr_lenp != NULL)
if (mctl_present) {
}
return (mp);
}
/*
* Handle M_DATA messages from IP. It's called directly from IP via
* squeue for the AF_INET socket fast path. No M_CTLs are expected
* in this path.
*
* For everything else (including AF_INET6 sockets with 'tcp_ipversion'
* v4 and v6), we are called through tcp_input() and an M_CTL can
* be present for options but tcp_find_pktinfo() deals with it. We
* only expect M_DATA packets after tcp_find_pktinfo() is done.
*
* There are no exceptions to this rule. The caller has already put
* a reference on the conn_t; the squeue will do the refrele.
*
* The TH_SYN for the listener goes directly to tcp_conn_request via
* squeue.
*
* sqp: NULL = recursive, sqp != NULL means called from squeue
*/
void
{
int seg_len;
int urp;
int npkt;
int mss;
/*
* RST from fused tcp loopback peer should trigger an unfuse.
*/
}
/*
* An AF_INET socket is not capable of receiving any pktinfo. Do inline
* processing here. For the rest, call tcp_find_pktinfo to fill in the
* necessary information.
*/
if (IPCL_IS_TCP4(connp)) {
} else {
return;
}
}
do {
}
return;
}
/*
* This is the correct place to update tcp_last_recv_time. Note
* that it is also updated for tcp structure that belongs to
* global and listener queues which do not really need updating.
* But that should not cause any harm. And it is updated for
* all kinds of incoming segments, not only for data segments.
*/
}
/*
* TCP can't handle urgent pointers that arrive before
* the connection has been accept()ed since it can't
* buffer OOB data. Discard segment if this happens.
*
* We can't just rely on a non-null tcp_listener to indicate
* that the accept() has completed since unlinking of the
* eager and completion of the accept are not atomic.
* tcp_detached, when it is not set (B_FALSE) indicates
* that the accept() has completed.
*
* Nor can it reassemble urgent pointers, so discard
* if it's not the next segment expected.
*
* Otherwise, collapse chain into one mblk (discard if
* that fails). This makes sure the headers, retransmitted
* data, and new data all are in the same mblk.
*/
return;
}
/* Update pointers into message */
/*
* Since we can't handle any data with this urgent
* pointer that is out of sequence, we expunge
* the data. This allows us to still register
* the urgent mark and generate the M_PCSIG,
* which we can do.
*/
seg_len = 0;
}
}
case TCPS_SYN_SENT:
/*
* Note that our stack cannot send data before a
* connection is established, therefore the
* following check is valid. Otherwise, it has
* to be changed.
*/
return;
tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq",
return;
}
}
(void) tcp_clean_death(tcp,
ECONNREFUSED, 13);
return;
}
return;
}
/* Process all TCP options. */
/*
* The following changes our rwnd to be a multiple of the
* MIN(peer MSS, our MSS) for performance reason.
*/
/* Is the other end ECN capable? */
if (tcp->tcp_ecn_ok) {
}
}
/*
* Clear ECN flags because it may interfere with later
* processing.
*/
if (!TCP_IS_DETACHED(tcp)) {
/* Allocate room for SACK options if needed. */
if (tcp->tcp_snd_sack_ok) {
(tcp->tcp_loopback ? 0 :
tcps->tcps_wroff_xtra));
} else {
tcp->tcp_hdr_len +
(tcp->tcp_loopback ? 0 :
tcps->tcps_wroff_xtra));
}
}
/*
* If we can't get the confirmation upstream, pretend
* we didn't even see this one.
*
* XXX: how can we pretend we didn't see it if we
* have updated rnxt et. al.
*
* For loopback we defer sending up the T_CONN_CON
* until after some checks below.
*/
return;
}
/* SYN was acked - making progress */
/* One for the SYN */
/*
* If SYN was retransmitted, need to reset all
* retransmission info. This is because this
* segment will be treated as a dup ACK.
*/
if (tcp->tcp_rexmit) {
tcp->tcp_ms_we_have_waited = 0;
/*
* Set tcp_cwnd back to 1 MSS, per
* recommendation from
* Increasing TCP's Initial Window.
*/
}
/*
* Always send the three-way handshake ack immediately
* in order to make the connection complete as soon as
* possible on the accepting host.
*/
flags |= TH_ACK_NEEDED;
/*
* Special case for loopback. At this point we have
* received SYN-ACK from the remote endpoint. In
* order to ensure that both endpoints reach the
* fused state prior to any data exchange, the final
* ACK needs to be sent before we indicate T_CONN_CON
* to the module upstream.
*/
if (tcp->tcp_loopback) {
/*
* For loopback, we always get a pure SYN-ACK
* and only need to send back the final ACK
* with no data; that final ACK triggers the
* passive side to perform fusion in
* ESTABLISHED state.
*/
if (tcp->tcp_ack_tid != 0) {
(void) TCP_TIMER_CANCEL(tcp,
tcp->tcp_ack_tid);
tcp->tcp_ack_tid = 0;
}
/* Send up T_CONN_CON */
return;
}
/*
* Forget fusion; we need to handle more
* complex cases below. Send the deferred
* T_CONN_CON message upstream and proceed
* as usual. Mark this tcp as not capable
* of fusion.
*/
}
/*
* Check to see if there is data to be sent. If
* yes, set the transmit flag. Then check to see
* if received data processing needs to be done.
* If not, go straight to xmit_check; this shortcut
* skips the receive-side processing when it isn't needed.
*/
if (tcp->tcp_unsent)
flags |= TH_XMIT_NEEDED;
goto xmit_check;
}
seg_seq++;
break;
}
if (mp1) {
}
return;
case TCPS_SYN_RCVD:
/*
* In this state, a SYN|ACK packet is either bogus
* (because the other side must be ACKing our SYN,
* which indicates it has seen the ACK for their SYN
* and shouldn't retransmit it) or we're crossing
* SYNs on active open.
*/
tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn",
return;
}
/*
* NOTE: RFC 793 pg. 72 says this should be
* tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt
* but that would mean we have an ack that ignored
* our SYN.
*/
tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
return;
}
}
break;
case TCPS_LISTEN:
/*
* Only a TLI listener can come through this path when an
* acceptor is going back to be a listener and a packet
* for the acceptor hits the classifier. For a socket
* listener, this can never happen because a listener
* can never accept a connection on itself and hence a
* socket acceptor cannot go back to being a listener.
*/
/*FALLTHRU*/
case TCPS_CLOSED:
case TCPS_BOUND: {
return;
}
/* We failed to classify. For now just drop the packet */
return;
}
case TCPS_IDLE:
/*
* Handle the case where the tcp_clean_death() has happened
* on a connection (application hasn't closed yet) but a packet
* was already queued on squeue before tcp_clean_death()
* was processed. Calling tcp_clean_death() twice on same
* connection can result in weird behaviour.
*/
return;
default:
break;
}
/*
* If this is a detached connection and not an eager
* connection hanging off a listener then new data
* (past the FIN) will cause a reset.
* We do a special check here where it
* is out of the main line, rather than check
* if we are detached every time we see new
* data down below.
*/
if (TCP_IS_DETACHED_NONEAGER(tcp) &&
/*
* This could be an SSL closure alert. We're detached so just
* acknowledge it this last time.
*/
flags |= TH_ACK_NEEDED;
goto ack_check;
}
return;
}
if (tcp->tcp_snd_ts_ok) {
/*
* This segment is not acceptable.
* Drop it and send back an ACK.
*/
flags |= TH_ACK_NEEDED;
goto ack_check;
}
} else if (tcp->tcp_snd_sack_ok) {
/*
* SACK info is already updated by tcp_parse_options. Ignore
* all other TCP options...
*/
}
/*
* gap is the amount of sequence space between what we expect to see
* and what we got for seg_seq. A positive value for gap means
* something got lost. A negative value means we got some old stuff.
*/
if (gap < 0) {
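/*
 * Illustrative example (editor's addition): with tcp_rnxt = 1000 and an
 * arriving segment with seg_seq = 980 and seg_len = 50, gap is
 * 980 - 1000 = -20, so the first 20 bytes are old data to be trimmed
 * and only the remaining 30 bytes are new.
 */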
/* Old stuff present. Is the SYN in there? */
(seg_len != 0)) {
seg_seq++;
urp--;
/* Recompute the gaps after noting the SYN. */
goto try_again;
}
/* Remove the old stuff from seg_len. */
/*
* Anything left?
* Make sure to check for unack'd FIN when rest of data
* has been previously ack'd.
*/
/*
* Resets are only valid if they lie within our offered
* window. If the RST bit is set, we just ignore this
* segment.
*/
return;
}
/*
* The arrival of dup data packets indicates that we
* may have postponed an ack for too long, or the other
* side's RTT estimate is out of shape. Start acking
* more often.
*/
tcp->tcp_rack_abs_max--;
}
/*
* This segment is "unacceptable". None of its
* sequence space lies within our advertised window.
*
* Adjust seg_len to the original value for tracing.
*/
"tcp_rput: unacceptable, gap %d, rgap %d, "
"flags 0x%x, seg_seq %u, seg_ack %u, "
"seg_len %d, rnxt %u, snxt %u, %s",
}
/*
* Arrange to send an ACK in response to the
* unacceptable segment per RFC 793 page 69. There
* is only one small difference between ours and the
* acceptability test in the RFC - we accept ACK-only
* packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK
* will be generated.
*
* Note that we have to ACK an ACK-only packet at least
* for stacks that send 0-length keep-alives with
* SEG.SEQ = SND.NXT-1 as recommended by RFC1122,
* section 4.2.3.6. As long as we don't ever generate
* an unacceptable packet in response to an incoming
* packet that is unacceptable, it should not cause
* "ACK wars".
*/
flags |= TH_ACK_NEEDED;
/*
* Continue processing this segment in order to use the
* ACK information it contains, but skip all other
* sequence-number processing. Processing the ACK
* information is necessary in order to
* re-synchronize connections that may have lost
* synchronization.
*
* We clear seg_len and flag fields related to
* sequence number processing as they are not
* to be trusted for an unacceptable segment.
*/
seg_len = 0;
goto process_ack;
}
/* Fix seg_seq, and chew the gap off the front. */
do {
if (gap > 0) {
break;
}
} while (gap < 0);
/*
* If the urgent data has already been acknowledged, we
* should ignore TH_URG below
*/
if (urp < 0)
}
/*
* rgap measures how much of the segment lies beyond our receive
* window. A negative value means -rgap bytes are out of window.
*/
if (rgap < 0) {
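/*
 * Illustrative example (editor's addition): with tcp_rnxt = 1000, a
 * receive window of 100 bytes, and a segment with seg_seq = 1050 and
 * seg_len = 80, the segment ends at 1130 while the window ends at
 * 1100, so rgap = -30 and the last 30 bytes must be pitched.
 */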
} else {
}
/*
* seg_len does not include the FIN, so if more than
* just the FIN is out of window, we act like we don't
* see it. (If just the FIN is out of window, rgap
* will be zero and we will go ahead and acknowledge
* the FIN.)
*/
/* Fix seg_len and make sure there is something left. */
if (seg_len <= 0) {
/*
* Resets are only valid if they lie within our offered
* window. If the RST bit is set, we just ignore this
* segment.
*/
return;
}
/* Per RFC 793, we need to send back an ACK. */
flags |= TH_ACK_NEEDED;
/*
* Send SIGURG as soon as possible i.e. even
* if the TH_URG was delivered in a window probe
* packet (which will be unacceptable).
*
* We generate a signal if none has been generated
* for this connection or if this is a new urgent
* byte. Also send a zero-length "unmarked" message
* to inform SIOCATMARK that this is not the mark.
*
* tcp_urp_last_valid is cleared when the T_exdata_ind
* is sent up. This plus the check for old data
* (gap >= 0) handles the wraparound of the sequence
* number space without having to always track the
* correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks
* this max in its rcv_up variable).
*
* This prevents duplicate SIGURGS due to a "late"
* zero-window probe when the T_EXDATA_IND has already
* been sent up.
*/
tcp->tcp_urp_last))) {
return;
}
if (!TCP_IS_DETACHED(tcp) &&
SIGURG)) {
/* Try again on the rexmit. */
return;
}
/*
* If the next byte would be the mark
* then mark with MARKNEXT else mark
* with NOTMARKNEXT.
*/
else
}
/*
* If this is a zero window probe, continue to
* process the ACK part. But we need to set seg_len
* to 0 to avoid data processing. Otherwise just
* drop the segment and send back an ACK.
*/
seg_len = 0;
goto process_ack;
} else {
goto ack_check;
}
}
/* Pitch out of window stuff off the end. */
do {
if (rgap < 0) {
}
break;
}
}
ok:;
/*
* TCP should check ECN info for segments inside the window only.
* Therefore the check should be done here.
*/
if (tcp->tcp_ecn_ok) {
}
/*
* Note that both ECN_CE and CWR can be set in the
* same segment. In this case, we once again turn
* on ECN_ECHO.
*/
}
} else {
}
}
}
/*
* Check whether we can update tcp_ts_recent. This test is
* NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP
* Extensions for High Performance: An Update", Internet Draft.
*/
if (tcp->tcp_snd_ts_ok &&
}
/*
* FIN in an out of order segment. We record this in
* tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq.
* Clear the FIN so that any check on FIN flag will fail.
* Remember that FIN also counts in the sequence number
* space. So we need to ack out of order FIN only segments.
*/
flags |= TH_ACK_NEEDED;
}
if (seg_len > 0) {
/* Fill in the SACK blk list. */
if (tcp->tcp_snd_sack_ok) {
&(tcp->tcp_num_sack_blk));
}
/*
* Attempt reassembly and see if we have something
* ready to go.
*/
/* Always ack out of order packets */
if (mp) {
/*
* A gap is filled and the seq num and len
* of the gap match that of a previously
* received FIN, put the FIN flag back in.
*/
tcp->tcp_valid_bits &=
}
} else {
/*
* Keep going even with NULL mp.
* There may be a useful ACK or something else
* we don't want to miss.
*
* But TCP should not perform fast retransmit
* because of the ack number. TCP uses
* seg_len == 0 to determine if it is a pure
* ACK. And this is not a pure ACK.
*/
seg_len = 0;
}
}
} else if (seg_len > 0) {
/*
* If an out of order FIN was received before, and the seq
* num and len of the new segment match that of the FIN,
* put the FIN flag back in.
*/
}
}
case TCPS_SYN_RCVD:
break;
case TCPS_ESTABLISHED:
case TCPS_FIN_WAIT_1:
case TCPS_FIN_WAIT_2:
case TCPS_CLOSE_WAIT:
break;
case TCPS_CLOSING:
case TCPS_LAST_ACK:
break;
default:
break;
}
return;
}
/*
* See RFC 793, Page 71
*
* The seq number must be in the window as it should
* be "fixed" above. If it is outside window, it should
* be already rejected. Note that we allow seg_seq to be
* rnxt + rwnd because we want to accept 0 window probe.
*/
/*
* If the ACK flag is not set, just use our snxt as the
* seq number of the RST segment.
*/
}
return;
}
/*
* urp could be -1 when the urp field in the packet is 0
* and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent
* byte was at seg_seq - 1, in which case we ignore the urgent flag.
*/
if (!tcp->tcp_urp_last_valid ||
/*
* If we haven't generated the signal yet for this
* urgent pointer value, do it now. Also, send up a
* zero-length M_DATA indicating whether or not this is
* the mark. The latter is not needed when a
* T_EXDATA_IND is sent up. However, if there are
* allocation failures this code relies on the sender
* retransmitting and the socket code for determining
* the mark should not block waiting for the peer to
* transmit. Thus, for simplicity we always send up the
* mark indication.
*/
return;
}
if (!TCP_IS_DETACHED(tcp) &&
/* Try again on the rexmit. */
return;
}
/*
* Mark with NOTMARKNEXT for now.
* The code below will change this to MARKNEXT
* if we are at the mark.
*
* If there are allocation failures (e.g. in dupmsg
* below) the next time tcp_rput_data sees the urgent
* segment it will send up the MSG*MARKNEXT message.
*/
#ifdef DEBUG
"tcp_rput: sent M_PCSIG 2 seq %x urp %x "
"last %x, %s",
#endif /* DEBUG */
/*
* An allocation failure prevented the previous
* tcp_rput_data from sending up the allocated
* MSG*MARKNEXT message - send it up this time
* around.
*/
}
/*
* If the urgent byte is in this segment, make sure that it is
* all by itself. This makes it much easier to deal with the
* possibility of an allocation failure on the T_exdata_ind.
* Note that seg_len is the number of bytes in the segment, and
* urp is the offset into the segment of the urgent byte.
* urp < seg_len means that the urgent byte is in this segment.
*/
if (seg_len != 1) {
/*
* Break it up and feed it back in.
* Re-attach the IP header.
*/
if (urp > 0) {
/*
* There is stuff before the urgent
* byte.
*/
if (!mp1) {
/*
* Trim from urgent byte on.
* The rest will come back.
*/
return;
}
/* Feed this piece back in. */
/*
* If the data passed back in was not
* processed (ie: bad ACK) sending
* the remainder back in will cause a
* loop. In this case, drop the
* packet and let the sender try
* sending a good packet.
*/
return;
}
}
/*
* There is stuff after the urgent
* byte.
*/
if (!mp1) {
/*
* Trim everything beyond the
* urgent byte. The rest will
* come back.
*/
return;
}
/*
* If the data passed back in was not
* processed (ie: bad ACK) sending
* the remainder back in will cause a
* loop. In this case, drop the
* packet and let the sender try
* sending a good packet.
*/
return;
}
}
return;
}
/*
* This segment contains only the urgent byte. We
* have to allocate the T_exdata_ind, if we can.
*/
if (!tcp->tcp_urp_mp) {
struct T_exdata_ind *tei;
BPRI_MED);
if (!mp1) {
/*
* Sigh... It'll be back.
* Generate any MSG*MARK message now.
*/
seg_len = 0;
if (flags & TH_SEND_URP_MARK) {
}
goto ack_check;
}
#ifdef DEBUG
"tcp_rput: allocated exdata_ind %s",
#endif /* DEBUG */
/*
* There is no need to send a separate MSG*MARK
* message since the T_EXDATA_IND will be sent
* now.
*/
flags &= ~TH_SEND_URP_MARK;
}
/*
* Now we are all set. On the next putnext upstream,
* tcp_urp_mp will be non-NULL and will get prepended
* to what has to be this piece containing the urgent
* byte. If for any reason we abort this segment below,
* if it comes back, we will have this ready, or it
* will get blown off in close.
*/
/*
* The urgent byte is the next byte after this sequence
* number. If there is data it is marked with
* MSGMARKNEXT and any tcp_urp_mark_mp is discarded
* since it is not needed. Otherwise, if the code
* above just allocated a zero-length tcp_urp_mark_mp
* message, that message is tagged with MSGMARKNEXT.
* Sending up these MSGMARKNEXT messages makes
* SIOCATMARK work correctly even though
* the T_EXDATA_IND will not be sent up until the
* urgent byte arrives.
*/
if (seg_len != 0) {
flags &= ~TH_SEND_URP_MARK;
}
#ifdef DEBUG
"tcp_rput: AT MARK, len %d, flags 0x%x, %s",
#endif /* DEBUG */
} else {
/* Data left until we hit mark */
#ifdef DEBUG
"tcp_rput: URP %d bytes left, %s",
#endif /* DEBUG */
}
}
goto xmit_check;
}
}
/* 3-way handshake complete - pass up the T_CONN_IND */
/*
* Being here means the eager is fine, but it can
* get a TH_RST at any point between now and when
* accept completes, and then disappear. We need to
* ensure that the reference to the eager is valid
* after we get out of the eager's perimeter. So we
* do an extra refhold.
*/
/*
* The listener also exists because of the refhold
* done in tcp_conn_request. It's possible that it
* might have closed. We will check that once we
* get inside the listener's context.
*/
} else if (!tcp->tcp_loopback) {
} else {
}
}
if (tcp->tcp_active_open) {
/*
* We are seeing the final ack in the three way
* handshake of an actively opened connection,
* so we must send up a T_CONN_CON.
*/
return;
}
/*
* Don't fuse the loopback endpoints for
* simultaneous active opens.
*/
if (tcp->tcp_loopback) {
}
}
bytes_acked--;
/* SYN was acked - making progress */
/*
* If SYN was retransmitted, need to reset all
* retransmission info as this segment will be
* treated as a dup ACK.
*/
if (tcp->tcp_rexmit) {
tcp->tcp_ms_we_have_waited = 0;
}
/*
* We set the send window to zero here.
* This is needed if there is data to be
* processed already on the queue.
* Later (at the swnd_update label), when the
* "new_swnd > tcp_swnd" condition is satisfied,
* the XMIT_NEEDED flag is set in the current
* (SYN_RCVD) state. This ensures tcp_wput_data() is
* called if there is already data on the queue in
* this state.
*/
/* Fuse when both sides are in ESTABLISHED state */
}
/* This code follows 4.4BSD-Lite2 mostly. */
if (bytes_acked < 0)
goto est;
/*
* If TCP is ECN capable and the congestion experience bit is
* set, reduce tcp_cwnd and tcp_ssthresh. But this should only be
* done once per window (or more loosely, per RTT).
*/
/*
* If the cwnd is 0, use the timer to clock out
* new segments. This is required by the ECN spec.
*/
if (npkt == 0) {
/*
* This makes sure that when the ACK comes
* back, we will increase tcp_cwnd by 1 MSS.
*/
tcp->tcp_cwnd_cnt = 0;
}
/*
* This marks the end of the current window of in
* flight data. That is why we don't use
* tcp_suna + tcp_swnd. Only data in flight can
* provide ECN info.
*/
}
}
if (bytes_acked == 0) {
int dupack_cnt;
/*
* Fast retransmit. When we have seen exactly three
* identical ACKs while we have unacked data
* outstanding we take it as a hint that our peer
* dropped something.
*
* If TCP is retransmitting, don't do fast retransmit.
*/
! tcp->tcp_rexmit) {
/* Do Limited Transmit */
/*
* RFC 3042
*
* What we need to do is temporarily
* increase tcp_cwnd so that new
* data can be sent if it is allowed
* by the receive window (tcp_rwnd).
* tcp_wput_data() will take care of
* the rest.
*
* If the connection is SACK capable,
* only do limited xmit when there
* is SACK info.
*
* Note how tcp_cwnd is incremented.
* The first dup ACK will increase
* it by 1 MSS. The second dup ACK
* will increase it by 2 MSS. This
* means that only 1 new segment will
* be sent for each dup ACK.
*/
if (tcp->tcp_unsent > 0 &&
(!tcp->tcp_snd_sack_ok ||
(tcp->tcp_snd_sack_ok &&
flags |= TH_LIMIT_XMIT;
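/*
 * Illustrative example (editor's addition): with an MSS of 1460 bytes,
 * the first dup ACK temporarily raises the usable cwnd by 1460 and the
 * second by 2 * 1460, so each dup ACK lets exactly one new segment out
 * (subject to tcp_rwnd); the inflation is undone later when
 * TH_LIMIT_XMIT is processed.
 */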
}
} else if (dupack_cnt ==
/*
* If we have reduced tcp_ssthresh
* because of ECN, do not reduce it again
* unless it is already one window of data
* away. After one window of data, tcp_cwr
* should then be cleared. Note that
* for non ECN capable connection, tcp_cwr
* should always be false.
*
* Adjust cwnd since the duplicate
* ack indicates that a packet was
* dropped (due to congestion.)
*/
mss;
}
if (tcp->tcp_ecn_ok) {
}
/*
* We do Hoe's algorithm. Refer to her
* paper "Improving the Start-up Behavior
* of a Congestion Control Scheme for TCP,"
* which appeared in SIGCOMM '96.
*
* Save highest seq no we have sent so far.
* Be careful about the invisible FIN byte.
*/
(tcp->tcp_unsent == 0)) {
} else {
}
/*
* Do not allow bursty traffic during
* fast recovery. Refer to Fall and Floyd's
* paper "Simulation-based Comparisons of
* Tahoe, Reno and SACK TCP" (in CCR?).
* This is a best current practice.
*/
/*
* For SACK:
* Calculate tcp_pipe, which is the
* estimated number of bytes in
* network.
*
* tcp_fack is the highest sack'ed seq num
* TCP has received.
*
* tcp_pipe is explained in the above quoted
* Fall and Floyd's paper. tcp_fack is
* explained in Mathis and Mahdavi's
* "Forward Acknowledgment: Refining TCP
* Congestion Control" in SIGCOMM '96.
*/
if (tcp->tcp_snd_sack_ok) {
} else {
/*
* Always initialize tcp_pipe
* even though we don't have
* any SACK info. If later
* we get SACK info and
* tcp_pipe is not initialized,
* funny things will happen.
*/
}
} else {
} /* tcp_snd_sack_ok */
} else {
/*
* Here we perform congestion
* avoidance, but NOT slow start.
* This is known as the Fast
* Recovery Algorithm.
*/
if (tcp->tcp_snd_sack_ok &&
} else {
/*
* We know that one more packet has
* left the pipe thus we can update
* cwnd.
*/
if (tcp->tcp_unsent > 0)
flags |= TH_XMIT_NEEDED;
}
}
}
} else if (tcp->tcp_zero_win_probe) {
/*
* If the window has opened, need to arrange
* to send additional data.
*/
if (new_swnd != 0) {
/* tcp_suna != tcp_snxt */
/* Packet contains a window update */
tcp->tcp_zero_win_probe = 0;
tcp->tcp_timer_backoff = 0;
tcp->tcp_ms_we_have_waited = 0;
/*
* Transmit starting with tcp_suna since
* the one byte probe is not ack'ed.
* If TCP has sent more than one identical
* probe, tcp_rexmit will be set. That means
* tcp_ss_rexmit() will send out the one
* byte along with new data. Otherwise,
* fake the retransmission.
*/
flags |= TH_XMIT_NEEDED;
if (!tcp->tcp_rexmit) {
tcp->tcp_dupack_cnt = 0;
}
}
}
goto swnd_update;
}
/*
* Check for "acceptability" of ACK value per RFC 793, pages 72 - 73.
* If the ACK value acks something that we have not yet sent, it might
* be an old duplicate segment. Send an ACK to re-synchronize the
* other side.
* Note: reset in response to unacceptable ACK in SYN_RECEIVE
* state is handled above, so we can always just drop the segment and
* send an ACK here.
*
* Should we send ACKs in response to ACK only segments?
*/
/* drop the received segment */
/*
* Send back an ACK. If tcp_drop_ack_unsent_cnt is
* greater than 0, check if the number of such
* bogus ACKs is greater than that count. If yes,
* don't send back any ACK. This prevents TCP from
* getting into an ACK storm if somehow an attacker
* successfully spoofs an acceptable segment to our
* peer.
*/
if (tcp_drop_ack_unsent_cnt > 0 &&
return;
}
}
return;
}
/*
* TCP gets a new ACK, update the notsack'ed list to delete those
* blocks that are covered by this ACK.
*/
}
/*
* If we got an ACK after fast retransmit, check to see
* if it is a partial ACK. If it is not and the congestion
* window was inflated to account for the other side's
* cached packets, retract it. If it is, do Hoe's algorithm.
*/
tcp->tcp_dupack_cnt = 0;
/*
* Restore the orig tcp_cwnd_ssthresh after
* fast retransmit phase.
*/
}
tcp->tcp_cwnd_cnt = 0;
/*
* Remove all notsack info to avoid confusion with
* the next fast retransmit/recovery phase.
*/
if (tcp->tcp_snd_sack_ok &&
}
} else {
if (tcp->tcp_snd_sack_ok &&
} else {
/*
* Hoe's algorithm:
*
* Retransmit the unack'ed segment and
* restart fast recovery. Note that we
* need to scale back tcp_cwnd to the
* original value when we started fast
* recovery. This is to prevent overly
* aggressive behaviour in sending new
* segments.
*/
}
}
} else {
tcp->tcp_dupack_cnt = 0;
if (tcp->tcp_rexmit) {
/*
* TCP is retransmitting. If the ACK acks all
* outstanding data, update tcp_rexmit_max and
* tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt
* to the correct value.
*
* Note that SEQ_LEQ() is used. This is to avoid
* unnecessary fast retransmit caused by dup ACKs
* received when TCP does slow start retransmission
* after a time out. During this phase, TCP may
* send out segments which are already received.
* This causes dup ACKs to be sent back.
*/
}
flags |= TH_XMIT_NEEDED;
}
} else {
}
tcp->tcp_ms_we_have_waited = 0;
}
}
if (tcp->tcp_zero_win_probe != 0) {
tcp->tcp_zero_win_probe = 0;
tcp->tcp_timer_backoff = 0;
}
/*
* If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
* Note that it cannot be the SYN being ack'ed. The code flow
* will not reach here.
*/
goto fin_acked;
}
/*
* Update the congestion window.
*
* If TCP is not ECN capable or TCP is ECN capable but the
* congestion experience bit is not set, increase the tcp_cwnd as
* usual.
*/
/*
* This is to prevent an increase of less than 1 MSS of
* tcp_cwnd. With partial increase, tcp_wput_data()
* may send out tinygrams in order to preserve mblk
* boundaries.
*
* By initializing tcp_cwnd_cnt to new tcp_cwnd and
* decrementing it by 1 MSS for every ACK, tcp_cwnd is
* increased by 1 MSS for every RTT.
*/
if (tcp->tcp_cwnd_cnt <= 0) {
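/*
 * Illustrative note (editor's addition): reaching this branch means
 * enough ACKs have drained tcp_cwnd_cnt to zero. E.g. with mss = 1460
 * and tcp_cwnd = 8 * 1460, eight ACKs (roughly one RTT of full-sized
 * segments) are needed before tcp_cwnd grows by one MSS, giving the
 * linear congestion avoidance increase described above.
 */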
} else {
add = 0;
}
}
}
/* See if the latest urgent data has been acknowledged */
/* Can we update the RTT estimates? */
if (tcp->tcp_snd_ts_ok) {
/* Ignore zero timestamp echo-reply. */
if (tcpopt.tcp_opt_ts_ecr != 0) {
}
/* If needed, restart the timer. */
tcp->tcp_set_timer = 0;
}
/*
* Update tcp_csuna in case the other side stops sending
* us timestamps.
*/
/*
* An ACK sequence we haven't seen before, so get the RTT
* and update the RTO. But first check if the timestamp is
* valid to use.
*/
else
/* Remember the last sequence to be ACKed */
tcp->tcp_set_timer = 0;
}
} else {
}
/* Eat acknowledged bytes off the xmit queue. */
for (;;) {
if (bytes_acked < 0) {
/*
* Set a new timestamp if all the bytes timed by the
* old timestamp have been ack'ed.
*/
}
break;
}
/*
* This notification is required for some zero-copy
* clients to maintain a copy semantic. After the data
* is ack'ed, client is safe to modify or reuse the buffer.
*/
if (tcp->tcp_snd_zcopy_aware &&
if (bytes_acked == 0) {
/* Everything is ack'ed, clear the tail. */
/*
* Cancel the timer unless we are still
* waiting for an ACK for the FIN packet.
*/
if (tcp->tcp_timer_tid != 0 &&
(void) TCP_TIMER_CANCEL(tcp,
tcp->tcp_timer_tid);
tcp->tcp_timer_tid = 0;
}
goto pre_swnd_update;
}
break;
break;
}
/*
* More was acked but there is nothing more
* outstanding. This means that the FIN was
* just acked or that we're talking to a clown.
*/
if (tcp->tcp_fin_sent) {
/* FIN was acked - making progress */
!tcp->tcp_fin_acked)
if (tcp->tcp_linger_tid != 0 &&
tcp->tcp_linger_tid) >= 0) {
}
} else {
/*
* We should never get here because
* we have already checked that the
* number of bytes ack'ed should be
* smaller than or equal to what we
* have sent so far (it is the
* acceptability check of the ACK).
* We can only get here if the send
* queue is corrupted.
*
* Terminate the connection and
* panic the system. It is better
* for us to panic instead of
* continuing to avoid other disaster.
*/
panic("Memory corruption "
"detected for connection %s.",
/*NOTREACHED*/
}
goto pre_swnd_update;
}
}
if (tcp->tcp_unsent) {
flags |= TH_XMIT_NEEDED;
}
/*
* The following check is different from most other implementations.
* For bi-directional transfer, when segments are dropped, the
* "normal" check will not accept a window update in those
* retransmitted segments. Failing to do that, TCP may send out
* segments which are outside the receiver's window. As TCP accepts
* the ack in those retransmitted segments, if the window update in
* the same segment is not accepted, TCP will incorrectly calculate
* that it can send more segments. This can create a deadlock
* with the receiver if its window becomes zero.
*/
/*
* The criteria for update is:
*
* 1. the segment acknowledges some data. Or
* 2. the segment is new, i.e. it has a higher seq num. Or
* 3. the segment is not old and the advertised window is
* larger than the previous advertised window.
*/
flags |= TH_XMIT_NEEDED;
}
est:
case TCPS_FIN_WAIT_1:
if (tcp->tcp_fin_acked) {
/*
* FIN_WAIT_2 flushing algorithm.
* If there is no user attached to this
* TCP endpoint, then this TCP struct
* could hang around forever in FIN_WAIT_2
* state if the peer forgets to send us
* a FIN. To prevent this, we wait only
* 2*MSL (a convenient time value) for
* the FIN to arrive. If it doesn't show up,
* we flush the TCP endpoint. This algorithm,
* though a violation of RFC-793, has worked
* for over 10 years in BSD systems.
* Note: SunOS 4.x waits 675 seconds before
* flushing the FIN_WAIT_2 connection.
*/
}
break;
case TCPS_FIN_WAIT_2:
break; /* Shutdown hook? */
case TCPS_LAST_ACK:
if (tcp->tcp_fin_acked) {
return;
}
goto xmit_check;
case TCPS_CLOSING:
if (tcp->tcp_fin_acked) {
/*
* Unconditionally clear the exclusive binding
* bit so this TIME-WAIT connection won't
* interfere with new ones.
*/
tcp->tcp_exclbind = 0;
if (!TCP_IS_DETACHED(tcp)) {
} else {
}
}
/*FALLTHRU*/
case TCPS_CLOSE_WAIT:
goto xmit_check;
default:
break;
}
}
/* Make sure we ack the fin */
flags |= TH_ACK_NEEDED;
if (!tcp->tcp_fin_rcvd) {
/*
* Generate the ordrel_ind at the end unless we
* are an eager guy.
* In the eager case tcp_rsrv will do this when run
* after tcp_accept is done.
*/
case TCPS_SYN_RCVD:
case TCPS_ESTABLISHED:
/* Keepalive? */
break;
case TCPS_FIN_WAIT_1:
if (!tcp->tcp_fin_acked) {
break;
}
/* FALLTHRU */
case TCPS_FIN_WAIT_2:
/*
* Unconditionally clear the exclusive binding
* bit so this TIME-WAIT connection won't
* interfere with new ones.
*/
tcp->tcp_exclbind = 0;
if (!TCP_IS_DETACHED(tcp)) {
} else {
}
if (seg_len) {
/*
* implies data piggybacked on FIN.
* break to handle data.
*/
break;
}
goto ack_check;
}
}
}
goto xmit_check;
if (seg_len == 0) {
goto xmit_check;
}
/*
* The header has been consumed, so we remove the
* zero-length mblk here.
*/
}
tcp->tcp_rack_cnt++;
{
/*
* We have more unacked data than we should - send
* an ACK now.
*/
flags |= TH_ACK_NEEDED;
cur_max++;
else
} else if (TCP_IS_DETACHED(tcp)) {
/* We don't have an ACK timer for detached TCP. */
flags |= TH_ACK_NEEDED;
/*
* If we get a segment that is less than an mss, and we
* already have unacknowledged data, and the amount
* unacknowledged is not a multiple of mss, then we
* better generate an ACK now. Otherwise, this may be
* the tail piece of a transaction, and we would rather
* wait for the response.
*/
flags |= TH_ACK_NEEDED;
else
} else {
/* Start delayed ack timer */
}
}
/* Update SACK list */
&(tcp->tcp_num_sack_blk));
}
if (tcp->tcp_urp_mp) {
/* Ready for a new signal. */
#ifdef DEBUG
"tcp_rput: sending exdata_ind %s",
#endif /* DEBUG */
}
/*
* Check for ancillary data changes compared to last segment.
*/
if (tcp->tcp_ipv6_recvancillary != 0) {
return;
}
/*
* Side queue inbound data until the accept happens.
* tcp_accept/tcp_rput drains this when the accept happens.
* M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
* T_EXDATA_IND) it is queued on b_next.
* XXX Make urgent data use this. Requires:
* Removing tcp_listener check for TH_URG
* Making M_PCPROTO and MARK messages skip the eager case
*/
if (tcp->tcp_kssl_pending) {
} else {
}
} else {
/*
* If this is an sodirect connection with an enabled sodirect_t,
* then the sodirect_t and the sodirect_t's lock will be held.
*/
}
}
(flags & TH_MARKNEXT_NEEDED)) {
if (!SOD_QEMPTY(sodp) &&
/* sod_wakeup() did the mutex_exit() */
}
}
if (flags & TH_MARKNEXT_NEEDED) {
#ifdef DEBUG
"tcp_rput: sending MSGMARKNEXT %s",
#endif /* DEBUG */
flags &= ~TH_MARKNEXT_NEEDED;
}
/* Does this need SSL processing first? */
} else {
if (sodp) {
/*
* Done with sodirect, use putnext
* to push this non M_DATA headed
* mblk_t chain.
*/
}
}
/* Do SSL processing first */
/*
* Sodirect so all mblk_t's are queued on the
* socket directly, check for wakeup of blocked
* reader (if any), and last if flow-controlled.
*/
/* sod_wakeup() did the mutex_exit() */
} else {
/* Q is full, need backenable */
}
}
/*
* Enqueue the new segment first and then
* call tcp_rcv_drain() to send all data
* up. The other way to do this is to
* send all queued data up and then call
* putnext() to send the new segment up.
* That way we could remove the else part later
* on.
*
* We don't do this, to avoid one more call to
* canputnext() as tcp_rcv_drain() needs to
* call canputnext().
*/
} else {
}
} else {
/*
* Enqueue all packets when processing an mblk
* from the co queue and also enqueue normal packets.
*/
}
/*
* Make sure the timer is running if we have data waiting
* for a push bit. This provides resiliency against
* implementations that do not correctly generate push bits.
*
* Note, for sodirect if Q isn't empty and there's not a
* pending wakeup then we need a timer. Also note that sodp
* is assumed to be still valid after exit()ing the sod_lock
* above and while the SOD state can change it can only change
* such that the Q is empty now even though data was added
* above.
*/
tcp->tcp_push_tid == 0) {
/*
* The connection may be closed at this point, so don't
* do anything for a detached tcp.
*/
if (!TCP_IS_DETACHED(tcp))
}
}
/* Is there anything left to do? */
TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
goto done;
/* Any transmit work to do and a non-zero window? */
if (flags & TH_REXMIT_NEEDED) {
B_TRUE);
}
}
if (flags & TH_NEED_SACK_REXMIT) {
}
/*
* For TH_LIMIT_XMIT, tcp_wput_data() is called to send
* out a new segment. Note that tcp_rexmit should not be
* set, otherwise TH_LIMIT_XMIT should not be set.
*/
if (!tcp->tcp_rexmit) {
} else {
}
}
/*
* Adjust tcp_cwnd back to normal value after sending
* new data segments.
*/
if (flags & TH_LIMIT_XMIT) {
/*
* This will restart the timer. Restarting the
* timer is used to avoid a timeout before the
* limited transmitted segment's ACK gets back.
*/
}
/* Anything more to do? */
TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
goto done;
}
if (flags & TH_SEND_URP_MARK) {
/*
* Send up any queued data and then send the mark message
*/
/* sod_wakeup() does the mutex_exit() */
}
#ifdef DEBUG
"tcp_rput: sending zero-length %s %s",
"MSGNOTMARKNEXT"),
#endif /* DEBUG */
flags &= ~TH_SEND_URP_MARK;
}
if (flags & TH_ACK_NEEDED) {
/*
* Time to send an ack for some reason.
*/
}
if (tcp->tcp_ack_tid != 0) {
tcp->tcp_ack_tid = 0;
}
}
if (flags & TH_ACK_TIMER_NEEDED) {
/*
* Arrange for deferred ACK or push wait timeout.
* Start timer if it is not already running.
*/
if (tcp->tcp_ack_tid == 0) {
}
}
if (flags & TH_ORDREL_NEEDED) {
/*
* Send up the ordrel_ind unless we are an eager guy.
* In the eager case tcp_rsrv will do this when run
* after tcp_accept is done.
*/
/* No more sodirect */
if (!SOD_QEMPTY(sodp)) {
/* Mblk(s) to process, notify */
/* sod_wakeup() does the mutex_exit() */
} else {
/* Nothing to process */
}
/*
* Push any mblk(s) enqueued from co processing.
*/
}
if (tcp->tcp_deferred_clean_death) {
/*
* tcp_clean_death was deferred
* for T_ORDREL_IND - do it now
*/
(void) tcp_clean_death(tcp,
}
} else {
/*
* Run the orderly release in the
* service routine.
*/
/*
* Caveat(XXX): The machine may be so
* overloaded that tcp_rsrv() is not scheduled
* until after the endpoint has transitioned
* to TCPS_TIME_WAIT
* and tcp_time_wait_interval expires. Then
* tcp_timer() will blow away state in tcp_t
* and T_ORDREL_IND will never be delivered
* upstream. Unlikely but potentially
* a problem.
*/
}
}
done:
}
/*
* This function does the PAWS protection check. Returns B_TRUE if the
* segment passes the PAWS test, else returns B_FALSE.
*/
{
int options;
/*
* If timestamp option is aligned nicely, get values inline,
* otherwise call general routine to parse. Only do that
* if timestamp is the only option.
*/
} else {
if (tcp->tcp_snd_sack_ok) {
} else {
}
}
if (options & TCP_OPT_TSTAMP_PRESENT) {
/*
* Do PAWS per RFC 1323 section 4.2. Accept RST
* regardless of the timestamp, page 18 RFC 1323.bis.
*/
tcp->tcp_ts_recent)) {
PAWS_TIMEOUT)) {
/* This segment is not acceptable. */
return (B_FALSE);
} else {
/*
* Connection has been idle for
* too long. Reset the timestamp
* and assume the segment is valid.
*/
tcp->tcp_ts_recent =
}
}
} else {
/*
* If we don't get a timestamp on every packet, we
* figure we can't really trust 'em, so we stop sending
* and parsing them.
*/
/*
* Adjust the tcp_mss accordingly. We also need to
* adjust tcp_cwnd here in accordance with the new mss.
* But we avoid doing a slow start here so as not
* to lose the transfer rate built up so far.
*/
if (tcp->tcp_snd_sack_ok) {
}
}
return (B_TRUE);
}
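/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the PAWS test described above, with hypothetical parameter names.  A
 * segment whose timestamp is older than ts_recent is rejected unless the
 * connection has been idle longer than the PAWS timeout, in which case
 * the stale timestamp is simply forgotten and the segment accepted.
 */
static boolean_t
example_paws_ok(uint32_t ts_val, uint32_t *ts_recent, clock_t idle_ticks,
    clock_t paws_timeout)
{
	if ((int32_t)(ts_val - *ts_recent) < 0) {
		if (idle_ticks < paws_timeout)
			return (B_FALSE);	/* unacceptable segment */
		*ts_recent = ts_val;		/* idle too long; reset */
	}
	return (B_TRUE);
}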
/*
* Attach ancillary data to a received TCP segment for the
* ancillary pieces requested by the application that are
* different from what they were in the previous data segment.
*
* Save the "current" values once memory allocation is ok so that
* when memory allocation fails we can just wait for the next data segment.
*/
static mblk_t *
{
struct T_optdata_ind *todi;
int optlen;
optlen = 0;
addflag = 0;
/* If app asked for pktinfo and the index has changed ... */
sizeof (struct in6_pktinfo);
}
/* If app asked for hoplimit and it has changed ... */
}
/* If app asked for tclass and it has changed ... */
}
/*
* If app asked for hopbyhop headers and it has changed ...
* For security labels, note that (1) security labels can't change on
* a connected socket at all, (2) we're connected to at most one peer,
* (3) if anything changes, then it must be some other extra option.
*/
return (mp);
}
/* If app asked for dst headers before routing headers ... */
return (mp);
}
/* If app asked for routing headers and it has changed ... */
return (mp);
}
/* If app asked for dest headers and it has changed ... */
if ((tcp->tcp_ipv6_recvancillary &
return (mp);
}
if (optlen == 0) {
/* Nothing to add */
return (mp);
}
/*
* Defer sending ancillary data until the next TCP segment
* arrives.
*/
return (mp);
}
/*
* If app asked for pktinfo and the index has changed ...
* Note that the local address never changes for the connection.
*/
if (addflag & TCP_IPV6_RECVPKTINFO) {
struct in6_pktinfo *pkti;
else
/* Save as "last" value */
}
/* If app asked for hoplimit and it has changed ... */
if (addflag & TCP_IPV6_RECVHOPLIMIT) {
/* Save as "last" value */
}
/* If app asked for tclass and it has changed ... */
if (addflag & TCP_IPV6_RECVTCLASS) {
/* Save as "last" value */
}
if (addflag & TCP_IPV6_RECVHOPOPTS) {
/* Save as last value */
}
if (addflag & TCP_IPV6_RECVRTDSTOPTS) {
/* Save as last value */
}
if (addflag & TCP_IPV6_RECVRTHDR) {
/* Save as last value */
}
/* Save as last value */
}
return (mp);
}
/*
* Handle a *T_BIND_REQ that has failed either due to a T_ERROR_ACK
* or a "bad" IRE detected by tcp_adapt_ire.
* We can't tell if the failure was due to the laddr or the faddr
* thus we clear out all addresses and ports.
*/
static void
{
struct T_error_ack *tea;
}
case T_BIND_ACK:
/*
* Need to unbind with classifier since we were just told that
* our bind succeeded.
*/
/* Reuse the mblk if possible */
sizeof (*tea));
} else {
}
break;
case T_ERROR_ACK:
break;
default:
panic("tcp_bind_failed: unexpected TPI type");
/*NOTREACHED*/
}
else
/*
* A copy of the src addr in tcp_t is needed since
* the lookup functions can only look at the tcp_t.
*/
/* blow away saved option results if any */
}
/*
* tcp_rput_other is called by tcp_rput to handle everything other than M_DATA
* messages.
*/
void
{
struct T_error_ack *tea;
int retval;
case M_PROTO:
case M_PCPROTO:
break;
case T_BIND_ACK:
/*
* Adapt Multidata information, if any. The
* following tcp_mdt_update routine will free
* the message.
*/
}
/*
* Check whether to update LSO information with tcp; the
* tcp_lso_update routine will free the message.
*/
}
/* Get the IRE, if we had requested for it */
if (tcp->tcp_hard_binding) {
} else {
goto after_syn_sent;
}
if (retval == 0) {
ENETUNREACH : EADDRNOTAVAIL));
return;
}
/*
* Don't let an endpoint connect to itself.
* Also checked in tcp_connect() but that
* check can't handle the case when the
* local IP address is INADDR_ANY.
*/
return;
}
} else {
if (IN6_ARE_ADDR_EQUAL(
return;
}
}
/*
* This should not be possible! Just for
* defensive coding...
*/
goto after_syn_sent;
if (is_system_labeled() &&
return;
}
/*
* tcp_adapt_ire() does not adjust for the TCP/IP header length.
*/
/*
* Just make sure our rwnd is at
* least tcp_recv_hiwat_mss * MSS
* large, and round up to the nearest
* MSS.
*
* We do the round up here because
* we need to get the interface
* MTU first before we can do the
* round up.
*/
/*
* Set tcp_snd_ts_ok to true
* so that tcp_xmit_mp will
* include the timestamp
* option in the SYN segment.
*/
if (tcps->tcps_tstamp_always ||
}
/*
* tcp_snd_sack_ok can be set in
* tcp_adapt_ire() if the sack metric
* is set. So check it here also.
*/
tcp->tcp_snd_sack_ok) {
tcp->tcp_sack_info =
KM_SLEEP);
}
}
/*
* Should we use ECN? Note that the current
* default value (SunOS 5.9) of tcp_ecn_permitted
* is 1. The reason for doing this is that there
* is equipment out there that will drop
* ECN-enabled IP packets. Setting it to 1 avoids
* compatibility problems.
*/
if (syn_mp) {
/*
* Obtain the credential from the
* thread calling connect(); the credential
* lives on in the second mblk which
* originated from T_CONN_REQ and is echoed
* with the T_BIND_ACK from ip. If none
* can be found, default to the creator
* of the socket.
*/
} else {
}
}
/*
* A trailer mblk indicates a waiting client upstream.
* We complete here the processing begun in
* either tcp_bind() or tcp_connect() by passing
* upstream the reply message they supplied.
*/
if (mp)
break;
return;
case T_ERROR_ACK:
"tcp_rput_other: case T_ERROR_ACK, "
"ERROR_prim == %d",
tea->ERROR_prim);
}
switch (tea->ERROR_prim) {
case O_T_BIND_REQ:
case T_BIND_REQ:
ENETUNREACH : EADDRNOTAVAIL));
return;
case T_UNBIND_REQ:
}
if (tcp->tcp_unbind_pending)
tcp->tcp_unbind_pending = 0;
else {
/* From tcp_ip_unbind() - free */
return;
}
break;
case T_SVR4_OPTMGMT_REQ:
if (tcp->tcp_drop_opt_ack_cnt > 0) {
/* T_OPTMGMT_REQ generated by TCP */
printf("T_SVR4_OPTMGMT_REQ failed "
"%d/%d - dropped (cnt %d)\n",
return;
}
break;
}
tcp->tcp_drop_opt_ack_cnt > 0) {
printf("T_SVR4_OPTMGMT_REQ failed %d/%d "
"- dropped (cnt %d)\n",
return;
}
break;
case T_OPTMGMT_ACK:
if (tcp->tcp_drop_opt_ack_cnt > 0) {
/* T_OPTMGMT_REQ generated by TCP */
return;
}
break;
default:
break;
}
break;
case M_FLUSH:
break;
default:
/* M_CTL will be directly sent to tcp_icmp_error() */
break;
}
/*
* Make sure we set this bit before sending the ACK for
* bind. Otherwise accept could possibly run and free
* this tcp struct.
*/
}
/*
* Called as the result of a qbufcall or a qtimeout to remedy a failure
* to allocate a T_ordrel_ind in tcp_rsrv(). qenable(q) will make
* tcp_rsrv() try again.
*/
static void
tcp_ordrel_kick(void *arg)
{
tcp->tcp_ordrelid = 0;
}
}
/* ARGSUSED */
static void
{
return;
}
/*
* Normally we would not get backenabled in synchronous
* streams mode, but in case this happens, we need to plug
* synchronous streams during our drain to prevent a race
* with tcp_fuse_rrw() or tcp_fuse_rinfop().
*/
} else {
}
if (peer_tcp->tcp_flow_stopped &&
(TCP_UNSENT_BYTES(peer_tcp) <=
peer_tcp->tcp_xmit_lowater)) {
}
return;
}
/* An sodirect connection */
/* Flow-controlled, need another back-enable */
} else {
/* Not flow-controlled */
}
} else if (canputnext(q)) {
/* STREAMS, not flow-controlled */
} else {
/* STREAMS, flow-controlled */
}
if (!fc) {
/* Not flow-controlled, open rwnd */
<< tcp->tcp_rcv_ws;
/*
* Send back a window update immediately if TCP is above
* ESTABLISHED state and the increase of the rcv window
* that the other side knows is at least 1 MSS after flow
* control is lifted.
*/
}
}
/* Handle a failure to allocate a T_ORDREL_IND here */
/* No more sodirect */
if (!SOD_QEMPTY(sodp)) {
/* Notify mblk(s) to process */
/* sod_wakeup() does the mutex_exit() */
} else {
/* Nothing to process */
}
/*
* Push any mblk(s) enqueued from co processing.
*/
}
mp = mi_tpi_ordrel_ind();
if (mp) {
if (tcp->tcp_deferred_clean_death) {
/*
* tcp_clean_death was deferred for
* T_ORDREL_IND - do it now
*/
(void) tcp_clean_death(tcp,
}
/*
* If there isn't already a timer running
* start one. Use a 4 second
* timer as a fallback since it can't fail.
*/
MSEC_TO_TICK(4000));
}
}
}
/*
* The read side service routine is called mostly when we get back-enabled as a
* result of flow control relief. Since we don't actually queue anything in
* TCP, we have no data to send out of here. What we do is clear the receive
* window, and send out a window update.
* This routine is also called to drive an orderly release message upstream
* if the attempt in tcp_rput failed.
*/
static void
{
/* No code does a putq on the read side */
/* Nothing to do for the default queue */
return;
}
/*
* We are under memory pressure. Return for now and we
* will be called again later.
*/
/*
* If there isn't already a timer running
* start one. Use a 4 second
* timer as a fallback since it can't fail.
*/
MSEC_TO_TICK(4000));
}
return;
}
}
/*
* tcp_rwnd_set() is called to adjust the receive window to a desired value.
* We do not allow the receive window to shrink. After setting rwnd,
* set the flow control hiwat of the stream.
*
* This function is called in 2 cases:
*
* 1) Before data transfer begins, in tcp_accept_comm() for accepting a
* connection (passive open) and in tcp_rput_data() for active connect.
* This is called after tcp_mss_set() when the desired MSS value is known.
* This makes sure that our window size is a multiple of the other side's
* MSS.
* 2) Handling SO_RCVBUF option.
*
* It is ASSUMED that the requested size is a multiple of the current MSS.
*
* XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
* user requests so.
*/
static int
{
/*
* Record the stream head's high water mark for
* this endpoint; this is used for flow-control
* purposes in tcp_fuse_output().
*/
if (!tcp_detached)
/*
* In the fusion case, the maxpsz stream head value of
* our peer is set according to its send buffer size
* and our receive buffer size; since the latter may
* have changed we need to update the peer's maxpsz.
*/
return (rwnd);
}
if (tcp_detached)
else
/*
* Insist on a receive window that is at least
* tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
* funny TCP interactions of Nagle algorithm, SWS avoidance
* and delayed acknowledgement.
*/
/*
* If window size info has already been exchanged, TCP should not
* shrink the window. Shrinking window is doable if done carefully.
* We may add that support later. But so far there is not a real
* need to do that.
*/
/* MSS may have changed, do a round up again. */
}
/*
* tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
* can be applied even before the window scale option is decided.
*/
if (rwnd > max_transmittable_rwnd) {
/*
* If we're over the limit we may have to back down tcp_rwnd.
* The increment below won't work for us. So we set all three
* here and the increment below will have no effect.
*/
}
if (tcp->tcp_localnet) {
} else {
/*
* For a remote host on a different subnet (through a router),
* we ack every other packet to be conforming to RFC1122.
* tcp_deferred_acks_max defaults to 2.
*/
}
else
tcp->tcp_rack_cur_max = 0;
/*
* Increment the current rwnd by the amount the maximum grew (we
* can not overwrite it since we might be in the middle of a
* connection.)
*/
if (tcp_detached)
return (rwnd);
/*
* We set the maximum receive window into rq->q_hiwat.
* This is not actually used for flow control.
*/
/*
* Set the Stream head high water mark. This doesn't have to be
* here, since we are simply using default values, but we would
* prefer to choose these values algorithmically, with a likely
* relationship to rwnd.
*/
return (rwnd);
}
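/*
 * Illustrative sketch (not part of the original code): one way to
 * normalize a requested receive window as described in the comment above
 * tcp_rwnd_set(): round it up to a multiple of the MSS, insist on a
 * minimum of minmss segments, and cap it by what the negotiated window
 * scale can actually advertise.  Names and parameters are assumptions
 * made for this example only.
 */
static uint32_t
tcp_example_normalize_rwnd(uint32_t rwnd, uint32_t mss, uint32_t minmss,
    uint32_t rcv_ws)
{
	/* Largest unscaled window (64K - 1) shifted by the window scale. */
	uint32_t max_transmittable = 65535U << rcv_ws;

	/* Round the request up to the next multiple of the MSS. */
	rwnd = ((rwnd + mss - 1) / mss) * mss;

	/* Insist on at least minmss * mss to avoid SWS/Nagle interactions. */
	if (rwnd < minmss * mss)
		rwnd = minmss * mss;

	/* Never advertise more than the window scale option can express. */
	if (rwnd > max_transmittable)
		rwnd = max_transmittable;

	return (rwnd);
}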
/*
* Return SNMP stuff in buffer in mpdata.
*/
mblk_t *
{
int i;
int v4_conn_idx;
int v6_conn_idx;
/*
* make a copy of the original message
*/
return (NULL);
}
/* build table of connections -- need count in fixed part */
ispriv =
v4_conn_idx = v6_conn_idx = 0;
for (i = 0; i < CONN_G_HASH_SIZE; i++) {
while ((connp =
continue; /* not in this zone */
tcp->tcp_ibsegs = 0;
tcp->tcp_obsegs = 0;
}
}
/* Create a message to report on IPv6 entries */
/* Don't want just anybody seeing these... */
if (ispriv) {
} else {
/*
* Netstat, unfortunately, uses this to
* get send/receive queue sizes. How to fix?
* Why not compute the difference only?
*/
}
if (needattr)
}
/*
* Create an IPv4 table entry for IPv4 entries and also
* for IPv6 entries which are bound to in6addr_any
* but don't have IPV6_V6ONLY set.
* (i.e. anything an IPv4 peer could connect to)
*/
} else {
}
/* Don't want just anybody seeing these... */
if (ispriv) {
} else {
/*
* Netstat, unfortunately, uses this to
* get send/receive queue sizes. How to fix?
* Why not compute the difference only?
*/
}
if (needattr)
(void) snmp_append_data2(
&mp_attr_tail, (char *)&mlp,
sizeof (mlp));
}
}
}
/* fixed length structure for IPv4 and IPv6 counters */
sizeof (mib2_tcp6ConnEntry_t));
/* synchronize 32- and 64-bit counters */
/* table of connections... */
sizeof (struct T_optmgmt_ack)];
qreply(q, mp_conn_ctl);
/* table of MLP attributes... */
sizeof (struct T_optmgmt_ack)];
else
qreply(q, mp_attr_ctl);
/* table of IPv6 connections... */
sizeof (struct T_optmgmt_ack)];
qreply(q, mp6_conn_ctl);
/* table of IPv6 MLP attributes... */
sizeof (struct T_optmgmt_ack)];
else
qreply(q, mp6_attr_ctl);
return (mp2ctl);
}
/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
/* ARGSUSED */
int
{
switch (level) {
case MIB2_TCP:
switch (name) {
case 13:
return (0);
/* TODO: delete entry defined by tce */
return (1);
default:
return (0);
}
default:
return (1);
}
}
/* Translate TCP state to MIB2 TCP state. */
static int
{
return (0);
case TCPS_CLOSED:
case TCPS_IDLE: /* RFC1213 doesn't have analogue for IDLE & BOUND */
case TCPS_BOUND:
return (MIB2_TCP_closed);
case TCPS_LISTEN:
return (MIB2_TCP_listen);
case TCPS_SYN_SENT:
return (MIB2_TCP_synSent);
case TCPS_SYN_RCVD:
return (MIB2_TCP_synReceived);
case TCPS_ESTABLISHED:
return (MIB2_TCP_established);
case TCPS_CLOSE_WAIT:
return (MIB2_TCP_closeWait);
case TCPS_FIN_WAIT_1:
return (MIB2_TCP_finWait1);
case TCPS_CLOSING:
return (MIB2_TCP_closing);
case TCPS_LAST_ACK:
return (MIB2_TCP_lastAck);
case TCPS_FIN_WAIT_2:
return (MIB2_TCP_finWait2);
case TCPS_TIME_WAIT:
return (MIB2_TCP_timeWait);
default:
return (0);
}
}
static char tcp_report_header[] =
"TCP " MI_COL_HDRPAD_STR
"zone dest snxt suna "
"swnd rnxt rack rwnd rto mss w sw rw t "
"recent [lport,fport] state";
/*
* TCP status report triggered via the Named Dispatch mechanism.
*/
/* ARGSUSED */
static void
{
char cflag;
char buf[80];
if (buf_len <= 0)
return;
if (hashval >= 0)
else
hash[0] = '\0';
/*
* Note that we use the remote address in the tcp_b structure.
* This means that it will print out the real destination address,
* not the next hop's address if source routing is used. This
* avoids confusion in the output because the user may not
* know that source routing is used for a connection.
*/
} else {
}
/*
* the ispriv checks are so that normal users cannot determine
* sequence number information using NDD.
*/
if (TCP_IS_DETACHED(tcp))
cflag = '*';
else
cflag = ' ';
"%010d %05ld %05d %1d %02d %02d %1d %08x %s%c\n",
hash,
(void *)tcp,
} else {
}
}
/*
* TCP status report (for listeners only) triggered via the Named Dispatch
* mechanism.
*/
/* ARGSUSED */
static void
{
char addrbuf[INET6_ADDRSTRLEN];
if (buf_len <= 0)
return;
} else {
}
"%03d "
"%d %s %05u %08u %d/%d/%d%c\n",
} else {
}
}
/* TCP status report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
static int
{
int i;
/*
* Because of the ndd constraint, at most we can have 64K buffer
* to put in all TCP info. So to be more efficient, just
* allocate a 64K buffer here, assuming we need that large buffer.
* This may be a problem as any user can read tcp_status. Therefore
* we limit the rate of doing this using tcp_ndd_get_info_interval.
* This should be OK as normal users should not do this too often.
*/
return (0);
}
}
/* The following may work even if we cannot get a large buf. */
return (0);
}
for (i = 0; i < CONN_G_HASH_SIZE; i++) {
while ((connp =
if (zoneid != GLOBAL_ZONEID &&
continue;
cr);
}
}
return (0);
}
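/*
 * Illustrative sketch (not part of the original code): the rate limiting
 * mentioned in the comment above, i.e. refuse to rebuild the (up to 64K)
 * report if it was generated less than one interval ago.  The lbolt-based
 * bookkeeping and the names used here are assumptions for this example.
 */
static boolean_t
tcp_example_ndd_report_allowed(clock_t now, clock_t *last_report,
    clock_t min_interval_ticks)
{
	if (now - *last_report < min_interval_ticks)
		return (B_FALSE);	/* too soon; keep the old report */
	*last_report = now;
	return (B_TRUE);
}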
/* TCP status report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
static int
{
int i;
/* Refer to comments in tcp_status_report(). */
return (0);
}
}
/* The following may work even if we cannot get a large buf. */
return (0);
}
for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
if (zoneid != GLOBAL_ZONEID &&
continue;
}
}
return (0);
}
/* TCP status report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
static int
{
int i;
/* Refer to comments in tcp_status_report(). */
return (0);
}
}
/* The following may work even if we cannot get a large buf. */
return (0);
}
(void) mi_mpprintf(mp,
" TCP " MI_COL_HDRPAD_STR
for (i = 0; i < ipst->ips_ipcl_bind_fanout_size; i++) {
while ((connp =
if (zoneid != GLOBAL_ZONEID &&
continue;
}
}
return (0);
}
/* TCP status report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
static int
{
int i;
/* Refer to comments in tcp_status_report(). */
return (0);
}
}
/* The following may work even if we cannot get a large buf. */
return (0);
}
for (i = 0; i < ipst->ips_ipcl_conn_fanout_size; i++) {
while ((connp =
if (zoneid != GLOBAL_ZONEID &&
continue;
}
}
return (0);
}
/* TCP status report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
static int
{
int i;
/* Refer to comments in tcp_status_report(). */
return (0);
}
}
/* The following may work even if we cannot get a large buf. */
return (0);
}
for (i = 0; i < TCP_FANOUT_SIZE; i++) {
if (zoneid != GLOBAL_ZONEID &&
continue;
}
}
return (0);
}
/*
* tcp_timer is the timer service routine. It handles the retransmission,
* FIN_WAIT_2 flush, and zero window probe timeout events. It figures out
* from the state of the tcp instance what kind of action needs to be done
* at the time it is called.
*/
static void
{
tcp->tcp_timer_tid = 0;
return;
case TCPS_IDLE:
case TCPS_BOUND:
case TCPS_LISTEN:
return;
case TCPS_SYN_RCVD: {
/* it's our first timeout */
/*
* Make this eager available for drop if we
* need to drop one to accommodate a new
* incoming SYN request.
*/
}
if (!listener->tcp_syn_defense &&
/* We may be under attack. Put on a defense. */
"rate! System (port %d) may be under a "
"SYN flood attack!",
IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t),
}
!tcp->tcp_closemp_used) {
/*
* This is our second timeout. Put the tcp in
* the list of droppable eagers to allow it to
* be dropped, if needed. We don't check
* whether tcp_dontdrop is set or not to
* protect ourselves from a SYN attack where a
* remote host can spoof itself as one of the
* good IP sources and continue to hold
* resources too long.
*/
}
}
}
/* FALLTHRU */
case TCPS_SYN_SENT:
break;
case TCPS_ESTABLISHED:
case TCPS_FIN_WAIT_1:
case TCPS_CLOSING:
case TCPS_CLOSE_WAIT:
case TCPS_LAST_ACK:
/* If we have data to rexmit */
if (!tcp->tcp_xmit_head)
break;
time_to_wait = lbolt -
/*
* If the timer fires too early, 1 clock tick earlier,
* restart the timer.
*/
if (time_to_wait > msec_per_tick) {
return;
}
/*
* When we probe zero windows, we force the swnd open.
* If our peer acks with a closed window swnd will be
* set to zero by tcp_rput(). As long as we are
* receiving acks tcp_rput will
* reset 'tcp_ms_we_have_waited' so as not to trip the
* first and second interval actions. NOTE: the timer
* interval is allowed to continue its exponential
* backoff.
*/
SL_TRACE, "tcp_timer: zero win");
}
} else {
/*
* After retransmission, we need to do
* slow start. Set the ssthresh to one
* half of current effective window and
* cwnd to one MSS. Also reset
* tcp_cwnd_cnt.
*
* Note that if tcp_ssthresh is reduced because
* of ECN, do not reduce it again unless it is
* already one window of data away (tcp_cwr
* should then be cleared) or this is a
* timeout for a retransmitted segment.
*/
}
tcp->tcp_cwnd_cnt = 0;
if (tcp->tcp_ecn_ok) {
}
}
break;
}
/*
* We have something to send yet we cannot send. The
* reason can be:
*
* 1. Zero send window: we need to do zero window probe.
* 2. Zero cwnd: because of ECN, we need to "clock out"
* segments.
* 3. SWS avoidance: receiver may have shrunk window,
* reset our knowledge.
*
* Note that condition 2 can happen with either 1 or
* 3. But 1 and 3 are exclusive.
*/
if (tcp->tcp_unsent != 0) {
/*
* Set tcp_cwnd to 1 MSS so that a
* new segment can be sent out. We
* are "clocking out" new data when
* the network is really congested.
*/
}
/* Extend window for zero window probe */
} else {
/*
* Handle timeout from sender SWS avoidance.
* Reset our knowledge of the max send window
* since the receiver might have reduced its
* receive buffer. Avoid setting tcp_max_swnd
* to one since that will essentially disable
* the SWS checks.
*
* Note that since we don't have a SWS
* state variable, if the timeout is set
* for ECN but not for SWS, this
* code will also be executed. This is
* fine as tcp_max_swnd is updated
* constantly and it will not affect
* anything.
*/
}
return;
}
/* Is there a FIN that needs to be retransmitted? */
!tcp->tcp_fin_acked)
break;
/* Nothing to do, return without restarting timer. */
return;
case TCPS_FIN_WAIT_2:
/*
* User closed the TCP endpoint and peer ACK'ed our FIN.
* We waited some time for the peer's FIN, but it hasn't
* arrived. We flush the connection now to avoid the
* case where the peer has rebooted.
*/
if (TCP_IS_DETACHED(tcp)) {
} else {
}
return;
case TCPS_TIME_WAIT:
return;
default:
"tcp_timer: strange state (%d) %s",
}
return;
}
/*
* For zero window probe, we need to send indefinitely,
* unless we have not heard from the other side for some
* time...
*/
if ((tcp->tcp_zero_win_probe == 0) ||
second_threshold)) {
/*
* If TCP is in SYN_RCVD state, send back a
* RST|ACK as BSD does. Note that tcp_zero_win_probe
* should be zero in TCPS_SYN_RCVD state.
*/
tcp_xmit_ctl("tcp_timer: RST sent on timeout "
"in SYN_RCVD",
}
(void) tcp_clean_death(tcp,
return;
} else {
/*
* Set tcp_ms_we_have_waited to second_threshold
* so that in next timeout, we will do the above
* check (lbolt - tcp_last_recv_time). This is
* also to avoid overflow.
*
* We don't need to decrement tcp_timer_backoff
* to avoid overflow because it will be decremented
* later if new timeout value is greater than
* tcp_rexmit_interval_max. In the case when
* tcp_rexmit_interval_max is greater than
* second_threshold, it means that we will wait
* longer than second_threshold to send the next
* window probe.
*/
}
} else if (ms > first_threshold) {
tcp->tcp_xmit_head =
}
/*
* We have been retransmitting for too long... The RTT
* we calculated is probably incorrect. Reinitialize it.
* Need to compensate for 0 tcp_rtt_sa. Reset
* tcp_rtt_update so that we won't accidentally cache a
* bad value. But only do this if this is not a zero
* window probe.
*/
tcp->tcp_rtt_sa = 0;
tcp->tcp_rtt_update = 0;
}
}
tcp->tcp_timer_backoff++;
/*
* This means the original RTO is tcp_rexmit_interval_min.
* So we will use tcp_rexmit_interval_min as the RTO value
* and do the backoff.
*/
} else {
}
/*
* ms is at max, decrement tcp_timer_backoff to avoid
* overflow.
*/
tcp->tcp_timer_backoff--;
}
if (tcp->tcp_zero_win_probe == 0) {
}
/*
* This is after a timeout and tcp_rto is backed off. Set
* tcp_set_timer to 1 so that next time RTO is updated, we will
* restart the timer with a correct value.
*/
B_TRUE);
/*
* When slow start after retransmission begins, start with
* this seq no. tcp_rexmit_max marks the end of special slow
* start phase. tcp_snd_burst controls how many segments
* can be sent because of an ack.
*/
(tcp->tcp_unsent == 0)) {
} else {
}
tcp->tcp_dupack_cnt = 0;
/*
* Remove all rexmit SACK blk to start from fresh.
*/
tcp->tcp_num_notsack_blk = 0;
tcp->tcp_cnt_notsack_list = 0;
}
return;
}
/* Attach credentials to retransmitted initial SYNs. */
}
}
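/*
 * Illustrative sketch (not part of the original code): the exponential
 * retransmission backoff performed by tcp_timer() above, kept within the
 * configured [rto_min, rto_max] bounds.  The parameter names are
 * assumptions made for this example only.
 */
static uint32_t
tcp_example_backoff_rto(uint32_t rto, uint32_t backoff_shift,
    uint32_t rto_min, uint32_t rto_max)
{
	uint32_t ms;

	/* Never back off from below the configured floor. */
	if (rto < rto_min)
		rto = rto_min;

	/* Each timeout doubles the interval (the shift grows by one). */
	if (backoff_shift > 16)
		backoff_shift = 16;	/* avoid shifting into overflow */
	ms = rto << backoff_shift;

	/*
	 * Clamp to the ceiling; the caller would also stop growing the
	 * shift at this point so later timeouts cannot overflow either.
	 */
	if (ms > rto_max || (ms >> backoff_shift) != rto)
		ms = rto_max;

	return (ms);
}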
/* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */
static void
{
case TCPS_BOUND:
case TCPS_LISTEN:
break;
default:
return;
}
/*
* Need to clean up all the eagers since after the unbind, segments
* will no longer be delivered to this listener stream.
*/
tcp_eager_cleanup(tcp, 0);
}
} else {
}
/* Send M_FLUSH according to TPI */
}
/*
* Don't let port fall into the privileged range.
* Since the extra privileged ports can be arbitrary we also
* ensure that we exclude those from consideration.
* tcp_g_epriv_ports is not sorted thus we loop over it until
* there are no changes.
*
* Note: No locks are held when inspecting tcp_g_*epriv_ports
* but instead the code relies on:
* - the fact that the address of the array and its size never changes
* - the atomic assignment of the elements of the array
*
* Returns 0 if there are no more ports available.
*
* TS note: skip multilevel ports.
*/
static in_port_t
{
int i;
if (random && tcp_random_anon_port != 0) {
sizeof (in_port_t));
/*
* Unless changed by a sys admin, the smallest anon port
* is 32768 and the largest anon port is 65535. It is
* very likely (50%) for the random port to be smaller
* than the smallest anon port. When that happens,
* add port % (anon port range) to the smallest anon
* port to get the random port. It should fall into the
* valid anon port range.
*/
}
}
if (restart)
return (0);
}
for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
port++;
/*
* Make sure the port is in the
* valid range.
*/
goto retry;
}
}
if (is_system_labeled() &&
IPPROTO_TCP, B_TRUE)) != 0) {
port = i;
goto retry;
}
return (port);
}
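/*
 * Illustrative sketch (not part of the original code): folding a random
 * 16-bit value into the configured anonymous port range, as described in
 * the comment above.  With the default range of 32768-65535, roughly half
 * of all random values land below the smallest anonymous port and must be
 * remapped.  Names are assumptions made for this example only.
 */
static in_port_t
tcp_example_pick_anon_port(in_port_t random_port, in_port_t smallest_anon,
    in_port_t largest_anon)
{
	uint32_t range = (uint32_t)largest_anon - smallest_anon + 1;

	if (random_port < smallest_anon) {
		/* Fold the out-of-range value back into the valid range. */
		random_port = (in_port_t)(smallest_anon +
		    (random_port % range));
	}
	return (random_port);
}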
/*
* Return the next anonymous port in the privileged port range for
* bind checking. It starts at IPPORT_RESERVED - 1 and goes
* downwards. This is the same behavior as documented in the userland
* library call rresvport(3N).
*
* TS note: skip multilevel ports.
*/
static in_port_t
{
if (restart)
return (0);
}
if (is_system_labeled() &&
goto retry;
}
return (next_priv_port--);
}
/* The write side r/w procedure. */
#if CCS_STATS
struct {
struct {
} wrw_stats;
#endif
/*
* Call by tcp_wput() to handle all non data, except M_PROTO and M_PCPROTO,
* messages.
*/
/* ARGSUSED */
static void
{
/*
* TCP is D_MP and qprocsoff() is done towards the end of the tcp_close.
* Once the close starts, streamhead and sockfs will not let any data
* packets come down (close ensures that there are no threads using the
* queue and no new threads will come down) but since qprocsoff()
* hasn't happened yet, a M_FLUSH or some non data message might
* get reflected back (in response to our own FLUSHRW) and get
* processed after tcp_close() is done. The conn would still be valid
* because a ref would have been added, but we need to check the state
* before actually processing the packet.
*/
return;
}
case M_IOCDATA:
break;
case M_FLUSH:
break;
default:
break;
}
}
/*
* The TCP fast path write put procedure.
* NOTE: the logic of the fast path is duplicated from tcp_wput_data()
*/
/* ARGSUSED */
void
{
int len;
int hdrlen;
int plen;
int usable;
/*
* Try and ASSERT the minimum possible references on the
* conn early enough. Since we are executing on write side,
* the connection is obviously not detached and that means
* there is a ref each for TCP and IP. Since we are behind
* the squeue, the minimum references needed are 3. If the
* conn is in classifier hash list, there should be an
* extra ref for that (we check both the possibilities).
*/
/* Bypass tcp protocol for fused tcp loopback */
return;
if (tcp->tcp_xmit_zc_clean)
/*
* Criteria for fast path:
*
* 1. no unsent data
* 2. single mblk in request
* 3. connection established
* 4. data in mblk
* 5. len <= mss
* 6. no tcp_valid bits
*/
if ((tcp->tcp_unsent != 0) ||
(len == 0) ||
(tcp->tcp_valid_bits != 0)) {
return;
}
/* queue new packet onto retransmission queue */
} else {
}
/* find out how much we can send */
/* BEGIN CSTYLED */
/*
* un-acked usable
* |--------------|-----------------|
* tcp_suna tcp_snxt tcp_suna+tcp_swnd
*/
/* END CSTYLED */
/* start sending from tcp_snxt */
/*
* Check to see if this connection has been idled for some
* time and no ACK is expected. If it is, we need to slow
* start again to get back the connection's "self-clock" as
* described in VJ's paper.
*
* Refer to the comment in tcp_mss_set() for the calculation
* of tcp_cwnd after idle.
*/
}
/* usable can be < 0 if the congestion window is smaller */
/* Can't send complete M_DATA in one shot */
goto slow;
}
if (tcp->tcp_flow_stopped &&
}
/*
* determine if anything to send (Nagle).
*
* 1. len < tcp_mss (i.e. small)
* 2. unacknowledged data present
* 3. len < nagle limit
* 4. last packet sent < nagle limit (previous packet sent)
*/
/*
* This was the first unsent packet and normally
* mss < xmit_hiwater so there is no need to worry
* about flow control. The next packet will go
* through the flow control check in tcp_wput_data().
*/
/* leftover work from above */
return;
}
/* len <= tcp->tcp_mss && len == unsent so no silly window */
}
/* we have always sent something */
tcp->tcp_rack_cnt = 0;
goto no_memory;
/* adjust tcp header information */
/* Update the latest receive window size in TCP header. */
} else {
}
/* see if we need to allocate a mblk for the headers */
/* NOTE: we assume allocb returns an OK_32PTR */
if (!mp) {
goto no_memory;
}
/* Leave room for Link Level header */
/* hdrlen = tcp->tcp_hdr_len; */
}
/* Fill in the timestamp option. */
if (tcp->tcp_snd_ts_ok) {
} else {
}
/* copy header into outgoing packet */
if (hdrlen -= 40) {
hdrlen >>= 2;
dst += 10;
src += 10;
do {
} while (--hdrlen);
}
/*
* Set the ECN info in the TCP header. Note that this
* is not the template header.
*/
if (tcp->tcp_ecn_ok) {
if (tcp->tcp_ecn_echo_on)
}
}
if (tcp->tcp_ip_forward_progress) {
}
return;
/*
* If we ran out of memory, we pretend to have sent the packet
* and that it was lost on the wire.
*/
return;
slow:
/* leftover work from above */
}
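/*
 * Illustrative sketch (not part of the original code): the fast-path
 * eligibility test enumerated in the comment near the top of the routine
 * above, expressed as a single predicate.  The parameters stand in for
 * the corresponding connection state and are assumptions for this example.
 */
static boolean_t
tcp_example_fastpath_ok(uint32_t unsent, boolean_t single_mblk,
    boolean_t established, uint32_t len, uint32_t mss, uint32_t valid_bits)
{
	/*
	 * 1. no unsent data		4. data in mblk (len != 0)
	 * 2. single mblk in request	5. len <= mss
	 * 3. connection established	6. no tcp_valid bits
	 */
	if (unsent != 0 || !single_mblk || !established ||
	    len == 0 || len > mss || valid_bits != 0)
		return (B_FALSE);
	return (B_TRUE);
}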
/*
* The function called through squeue to get behind eager's perimeter to
* finish the accept processing.
*/
/* ARGSUSED */
void
{
struct stroptions *stropt;
/*
* Drop the eager's ref on the listener, that was placed when
* this eager began life in tcp_conn_request.
*/
/*
* Someone blew off the eager before we could finish
* the accept.
*
* The only reason the eager exists is because we put in
* a ref on it when conn ind went up. We need to send
* a disconnect indication up while the last reference
* on the eager will be dropped by the squeue when we
* return.
*/
struct T_discon_ind *tdi;
/*
* Let us reuse the incoming mblk to avoid memory
* allocation failure problems. We know that the
* size of the incoming mblk i.e. stroptions is greater
* than sizeof T_discon_ind. So the reallocb below
* can't fail.
*/
B_FALSE);
if (tcp->tcp_issocket) {
tdi->SEQ_number = 0;
} else {
tdi->SEQ_number =
}
} else {
}
if (tcp->tcp_hard_binding) {
}
return;
}
}
/*
* For a loopback connection with tcp_direct_sockfs on, note that
* we don't have to protect tcp_rcv_list yet because synchronous
* streams has not yet been enabled and tcp_fuse_rrw() cannot
* possibly race with us.
*/
/*
* Set the max window size (tcp_rq->q_hiwat) of the acceptor
* properly. This is the first time we know of the acceptor's
* queue. So we do it here.
*/
/*
* Recv queue is empty, tcp_rwnd should not have changed.
* That means it should be equal to the listener's tcp_rwnd.
*/
} else {
#ifdef DEBUG
}
#endif
/* There is some data, add them back to get the max. */
}
/*
* This is the first time we run on the correct
* queue after tcp_accept. So fix all the q parameters
* here.
*/
/*
* Record the stream head's high water mark for this endpoint;
* this is used for flow-control purposes.
*/
/*
* Determine what write offset value to use depending on SACK and
* whether the endpoint is fused or not.
*/
/*
* For fused tcp loopback, set the stream head's write
* offset value to zero since we won't be needing any room;
* this also reduces the amount of work done by kmem.
* Non-fused tcp loopback case is handled separately below.
*/
/*
* Update the peer's transmit parameters according to
* our recently calculated high water mark value.
*/
} else if (tcp->tcp_snd_sack_ok) {
} else {
}
/*
* If this endpoint is handling SSL, then reserve extra
* offset and space at the end.
* Also have the stream head allocate SSL3_MAX_RECORD_LEN packets,
* overriding the previous setting. The extra cost of signing and
* encrypting multiple MSS-size records (12 of them with Ethernet),
* instead of a single contiguous one by the stream head
* largely outweighs the statistical reduction of ACKs, when
* applicable. The peer will also save on decryption and verification
* costs.
*/
}
/* Send the options up */
/*
*
* Adjust receive window in case it had decreased
* (because there is data <=> tcp_rcv_list != NULL)
* while the connection was detached. Note that
* in case the eager was flow-controlled, w/o this
* code, the rwnd may never open up again!
*/
/* We drain directly in case of fused tcp loopback */
<< tcp->tcp_rcv_ws;
}
}
/* Sodirect, move from rcv_list */
}
tcp->tcp_rcv_cnt = 0;
/* sod_wakeup() did the mutex_exit() */
} else {
/* Not sodirect, drain */
(void) tcp_rcv_drain(q, tcp);
}
/*
* For fused tcp loopback, back-enable peer endpoint
* if it's currently flow-controlled.
*/
/*
* In order to change the peer's tcp_flow_stopped,
* we need to take locks for both end points. The
* highest address is taken first.
*/
} else {
}
if (peer_tcp->tcp_flow_stopped) {
}
}
}
mp = mi_tpi_ordrel_ind();
if (mp) {
if (tcp->tcp_deferred_clean_death) {
/*
* tcp_clean_death was deferred
* for T_ORDREL_IND - do it now
*/
(void) tcp_clean_death(tcp,
}
} else {
/*
* Run the orderly release in the
* service routine.
*/
qenable(q);
}
}
if (tcp->tcp_hard_binding) {
}
/* We can enable synchronous streams now */
}
if (tcp->tcp_ka_enabled) {
tcp->tcp_ka_last_intrvl = 0;
}
/*
* At this point, eager is fully established and will
* have the following references -
*
* 2 references for connection to exist (1 for TCP and 1 for IP).
* 1 reference for the squeue which will be dropped by the squeue as
* soon as this function returns.
* There will be 1 additional reference for being in classifier
* hash list provided something bad hasn't happened.
*/
}
/*
* The function called through squeue to get behind listener's perimeter to
* send a deferred conn_ind.
*/
/* ARGSUSED */
void
{
/*
* If listener has closed, it would have caused a
*/
struct T_conn_ind *conn_ind;
/*
* We need to drop the ref on the eager that was put in
* tcp_rput_data() before trying to send the conn_ind
* to listener. The conn_ind was deferred in tcp_send_conn_ind
* and tcp_wput_accept() is sending this deferred conn_ind but
* listener is closed so we drop the ref.
*/
return;
}
}
/*
* This is the STREAMS entry point for T_CONN_RES coming down on
* Acceptor STREAM when sockfs listener does accept processing.
* Read the block comment on top of tcp_conn_request().
*/
void
{
struct T_conn_res *conn_res;
return;
}
case O_T_CONN_RES:
case T_CONN_RES:
/*
* We pass up an err ack if allocb fails. This will
* cause sockfs to issue a T_DISCON_REQ which will cause
* tcp_eager_blowoff to be called. sockfs will then call
* rq->q_qinfo->qi_qclose to cleanup the acceptor stream.
* We need to do the allocb up here because we have to
* make sure rq->q_qinfo->qi_qclose still points to the
* correct function (tcpclose_accept) in case allocb
* fails.
*/
return;
}
/*
* TCP is _D_SODIRECT and sockfs is directly above so
* save shared sodirect_t pointer (if any).
*
* If tcp_fused and sodirect enabled disable it.
*/
/* Fused, disable sodirect */
}
/* Put the ref for IP */
/*
* We should have minimum of 3 references on the conn
* at this point. One each for TCP and IP and one for
* the T_conn_ind that was sent up when the 3-way handshake
* completed. In the normal case we would also have another
* reference (making a total of 4) for the conn being in the
* classifier hash list. However the eager could have received
* an RST subsequently and tcp_closei_local could have removed
* the eager from the classifier hash list, hence we can't
* assert that reference.
*/
/*
* Send the new local address also up to sockfs. There
* should already be enough space in the mp that came
* down from soaccept().
*/
} else {
sin6->sin6_flowinfo = 0;
} else {
}
sin6->sin6_scope_id = 0;
sin6->__sin6_src_id = 0;
}
/*
* Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO
* from listener to acceptor. The message is chained on the
* bind_mp which tcp_rput_other will send down to IP.
*/
if (listener->tcp_bound_if != 0) {
/* allocate optmgmt req */
sizeof (int));
}
/* allocate optmgmt req */
}
/*
* listener->tcp_eager_prev_q0 points to the TAIL of the
* deferred T_conn_ind queue. We need to get to the head
* of the queue in order to send up T_conn_ind in the same
* order as the 3WHS completed.
*/
break;
else
}
/* None of the pending eagers can be sent up now */
goto no_more_eagers;
/* Move from q0 to q */
/* Make sure the tcp isn't in the list of droppables */
/*
* Insert at end of the queue because sockfs sends
* down T_CONN_RES in chronological order. Leaving
* the older conn indications at the front of the queue
* helps reduce search time.
*/
} else {
}
/* Need to get inside the listener perimeter */
}
/*
* At this point, the eager is detached from the listener
* but we still have an extra ref on the eager (apart from the
* usual tcp references). The ref was placed in tcp_rput_data
* before sending the conn_ind in tcp_send_conn_ind.
* The ref will be dropped in tcp_accept_finish(). As sockfs
* has already established this tcp with its own stream,
* it's OK to set tcp_detached to B_FALSE.
*/
return;
default:
return;
}
}
static int
{
switch (tcp->tcp_family) {
case AF_INET:
return (EINVAL);
break;
case AF_INET6:
return (EINVAL);
} else {
}
break;
}
return (0);
}
static int
{
return (ENOTCONN);
switch (tcp->tcp_family) {
case AF_INET:
return (EINVAL);
break;
case AF_INET6:
return (EINVAL);
}
break;
}
return (0);
}
/*
* Handle special out-of-band ioctl requests (see PSARC/2008/265).
*/
static void
{
void *data;
return;
}
case TI_GETPEERNAME:
break;
case TI_GETMYNAME:
break;
default:
break;
}
}
void
{
void (*output_proc)();
case M_DATA:
}
return;
case M_CMD:
tcp_wput_cmdblk(q, mp);
return;
case M_PROTO:
case M_PCPROTO:
/*
* if it is a snmp message, don't get behind the squeue
*/
} else {
"tcp_wput_proto, dropping one...");
}
return;
}
if (type == T_SVR4_OPTMGMT_REQ) {
cr)) {
/*
* This was a SNMP request
*/
return;
} else {
}
} else {
}
break;
case M_IOCTL:
/*
* Most ioctls can be processed right away without going via
* squeues - process them right here. Those that do require
* squeue (currently TCP_IOC_DEFAULT_Q and _SIOCSOCKFALLBACK)
* are processed by tcp_wput_ioctl().
*/
case TCP_IOC_ABORT_CONN:
tcp_ioctl_abort_conn(q, mp);
return;
case TI_GETPEERNAME:
case TI_GETMYNAME:
return;
case ND_SET:
/* nd_getset does the necessary checks */
case ND_GET:
return;
}
return;
case TCP_IOC_DEFAULT_Q:
/*
* Wants to be the default wq. Check the credentials
* first, the rest is executed via squeue.
*/
return;
}
break;
default:
break;
}
break;
default:
break;
}
}
/*
* Initial STREAMS write side put() procedure for sockets. It tries to
* handle the T_CAPABILITY_REQ which sockfs sends down while setting
* up the socket without using the squeue. Non T_CAPABILITY_REQ messages
* are handled by tcp_wput() as usual.
*
* All further messages will also be handled by tcp_wput() because we cannot
* be sure that the above short cut is safe later.
*/
static void
{
return;
}
}
static boolean_t
{
if (do_tcpzcopy == 2)
zc_enabled = B_TRUE;
connp->conn_dontroute == 0 &&
!connp->conn_nexthop_set &&
do_tcpzcopy == 1) {
/*
* the checks above closely resemble the fast path checks
* in tcp_send_data().
*/
ill_zerocopy_flags != 0);
}
}
}
if (!TCP_IS_DETACHED(tcp)) {
if (zc_enabled) {
} else {
}
}
return (zc_enabled);
}
static mblk_t *
{
if (do_tcpzcopy == 2)
return (bp);
else if (tcp->tcp_snd_zcopy_on) {
if (!TCP_IS_DETACHED(tcp)) {
}
}
}
/*
* Backoff from a zero-copy mblk by copying data to a new mblk and freeing
* the original desballoca'ed segmapped mblk.
*/
static mblk_t *
{
if (IS_VMLOANED_MBLK(bp)) {
/* fail to backoff; leave it for the next backoff */
return (bp);
}
if (fix_xmitlist)
else
}
if (fix_xmitlist) {
}
} else {
}
while (nbp) {
if (IS_VMLOANED_MBLK(nbp)) {
return (head);
}
if (fix_xmitlist)
else
}
if (fix_xmitlist) {
}
} else {
}
}
if (fix_xmitlist) {
}
return (head);
}
static void
{
if (tcp->tcp_detached)
return;
}
static boolean_t
{
} else {
/* force a recheck later on */
return (B_FALSE);
}
/*
* Since we are inside the squeue, there cannot be another
* thread in TCP trying to set the conn_ire_cache now. The
* check for IRE_MARK_CONDEMNED ensures that an interface
* unplumb thread has not yet started cleaning up the conns.
* Hence we don't need to grab the conn lock.
*/
if (CONN_CACHE_IRE(connp)) {
}
}
/*
* We can continue to use the ire but since it was
* not cached, we should drop the extra reference.
*/
if (!cached)
/*
* Rampart note: no need to select a new label here, since
* labels are not allowed to change during the life of a TCP
* connection.
*/
}
return (B_TRUE);
}
/*
* Called from tcp_send() or tcp_send_data() to find workable IRE.
*
* 0 = success;
* 1 = failed to find ire and ill.
*/
static boolean_t
{
else
return (B_FALSE);
return (B_FALSE);
}
/*
* Choose a good ill in the group to send the packets on.
*/
}
if (!tcp->tcp_ire_ill_check_done) {
}
return (B_TRUE);
}
static void
{
uint32_t hcksum_txflags = 0;
/*
* Drop off fast path for IPv6 and also if options are present or
* we need to resolve a TS label.
*/
!IPCL_IS_CONNECTED(connp) ||
!connp->conn_ulp_labeled ||
if (tcp->tcp_snd_zcopy_aware)
return;
}
if (tcp->tcp_snd_zcopy_aware)
return;
}
#ifndef _BIG_ENDIAN
#endif
/*
* because it was previously disabled due to changes in the ill;
* note that by doing it here, this re-enabling only applies when
* the packet is not dispatched through CALL_IP_WPUT().
*
* case, since that's how we ended up here. For IPv6, we do the
* re-enabling work in ip_xmit_v6(), albeit indirectly via squeue.
*/
/*
* Restore LSO for this connection, so that next time around
* it is eligible to go through tcp_lsosend() path again.
*/
ip1dbg(("tcp_send_data: reenabling LSO for connp %p on "
/*
* Restore MDT for this connection, so that next time around
* it is eligible to go through tcp_multisend() path again.
*/
ip1dbg(("tcp_send_data: reenabling MDT for connp %p on "
}
if (tcp->tcp_snd_zcopy_aware) {
/*
* we shouldn't need to reset ipha as the mp containing
* ipha should never be a zero-copy mp.
*/
}
}
/* pseudo-header checksum (do it in parts for IP header checksum) */
/* Software checksum? */
if (DB_CKSUMFLAGS(mp) == 0) {
}
/* Calculate IP header checksum if hardware isn't capable */
}
if (ILL_DLS_CAPABLE(ill)) {
/*
* Send the packet directly to DLD, where it may be queued
* depending on the availability of transmit resources at
* the media layer.
*/
} else {
}
}
}
/*
* This handles the case when the receiver has shrunk its win. Per RFC 1122
* if the receiver shrinks the window, i.e. moves the right window edge to
* the left, we should not send new data, but should retransmit normally the
* old unacked data between suna and suna + swnd. We might have sent data
* that is now outside the new window; pretend that we didn't send it.
*/
static void
{
ASSERT(shrunk_count > 0);
/* Pretend we didn't send the data outside the window */
snxt -= shrunk_count;
/* Get the mblk and the offset in it per the shrunk window */
/* Reset all the values per the now shrunk window */
/*
* Make sure the timer is running so that we will probe a zero
* window.
*/
}
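/*
 * Illustrative sketch (not part of the original code): the arithmetic
 * behind the shrunk-window handling above.  Pretending that the bytes now
 * beyond the right edge were never sent simply moves snxt back and returns
 * those bytes to the unsent count, so they go out again as ordinary data.
 * Names are assumptions made for this example only.
 */
static void
tcp_example_unsend_shrunk(uint32_t *snxt, uint32_t *unsent,
    uint32_t shrunk_count)
{
	/* Back snxt up over the bytes that now lie outside the window. */
	*snxt -= shrunk_count;
	/* Those bytes will be (re)transmitted later as unsent data. */
	*unsent += shrunk_count;
}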
/*
* The TCP normal data output path.
* NOTE: the logic of the fast path is duplicated from this function.
*/
static void
{
int len;
int tail_unsent;
int tcpstate;
int usable = 0;
int32_t num_sack_blk = 0;
int mdt_thres;
int rc;
/*
* tcp_wput_data() with NULL mp should only be called when
* there is unsent data.
*/
/* Really tacky... but we need this for detached closes. */
goto data_null;
}
#if CCS_STATS
#endif
/*
* Don't allow data after T_ORDREL_REQ or T_DISCON_REQ,
* or before a connection attempt has begun.
*/
#ifdef DEBUG
"tcp_wput_data: data after ordrel, %s",
#else
"tcp_wput_data: data after ordrel, %s\n",
}
#endif /* DEBUG */
}
if (tcp->tcp_snd_zcopy_aware &&
if (tcp->tcp_flow_stopped &&
}
return;
}
/* Strip empties */
for (;;) {
if (len > 0)
break;
if (!mp) {
return;
}
}
/* If we are the first on the list ... */
} else {
/* If tiny tx and room in txq tail, pullup to save mblks. */
if (len < tcp_tx_pull_len &&
if (len == 1) {
} else {
}
if (tcp->tcp_snd_zcopy_aware &&
} else {
}
}
/* Tack on however many more positive length mblks we have */
do {
int tlen;
if (tlen <= 0) {
} else {
}
}
if (urgent)
usable = 1;
/*
* Note that tcp_mss has been adjusted to take into account the
* timestamp option if applicable. Because SACK options do not
* appear in every TCP segments and they are of variable lengths,
* they cannot be included in tcp_mss. Thus we need to calculate
* the actual segment length when we need to send a segment which
* includes SACK options.
*/
2 + TCPOPT_HEADER_LEN;
} else {
}
}
if (tcpstate == TCPS_SYN_RCVD) {
/*
* The three-way connection establishment handshake is not
* complete yet. We want to queue the data for transmission
* after entering ESTABLISHED state (RFC793). A jump to
* "done" label effectively leaves data on the queue.
*/
goto done;
} else {
int usable_r;
/*
* In the special case when cwnd is zero, which can only
* happen if the connection is ECN capable, return now.
* New segments are sent using tcp_timer(). The timer
* is set in tcp_rput_data().
*/
/*
* Note that tcp_cwnd is 0 before 3-way handshake is
* finished.
*/
return;
}
/* NOTE: trouble if xmitting while SYN not acked? */
/*
* Check if the receiver has shrunk the window. If
* tcp_wput_data() with NULL mp is called, tcp_fin_sent
* cannot be set as there is unsent data, so FIN cannot
* be sent out. Otherwise, we need to take into account
* of FIN as it consumes an "invisible" sequence number.
*/
if (usable_r < 0) {
/*
* The receiver has shrunk the window and we have sent
* -usable_r data beyond the window, re-adjust.
*
* If TCP window scaling is enabled, there can be
* round down error as the advertised receive window
* is actually right shifted n bits. This means that
* the lower n bits info is wiped out. It will look
* like the window is shrunk. Do a check here to
* see if the shrunk amount is actually within the
* error in window calculation. If it is, just
* return. Note that this check is inside the
* shrunk window check. This makes sure that even
* though tcp_process_shrunk_swnd() is not called,
* we will stop further processing.
*/
}
return;
}
/* usable = MIN(swnd, cwnd) - unacked_bytes */
/* usable = MIN(usable, unsent) */
/* usable = MAX(usable, {1 for urgent, 0 for data}) */
if (usable_r > 0) {
} else {
/* Bypass all other unnecessary processing. */
goto done;
}
}
/*
* "Our" Nagle Algorithm. This is not the same as in the old
* BSD. This is more in line with the true intent of Nagle.
*
* The conditions are:
* 1. The amount of unsent data (or amount of data which can be
* sent, whichever is smaller) is less than Nagle limit.
* 2. The last sent size is also less than Nagle limit.
* 3. There is unack'ed data.
* 4. Urgent pointer is not set. Send urgent data ignoring the
* Nagle algorithm. This reduces the probability that urgent
* bytes get "merged" together.
* 5. The app has not closed the connection. This eliminates the
* wait time of the receiving side waiting for the last piece of
* (small) data.
*
* If all are satisfied, exit without sending anything. Note
* that Nagle limit can be smaller than 1 MSS. Nagle limit is
* the smaller of 1 MSS and global tcp_naglim_def (default to be
* 4095).
*/
goto done;
}
/*
* If the tcp->tcp_cork option is set, then we have to force
* TCP not to send partial segments (smaller than MSS bytes).
* We are calculating the usable now based on full mss and
* will save the rest of remaining data for later.
*/
goto done;
}
/* Update the latest receive window size in TCP header. */
/*
* Determine if it's worthwhile to attempt LSO or MDT, based on:
*
* 3. If the TCP connection is in ESTABLISHED state.
* 4. The TCP is not detached.
*
* If any of the above conditions have changed during the
* parameters accordingly.
*/
} else {
}
/* Anything other than detached is considered pathological */
if (!TCP_IS_DETACHED(tcp)) {
else
}
}
/* Use MDT if sendable amount is greater than the threshold */
(tcp->tcp_valid_bits == 0 ||
} else {
}
/* Pretend that all we were trying to send really got sent */
if (rc < 0 && tail_unsent < 0) {
do {
} while (tail_unsent < 0);
}
done:;
if (len) {
/*
* If new data was sent, need to update the notsack
* list, which is, after all, data blocks that have
* not been sack'ed by the receiver. New data is
* not sack'ed.
*/
/* len is a negative value. */
&(tcp->tcp_num_notsack_blk),
&(tcp->tcp_cnt_notsack_list));
}
tcp->tcp_rack_cnt = 0;
}
/*
* Didn't send anything. Make sure the timer is running
* so that we will probe a zero window.
*/
}
/* Note that len is the amount we just sent but with a negative sign */
if (tcp->tcp_flow_stopped) {
}
}
}
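/*
 * Illustrative sketch (not part of the original code): the Nagle test
 * spelled out in the comment inside tcp_wput_data() above.  Transmission
 * is deferred only when every one of the listed conditions holds.  All
 * parameter names are assumptions made for this example only.
 */
static boolean_t
tcp_example_nagle_defer(uint32_t sendable, uint32_t last_sent_len,
    uint32_t naglim, boolean_t unacked_data, boolean_t urgent,
    boolean_t app_closed)
{
	/*
	 * 1. the amount we could send now is below the Nagle limit,
	 * 2. the previous segment sent was also below the limit,
	 * 3. there is unacknowledged data outstanding,
	 * 4. no urgent data is pending, and
	 * 5. the application has not closed its side of the connection.
	 */
	return ((sendable < naglim && last_sent_len < naglim &&
	    unacked_data && !urgent && !app_closed) ? B_TRUE : B_FALSE);
}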
/*
* tcp_fill_header is called by tcp_send() and tcp_multisend() to fill the
* outgoing TCP header with the template header, as well as other
* options such as time-stamp, ECN and/or SACK.
*/
static void
{
int hdrlen;
/* Template header */
/* Header of outgoing packet */
/* dst and src are opaque 32-bit fields, used for copying */
/* Fill time-stamp option if needed */
if (tcp->tcp_snd_ts_ok) {
} else {
}
/*
* Copy the template header; is this really more efficient than
* but perhaps not for other scenarios.
*/
if (hdrlen -= 40) {
hdrlen >>= 2;
dst += 10;
src += 10;
do {
} while (--hdrlen);
}
/*
* Set the ECN info in the TCP header if it is not a zero
* window probe. Zero window probe is only sent in
* tcp_wput_data() and tcp_timer().
*/
if (tcp->tcp_ecn_echo_on)
}
}
/* Fill in SACK options */
if (num_sack_blk > 0) {
int32_t i;
wptr[0] = TCPOPT_NOP;
sizeof (sack_blk_t);
for (i = 0; i < num_sack_blk; i++) {
}
tcp_h->th_offset_and_rsrvd[0] +=
}
}
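/*
 * Illustrative sketch (not part of the original code): the header copy
 * idiom used above.  The template header is copied as aligned 32-bit
 * words; the first 40 bytes (ten words) are assumed to have been copied
 * separately, and only the remainder (options and the like) goes through
 * the loop.  This is a sketch under those assumptions, not the original
 * routine.
 */
static void
tcp_example_copy_hdr_tail(uint32_t *dst, const uint32_t *src, int hdrlen)
{
	if ((hdrlen -= 40) != 0) {	/* anything beyond the base 40 bytes? */
		hdrlen >>= 2;		/* byte count to 32-bit word count */
		dst += 10;		/* skip the 40 bytes already copied */
		src += 10;
		do {
			*dst++ = *src++;
		} while (--hdrlen);
	}
}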
/*
* tcp_mdt_add_attrs() is called by tcp_multisend() in order to attach
* the destination address and SAP attribute, and if necessary, the
* hardware checksum offload attribute to a Multidata message.
*/
static int
{
/* Add global destination address & SAP attribute */
ip1dbg(("tcp_mdt_add_attrs: can't add global physical "
"destination address+SAP\n"));
return (-1);
}
/* Add global hwcksum attribute */
if (hwcksum &&
ip1dbg(("tcp_mdt_add_attrs: can't add global hardware "
"checksum attribute\n"));
return (-1);
}
return (0);
}
/*
* Smaller and private version of pdescinfo_t used specifically for TCP,
* which allows for only two payload spans per packet.
*/
/*
* tcp_multisend() is called by tcp_wput_data() for Multidata Transmit
* scheme, and returns one of the following:
*
* -1 = failed allocation.
* 0 = success; burst count reached, or usable send window is too small,
* and that we'd rather wait until later before sending again.
*/
static int
const int mdt_thres)
{
int num_burst_seg, max_pld;
int pbuf_idx, pbuf_idx_nxt;
int err;
#ifdef _BIG_ENDIAN
#else
#endif
#define PREP_NEW_MULTIDATA() { \
cur_hdr_off = 0; \
add_buffer = B_TRUE; \
}
#define PREP_NEW_PBUF() { \
cur_pld_off = 0; \
first_snxt = *snxt; \
ASSERT(*tail_unsent > 0); \
}
/*
* Note that tcp will only declare at most 2 payload spans per
* packet, which is much lower than the maximum allowable number
* of packet spans per Multidata. For this reason, we use the
* privately declared and smaller descriptor info structure, in
* order to save some stack space.
*/
}
md_mp_head = NULL;
/*
* Before we go on further, make sure there is an IRE that we can
* use, and that the ILL supports MDT. Otherwise, there's no point
* in proceeding any further, and we should just hand everything
* off to the legacy path.
*/
goto legacy_send_no_md;
/*
* If we do support loopback for MDT (which requires modifications
* to the receiving paths), the following assertions should go away,
* and we would be sending the Multidata to loopback conn later on.
*/
if (!tcp->tcp_ire_ill_check_done) {
}
/*
* If the underlying interface conditions have changed, or if the
* new interface does not support MDT, go back to legacy path.
*/
/* don't go through this path anymore for this connection */
ip1dbg(("tcp_multisend: disabling MDT for connp %p on "
/* IRE will be released prior to returning */
goto legacy_send_no_md;
}
/*
* Check if we can take tcp fast-path. Note that "incomplete"
* ire's (where the link-layer for next hop is not resolved
* or where the fast-path header in nce_fp_mp is not available
* yet) are sent down the legacy (slow) path.
* NOTE: We should fix ip_xmit_v4 to handle M_MULTIDATA
*/
/* IRE will be released prior to returning */
goto legacy_send_no_md;
}
/* go to legacy path if interface doesn't support zerocopy */
/* IRE will be released prior to returning */
goto legacy_send_no_md;
}
/* does the interface support hardware checksum offload? */
hwcksum_flags = 0;
if (ILL_HCKSUM_CAPABLE(ill) &&
HCKSUM_IPHDRCKSUM)) && dohwcksum) {
}
/*
* Each header fragment consists of the leading extra space,
* We make sure that each header fragment begins on a 32-bit
* aligned memory address (tcp_mdt_hdr_head is already 32-bit
* aligned in tcp_mdt_update).
*/
/* are we starting from the beginning of data block? */
if (*tail_unsent == 0) {
}
/*
* Here we create one or more Multidata messages, each made up of
* one header buffer and up to N payload buffers. This entire
* operation is done within two loops:
*
* The outer loop mostly deals with creating the Multidata message,
* as well as the header buffer that gets added to it. It also
* links the Multidata messages together such that all of them can
* be sent down to the lower layer in a single putnext call; this
* linking behavior depends on the tcp_mdt_chain tunable.
*
* The inner loop takes an existing Multidata message, and adds
* one or more (up to tcp_mdt_max_pld) payload buffers to it. It
* packetizes those buffers by filling up the corresponding header
* buffer fragments with the proper IP and TCP headers, and by
* describing the layout of each packet in the packet descriptors
* that get added to the Multidata.
*/
do {
/*
* If usable send window is too small, or data blocks in
* transmit list are smaller than our threshold (i.e. app
* performs large writes followed by small ones), we hand
* control over to the legacy path. Note that we'll
* get back the control once it encounters a large block.
*/
/* send down what we've got so far */
if (md_mp_head != NULL) {
}
/*
* Pass control over to tcp_send(), but tell it to
* return to us once a large-size transmission is
* possible.
*/
mdt_thres)) <= 0) {
/* burst count reached, or alloc failed */
return (err);
}
/* tcp_send() may have sent everything, so check */
if (*usable <= 0) {
return (0);
}
/*
* We may have delivered the Multidata, so make sure
* to re-initialize before the next round.
*/
md_mp_head = NULL;
/* are we starting from the beginning of data block? */
if (*tail_unsent == 0) {
}
}
/*
* max_pld limits the number of mblks in tcp's transmit
* queue that can be added to a Multidata message. Once
* this counter reaches zero, no more additional mblks
* can be added to it. What happens afterwards depends
* on whether or not we are set to chain the Multidata
* messages. If we are to link them together, reset
* max_pld to its original value (tcp_mdt_max_pld) and
* prepare to create a new Multidata message which will
* get linked to md_mp_head. Else, leave it alone and
* let the inner loop break on its own.
*/
if (tcp_mdt_chain && max_pld == 0)
/* adding a payload buffer; re-initialize values */
if (add_buffer)
/*
* If we don't have a Multidata, either because we just
* (re)entered this outer loop, or after we branched off
* to tcp_send above, setup the Multidata and header
* buffer to be used.
*/
int md_hbuflen;
/*
* Calculate Multidata header buffer size large enough
* to hold all of the headers that can possibly be
* sent at this moment. We'd rather over-estimate
* the size than running out of space; this is okay
* since this buffer is small anyway.
*/
/*
* Start and stuff offset for partial hardware
* checksum offload; these are currently for IPv4.
* For full checksum offload, they are set to zero.
*/
if ((hwcksum_flags & HCK_PARTIALCKSUM)) {
} else {
stuff = IPV6_HDR_LEN +
}
} else {
}
/*
* Create the header buffer, Multidata, as well as
* any necessary attributes (destination address,
* SAP and hardware checksum offload) that should
* be associated with the Multidata message.
*/
ASSERT(cur_hdr_off == 0);
/* fastpath mblk */
/* hardware checksum enabled */
/* hardware checksum offsets */
/* hardware checksum flag */
hwcksum_flags, tcps) != 0)) {
/* Unlink message from the chain */
if (md_mp_head != NULL) {
md_mp);
/*
* We can't assert that rmvb
* did not return -1, since we
* may get here before linkb
* happens. We do, however,
* check if we just removed the
* only element in the list.
*/
if (err == 0)
md_mp_head = NULL;
}
/* md_hbuf gets freed automatically */
} else {
/* Either allocb or mmd_alloc failed */
}
/* send down what we've got so far */
if (md_mp_head != NULL) {
&rconfirm);
}
/*
* Too bad; let the legacy path handle this.
* We specify INT_MAX for the threshold, since
* we gave up with the Multidata processings
* and let the old path have it all.
*/
INT_MAX));
}
/* link to any existing ones, if applicable */
if (md_mp_head == NULL) {
md_mp_head = md_mp;
} else if (tcp_mdt_chain) {
}
}
/*
* Packetize the transmittable portion of the data block;
* each data block is essentially added to the Multidata
* as a payload buffer. We also deal with adding more
* than one payload buffers, which happens when the remaining
* packetized portion of the current payload buffer is less
* than MSS, while the next data block in transmit queue
* has enough data to make up for one. This "spillover"
* case essentially creates a split-packet, where portions
* of the packet's payload fragments may span across two
* virtually discontiguous address blocks.
*/
do {
/* one must remain NULL for DTRACE_IP_FASTPATH */
/*
* First time around for this payload buffer; note
* in the case of a spillover, the following has
* been done prior to adding the split-packet
* descriptor to Multidata, and we don't want to
* repeat the process.
*/
if (add_buffer) {
/*
* Have we reached the limit? We'd get to
* this case when we're not chaining the
* Multidata messages together, and since
* we're done, terminate this loop.
*/
if (max_pld == 0)
break; /* done */
goto legacy_send; /* out_of_mem */
}
zc_cap->ill_zerocopy_flags)) {
/* out_of_mem */
goto legacy_send;
}
}
/*
* Add a payload buffer to the Multidata; this
* operation must not fail, or otherwise our
* logic in this routine is broken. There
* is no memory allocation done by the
* routine, so any returned failure simply
* tells us that we've done something wrong.
*
* A failure tells us that either we're adding
* the same payload buffer more than once, or
* we're trying to add more buffers than
* allowed (max_pld calculation is wrong).
* None of the above cases should happen, and
* we panic because either there's horrible
*/
if (pbuf_idx < 0) {
"payload buffer logic error "
"detected for tcp %p mmd %p "
"pbuf %p (%d)\n",
}
--max_pld;
}
/*
* We spillover to the next payload buffer only
* if all of the following is true:
*
* 1. There is not enough data on the current
* payload buffer to make up `len',
* 2. We are allowed to send `len',
* 3. The next payload buffer length is large
* enough to accommodate `spill'.
*/
max_pld > 0) {
if (md_pbuf_nxt == NULL) {
goto legacy_send; /* out_of_mem */
}
zc_cap->ill_zerocopy_flags)) {
/* out_of_mem */
goto legacy_send;
}
}
/*
* See comments above on the first call to
* mmd_addpldbuf for explanation on the panic.
*/
if (pbuf_idx_nxt < 0) {
panic("tcp_multisend: "
"next payload buffer logic error "
"detected for tcp %p mmd %p "
"pbuf %p (%d)\n",
(void *)md_pbuf_nxt, pbuf_idx_nxt);
}
--max_pld;
} else if (spill > 0) {
/*
* If there's a spillover, but the following
* xmit_tail couldn't give us enough octets
* to reach "len", then stop the current
* Multidata creation and let the legacy
* tcp_send() path take over. We don't want
* to send the tiny segment as part of this
* Multidata for performance reasons; instead,
* we let the legacy path deal with grouping
* it with the subsequent small mblks.
*/
max_pld = 0;
break; /* done */
}
/*
* We can't spillover, and we are near
* the end of the current payload buffer,
* so send what's left.
*/
ASSERT(*tail_unsent > 0);
len = *tail_unsent;
}
/* tail_unsent is negated if there is a spillover */
*tail_unsent -= len;
/*
* Sender SWS avoidance; see comments in tcp_send();
* everything else is the same, except that we only
* do this here if there is no more data to be sent
* following the current xmit_tail. We don't check
* for 1-byte urgent data because we shouldn't get
* here if TCP_URG_VALID is set.
*/
((md_pbuf_nxt == NULL &&
(md_pbuf_nxt != NULL &&
(tcp->tcp_unsent -
!tcp->tcp_zero_win_probe) {
}
}
/*
* Prime pump for IP's checksumming on our behalf;
* include the adjustment for a source route if any.
* offload, as this field gets zeroed out later for
* the full hardware checksum offload case.
*/
if (!(hwcksum_flags & HCK_FULLCKSUM)) {
}
/*
* We set the PUSH bit only if TCP has no more buffered
* data to be transmitted (or if sender SWS avoidance
* takes place), as opposed to setting it for every
* last packet in the burst.
*/
if (done ||
/*
* Set FIN bit if this is our last segment; snxt
* already includes its length, and it will not
* be adjusted after this point.
*/
if (!tcp->tcp_fin_acked) {
}
if (!tcp->tcp_fin_sent) {
/*
* tcp state must be ESTABLISHED
* in order for us to get here in
* the first place.
*/
/*
* Upon returning from this routine,
* tcp_wput_data() will set tcp_snxt
* to be equal to snxt + tcp_fin_sent.
* This is essentially the same as
* setting it to tcp_fss + 1.
*/
}
}
len += tcp_hdr_len;
else
/* setup header fragment */
tcp_hdr_len, /* len */
/* setup first payload fragment */
pbuf_idx, /* index */
/* create a split-packet in case of a spillover */
if (md_pbuf_nxt != NULL) {
ASSERT(!add_buffer);
md_pbuf_nxt = NULL;
pbuf_idx_nxt = -1;
cur_pld_off = spill;
/* trim out first payload fragment */
/* setup second payload fragment */
pbuf_idx, /* index */
spill); /* len */
/*
* Store the lbolt used for RTT
* estimation. We can only record one
* timestamp per mblk so we do it when
* we reach the end of the payload
* buffer. Also we only take a new
* timestamp sample when the previous
* timed data from the same mblk has
* been ack'ed.
*/
}
/*
* Advance xmit_tail; usable could be 0 by
* the time we got here, but we made sure
* above that we would only spillover to
* the next data block if usable includes
* the spilled-over amount prior to the
* subtraction. Therefore, we are sure
* that xmit_tail->b_cont can't be NULL.
*/
} else {
}
/*
* Fill in the header using the template header, and
* as needed.
*/
/* take care of some IP header businesses */
/*
* Assign ident value for current packet; see
* related comments in ip_wput_ire() about the
* contract private interface with clustering
* group.
*/
if (cl_inet_ipident != NULL) {
if ((*cl_inet_isclusterwide)(IPPROTO_IP,
ipha->ipha_ident =
}
}
if (!clusterwide) {
}
#ifndef _BIG_ENDIAN
#endif
} else {
if (tcp->tcp_ip_forward_progress) {
}
}
/* at least one payload span, and at most two */
/* add the packet descriptor to Multidata */
KM_NOSLEEP)) == NULL) {
/*
* Any failure other than ENOMEM indicates
* that we have passed in invalid pkt_info
* or parameters to mmd_addpdesc, which must
* not happen.
*
* EINVAL is a result of failure on boundary
* checks against the pkt_info contents. It
* should not happen, and we panic because
* either there's horrible heap corruption,
*/
"pdesc logic error detected for "
"tcp %p mmd %p pinfo %p (%d)\n",
}
goto legacy_send; /* out_of_mem */
}
/* calculate IP header and TCP checksums */
/* calculate pseudo-header checksum */
/* offset for TCP header checksum */
} else {
/* calculate pseudo-header checksum */
/* Fold the initial sum */
}
if (hwcksum_flags & HCK_FULLCKSUM) {
/* clear checksum field for hardware */
*up = 0;
} else if (hwcksum_flags & HCK_PARTIALCKSUM) {
/* pseudo-header checksumming */
} else {
/* software checksumming */
if (*up == 0)
*up = 0xFFFF;
}
/* IPv4 header checksum */
if (hwcksum_flags & HCK_IPV4_HDRCKSUM) {
ipha->ipha_hdr_checksum = 0;
} else {
}
}
goto legacy_send;
/* build payload mblk for this segment */
goto legacy_send;
}
} else {
}
/*
* Need to pass it to normal path.
*/
tcp->tcp_last_sent_len ||
/*
* Need to pass all packets of this
* buffer to normal path, either when
* packet is blocked, or when boundary
* of header buffer or payload buffer
* has been changed by FW_HOOKS[6].
*/
if (md_mp_head != NULL) {
md_mp);
if (err == 0)
md_mp_head = NULL;
}
/* send down what we've got so far */
if (md_mp_head != NULL) {
}
md_mp_head = NULL;
q, mp);
mp1 = fw_mp_head;
do {
q, mp);
} else {
if (fw_mp_head == NULL)
fw_mp_head = mp;
else
}
}
}
/* advance header offset */
++obsegs;
*tail_unsent > 0);
/*
* Store the lbolt used for RTT estimation. We can only
* record one timestamp per mblk so we do it when we
* reach the end of the payload buffer. Also we only
* take a new timestamp sample when the previous timed
* data from the same mblk has been ack'ed.
*/
}
ASSERT(*tail_unsent >= 0);
if (*tail_unsent > 0) {
/*
* We got here because we broke out of the above
* loop due to of one of the following cases:
*
* 1. len < adjusted MSS (i.e. small),
* 2. Sender SWS avoidance,
* 3. max_pld is zero.
*
* We are done for this Multidata, so trim our
* last payload buffer (if any) accordingly.
*/
} else if (*usable > 0) {
add_buffer = B_TRUE;
}
while (fw_mp_head) {
mp = fw_mp_head;
}
if (buf_trunked) {
}
(tcp_mdt_chain || max_pld > 0));
if (md_mp_head != NULL) {
/* send everything down */
&rconfirm);
}
return (0);
}
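/*
 * Illustrative sketch (not part of the original send path): the software
 * checksumming fallback above folds a 32-bit one's-complement sum into
 * 16 bits and, because the code stores a zero TCP checksum as 0xFFFF,
 * substitutes 0xFFFF when the folded result is zero.  The helper below is
 * hypothetical and assumes the caller has already accumulated the
 * pseudo-header and payload sum; it only makes the folding step explicit.
 */
static uint16_t
tcp_example_cksum_fold(uint32_t sum)
{
	/* fold carries until the sum fits in 16 bits */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	/* one's complement; map 0 to 0xFFFF as the code above does for *up */
	sum = (~sum) & 0xffff;
	return ((uint16_t)(sum == 0 ? 0xffff : sum));
}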
/*
* A wrapper function for sending one or more Multidata messages down to
* the module below ip; this routine does not release the reference of the
* IRE (caller does that). This routine is analogous to tcp_send_data().
*/
static void
{
/* adjust MIBs and IRE timestamp */
} else {
}
/* send it down */
if (ILL_DLS_CAPABLE(ill)) {
} else {
}
return;
/* reachability confirmation? */
if (*rconfirm) {
if (ip_debug > 2) {
/* ip1dbg */
pr_addr_dbg("tcp_multisend_data: state "
"for %s changed to REACHABLE\n",
}
}
/* reset transport reachability confirmation */
}
case ND_REACHABLE:
case ND_STALE:
/*
* ND_REACHABLE is identical to ND_STALE in this
* specific case. If reachable time has expired for
* this neighbor (delta is greater than reachable
* time), conceptually, the neighbor cache is no
* longer in REACHABLE state, but already in STALE
* state. So the correct transition here is to
* ND_DELAY.
*/
if (ip_debug > 3) {
/* ip2dbg */
pr_addr_dbg("tcp_multisend_data: state "
"for %s changed to DELAY\n",
}
break;
case ND_DELAY:
case ND_PROBE:
/* Timers have already started */
break;
case ND_UNREACHABLE:
/*
* ndp timer has detected that this nce is
* unreachable and initiated deleting this nce
* and all its associated IREs. This is a race
* where we found the ire before it was deleted
* and have just sent out a packet using this
* unreachable nce.
*/
break;
default:
ASSERT(0);
}
}
}
/*
* Derived from tcp_send_data().
*/
static void
int num_lso_seg)
{
uint32_t hcksum_txflags = 0;
#ifndef _BIG_ENDIAN
#endif
if (tcp->tcp_snd_zcopy_aware) {
}
}
/*
* Since the TCP checksum should be recalculated by h/w, we can just
* zero the checksum field for HCK_FULLCKSUM, or calculate partial
* pseudo-header checksum for HCK_PARTIALCKSUM.
* The partial pseudo-header excludes the TCP length, which was calculated
* in tcp_send(), so we zero *up before further processing.
*/
*up = 0;
/*
* Append LSO flag to DB_LSOFLAGS(mp) and set the mss to DB_LSOMSS(mp).
*/
if (ILL_DLS_CAPABLE(ill)) {
/*
* Send the packet directly to DLD, where it may be queued
* depending on the availability of transmit resources at
* the media layer.
*/
} else {
}
}
}
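/*
 * Illustrative sketch: for HCK_PARTIALCKSUM the checksum field is expected
 * to hold only the pseudo-header sum and, as noted above, the LSO path
 * excludes the TCP length because the hardware segments the payload itself.
 * The helper below computes such a partial sum from the IPv4 addresses and
 * the TCP protocol number; the function name is an example only and is not
 * part of this file.
 */
static uint16_t
tcp_example_partial_pseudo_cksum(uint32_t src, uint32_t dst)
{
	uint32_t sum;

	/* sum the 16-bit halves of both addresses plus the protocol (6) */
	sum = (src >> 16) + (src & 0xffff) +
	    (dst >> 16) + (dst & 0xffff) + 6 /* IPPROTO_TCP */;
	/* fold carries; note: no one's complement and no TCP length here */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)sum);
}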
/*
* tcp_send() is called by tcp_wput_data() for non-Multidata transmission
* scheme, and returns one of the following:
*
* -1 = failed allocation.
* 0 = success; burst count reached, or usable send window is too small,
* and that we'd rather wait until later before sending again.
* 1 = success; we are called from tcp_multisend(), and both usable send
* window and tail_unsent are greater than the MDT threshold, and thus
* Multidata Transmit should be used instead.
*/
static int
const int mdt_thres)
{
uint_t ire_fp_mp_len = 0;
int num_lso_seg = 1;
/*
* Check LSO capability before any further work. A similar check
* needs to be done in the for (;;) loop.
* LSO will be deployed when there is more than one mss of available
* data and a burst transmission is allowed.
*/
(tcp->tcp_valid_bits == 0 ||
/*
*/
/*
* Enable LSO with this transmission.
* Since the IRE has been held in
* tcp_send_find_ire_ill(), IRE_REFRELE(ire)
* should be called before returning.
*/
/* Round up to multiple of 4 */
} else {
}
}
for (;;) {
int len;
/*
* If we're called by tcp_multisend(), and the amount of
* sendable data as well as the size of current xmit_tail
* is beyond the MDT threshold, return to the caller and
* let the large data transmit be done using MDT.
*/
return (1); /* success; do large send */
}
if (num_burst_seg == 0)
break; /* success; burst count reached */
/*
* Calculate the maximum payload length we can send in *one*
* time.
*/
if (do_lso_send) {
/*
* Check whether we need to do LSO any more.
*/
num_burst_seg * mss);
if (lso_usable % mss) {
num_lso_seg++;
(lso_usable % mss);
} else {
}
} else {
num_lso_seg = 1;
lso_usable = mss;
}
}
/*
* Adjust num_burst_seg here.
*/
if (len <= 0) {
/* Terminate the loop */
break; /* success; too small */
}
/*
* Sender silly-window avoidance.
* Ignore this if we are going to send a
* zero window probe out.
*
* TODO: force data into microscopic window?
* ==> (!pushed || (unsent > usable))
*/
/*
* If the retransmit timer is not running
* we start it so that we will retransmit
* in the case when the receiver has
* decremented the window.
*/
/*
* We are not supposed to send
* anything. So let's wait a little
* bit longer before breaking SWS
* avoidance.
*
* What should the value be?
* Suggestion: MAX(init rexmit time,
* tcp->tcp_rto)
*/
}
break; /* success; too small */
}
}
/*
* The reason to adjust len here is that we need to set flags
* and calculate checksum.
*/
if (do_lso_send)
len = lso_usable;
if (*usable > 0)
else
/*
* Prime pump for IP's checksumming on our behalf
* Include the adjustment for a source route if any.
*/
/*
* Branch off to tcp_xmit_mp() if any of the VALID bits is
* set. For the case when TCP_FSS_VALID is the only valid
* bit (normal active close), branch off only when we think
* that the FIN flag needs to be set. Note for this case,
* that (snxt + len) may not reflect the actual seg_len,
* as len may be further reduced in tcp_xmit_mp(). If len
* gets modified, we will end up here again.
*/
if (tcp->tcp_valid_bits != 0 &&
if (*tail_unsent == 0) {
} else {
}
/* Restore tcp_snxt so we get amount sent right. */
/*
* If the previous timestamp is still in use,
* don't stomp on it.
*/
}
} else
return (-1);
}
}
continue;
}
if (*tail_unsent) {
/* Are the bytes above us in flight? */
*tail_unsent -= len;
len += tcp_hdr_len;
else
return (-1); /* out_of_mem */
}
/*
* If the old timestamp is no longer in use,
* sample a new timestamp now.
*/
}
goto must_alloc;
}
} else {
}
*tail_unsent -= len;
len += tcp_hdr_len;
else
return (-1); /* out_of_mem */
}
len = tcp_hdr_len;
/*
* There are four reasons to allocate a new hdr mblk:
* 1) The bytes above us are in use by another packet
* 2) We don't have good alignment
* 3) The mblk is being shared
* 4) We don't have enough room for a header
*/
/* NOTE: we assume allocb returns an OK_32PTR */
return (-1); /* out_of_mem */
}
/* Leave room for Link Level header */
len = tcp_hdr_len;
rptr =
}
/*
* Fill in the header using the template header, and add options as needed.
*/
if (*tail_unsent) {
int spill = *tail_unsent;
/*
* If we're a little short, tack on more mblks until
* there is no more spillover.
*/
while (spill < 0) {
int nmpsz;
/*
* Excess data in mblk; can we split it?
* If MDT is enabled for the connection,
* keep on splitting as this is a transient
* send path.
*/
/*
* Don't split if stream head was
* told to break up larger writes
* into smaller ones.
*/
if (tcp->tcp_maxpsz > 0)
break;
/*
* Next mblk is less than SMSS/2
* rounded up to nearest 64-byte;
* let it get sent as part of the
* next segment.
*/
if (tcp->tcp_localnet &&
break;
}
/* Stash for rtt use later */
*tail_unsent = spill;
return (-1); /* out_of_mem */
}
}
/* Trim back any surplus on the last mblk */
if (spill >= 0) {
*tail_unsent = spill;
} else {
/*
* We did not send everything we could in
* order to remain within the b_cont limit.
*/
/*
* Adjust the checksum
*/
} else {
}
*tail_unsent = 0;
}
}
if (tcp->tcp_ip_forward_progress) {
}
if (do_lso_send) {
} else {
}
}
return (0);
}
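/*
 * Illustrative sketch of the LSO segmentation arithmetic used above: the
 * amount handed to the hardware in one shot is bounded by both the usable
 * send window and the remaining burst count, and a trailing partial MSS
 * still costs one extra segment.  The struct and function names here are
 * examples only (not part of the original code) and mss is assumed > 0.
 */
struct tcp_example_lso_plan {
	uint32_t lso_bytes;	/* bytes covered by this LSO send */
	uint32_t lso_segs;	/* number of MSS-sized segments it yields */
};

static struct tcp_example_lso_plan
tcp_example_lso_split(uint32_t usable, uint32_t num_burst_seg, uint32_t mss)
{
	struct tcp_example_lso_plan plan;

	plan.lso_bytes = usable;
	if (plan.lso_bytes > num_burst_seg * mss)
		plan.lso_bytes = num_burst_seg * mss;
	plan.lso_segs = plan.lso_bytes / mss;
	if (plan.lso_bytes % mss != 0)
		plan.lso_segs++;	/* partial final segment */
	return (plan);
}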
/* Unlink and return any mblk that looks like it contains a MDT info */
static mblk_t *
{
for (;;) {
/* no more to process? */
break;
case M_CTL:
continue;
return (mp);
default:
break;
}
}
return (mp);
}
/* MDT info update routine, called when IP notifies us about MDT */
static void
{
/*
* IP is telling us to abort MDT on this connection? We know
* this because the capability is only turned off when IP
* encounters some pathological cases, e.g. link-layer change
* where the new driver doesn't support MDT, or in situations
* where MDT usage on the link-layer has been switched off.
* IP would not have sent us the initial MDT_IOC_INFO_UPDATE
* if the link-layer doesn't support MDT, and if it does, it
* will indicate that the feature is to be turned on.
*/
ip1dbg(("tcp_mdt_update: disabling MDT for connp %p\n",
}
/*
* We currently only support MDT on simple TCP/{IPv4,IPv6},
* so disable MDT otherwise. The checks are done here
* and in tcp_wput_data().
*/
"version (%d), expected version is %d",
return;
}
/*
* We need the driver to be able to handle at least three
* spans per packet in order for tcp MDT to be utilized.
* The first is for the header portion, while the rest are
* needed to handle a packet that straddles across two
* virtually non-contiguous buffers; a typical tcp packet
* therefore consists of only two spans. Note that we take
* a zero as "don't care".
*/
if (mdt_capab->ill_mdt_span_limit > 0 &&
return;
}
/* a zero means driver wants default value */
if (tcp->tcp_mdt_max_pld == 0)
/* ensure 32-bit alignment */
if (!first && !prev_state) {
ip1dbg(("tcp_mdt_update: reenabling MDT for connp %p\n",
}
}
}
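/*
 * Illustrative sketch of the span-limit check described above: a header
 * span plus up to two payload spans (for a packet that straddles two
 * payload buffers) means the driver must accept at least three spans per
 * packet, with zero meaning "don't care".  Hypothetical helper, shown only
 * to make the rule concrete.
 */
static boolean_t
tcp_example_mdt_span_ok(uint32_t ill_span_limit)
{
	/* zero is "don't care"; otherwise require header + two payload spans */
	return (ill_span_limit == 0 || ill_span_limit >= 3);
}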
/* Unlink and return any mblk that looks like it contains a LSO info */
static mblk_t *
{
for (;;) {
/* no more to process? */
break;
case M_CTL:
continue;
return (mp);
default:
break;
}
}
return (mp);
}
/* LSO info update routine, called when IP notifies us about LSO */
static void
{
/*
* IP is telling us to abort LSO on this connection? We know
* this because the capability is only turned off when IP
* encounters some pathological cases, e.g. link-layer change
* where LSO usage on the link-layer has been switched off.
* IP would not have sent us the initial LSO_IOC_INFO_UPDATE
* if the link-layer doesn't support LSO, and if it does, it
* will indicate that the feature is to be turned on.
*/
/*
* We currently only support LSO on simple TCP/IPv4,
* so disable LSO otherwise. The checks are done here
* and in tcp_wput_data().
*/
} else {
}
}
static void
{
/*
* We may be in the fastpath here, and although we essentially do
* similar checks as in ip_bind_connected{_v6}/ip_xxinfo_return,
* we try to keep things as brief as possible. After all, these
* are only best-effort checks, and we do more thorough ones prior
* to calling tcp_send()/tcp_multisend().
*/
/* Cache the result */
ip1dbg(("tcp_ire_ill_check: connp %p enables "
"LSO for interface %s\n", (void *)connp,
}
} else if (ipst->ips_ip_multidata_outbound &&
ILL_MDT_CAPABLE(ill)) {
/* Cache the result */
ip1dbg(("tcp_ire_ill_check: connp %p enables "
"MDT for interface %s\n", (void *)connp,
}
}
}
/*
* The goal is to reduce the number of generated tcp segments by
* setting the maxpsz multiplier to 0; this will have an effect on
* tcp_maxpsz_set(). With this behavior, tcp will pack more data
* into each packet, up to SMSS bytes. Doing this reduces the number
* of outbound segments and incoming ACKs, thus allowing for better
* network and system performance. In contrast the legacy behavior
* may result in sending less than SMSS size, because the last mblk
* for some packets may have more data than needed to make up SMSS,
* and the legacy code refused to "split" it.
*
* We apply the new behavior on following situations:
*
* 1) Loopback connections,
* 2) Connections in which the remote peer is not on local subnet,
* 3) Local subnet connections over the bge interface (see below).
*
* Ideally, we would like this behavior to apply for interfaces other
* than bge. However, doing so would negatively impact drivers which
* perform dynamic mapping and unmapping of DMA resources, which are
* increased by setting the maxpsz multiplier to 0 (more mblks per
* packet will be generated by tcp). The bge driver does not suffer
* from this, as it copies the mblks into pre-mapped buffers, and
* therefore does not require more I/O resources than before.
*
* Otherwise, this behavior is present on all network interfaces when
* the destination endpoint is non-local, since reducing the number
* of packets in general is good for the network.
*
* TODO We need to remove this hard-coded conditional for bge once
* a better "self-tuning" mechanism, or a way to comprehend
* the driver transmit strategy is devised. Until the solution
* is found and well understood, we live with this hack.
*/
if (!tcp_static_maxpsz &&
/* override the default value */
tcp->tcp_maxpsz = 0;
ip3dbg(("tcp_ire_ill_check: connp %p tcp_maxpsz %d on "
}
/* set the stream head parameters accordingly */
}
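/*
 * Illustrative sketch of the maxpsz policy described in the comment above:
 * pack up to SMSS bytes per mblk (multiplier 0) for loopback connections,
 * for peers off the local subnet, and for local-subnet traffic over bge;
 * otherwise keep the legacy multiplier.  The helper and its parameters are
 * hypothetical; the real decision also consults tcp_static_maxpsz.
 */
static int
tcp_example_choose_maxpsz_mult(boolean_t is_loopback, boolean_t peer_is_local,
    boolean_t ill_is_bge, int legacy_mult)
{
	if (is_loopback || !peer_is_local || ill_is_bge)
		return (0);	/* pack mblks up to SMSS bytes */
	return (legacy_mult);
}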
/* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
static void
{
/* TODO: How should flush interact with urgent data? */
/*
* Flush only data that has not yet been put on the wire. If
* we flush data that we have already transmitted, life, as we
* know it, may come to an end.
*/
tcp->tcp_xmit_tail_unsent = 0;
tcp->tcp_unsent = 0;
if (tail) {
for (;;) {
break;
}
if (tcp->tcp_snd_zcopy_aware)
}
/*
* We have no unsent data, so unsent must be less than
* tcp_xmit_lowater, so re-enable flow.
*/
if (tcp->tcp_flow_stopped) {
}
}
/*
* TODO: you can't just flush these, you have to increase rwnd for one
* thing. For another, how should urgent data interact?
*/
/* XXX */
return;
}
}
/*
* tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA
* messages.
*/
static void
{
int error;
/* Make sure it is one of ours. */
case TI_GETMYNAME:
case TI_GETPEERNAME:
break;
default:
return;
}
case -1:
return;
break;
/* Copy out the strbuf. */
mi_copyout(q, mp);
return;
/* All done. */
mi_copy_done(q, mp, 0);
return;
default:
return;
}
/* Check alignment of the strbuf */
return;
}
return;
}
return;
case TI_GETMYNAME:
break;
case TI_GETPEERNAME:
break;
}
if (error != 0) {
} else {
/* Copy out the address */
mi_copyout(q, mp);
}
}
/*
* tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL
* messages.
*/
/* ARGSUSED */
static void
{
/*
* Try and ASSERT the minimum possible references on the
* conn early enough. Since we are executing on write side,
* the connection is obviously not detached and that means
* there is a ref each for TCP and IP. Since we are behind
* the squeue, the minimum references needed are 3. If the
* conn is in classifier hash list, there should be an
* extra ref for that (we check both the possibilities).
*/
case TCP_IOC_DEFAULT_Q:
/* Wants to be the default wq. */
return;
}
return;
case _SIOCSOCKFALLBACK:
/*
* Either sockmod is about to be popped and the socket
* would now be treated as a plain stream, or a module
* is about to be pushed so we could no longer use read-
* side synchronous streams for fused loopback tcp.
* Drain any queued data and disable direct sockfs
* interface from now on.
*/
if (!tcp->tcp_issocket) {
} else {
#ifdef _ILP32
#else
#endif
/*
* Insert this socket into the acceptor hash.
* We might need it for T_CONN_RES message
*/
/*
* This is a fused loopback tcp; disable
* read-side synchronous streams interface
* and drain any queued data. It is okay
* to do this for non-synchronous streams
* fused tcp as well.
*/
}
}
return;
}
}
/*
* This routine is called by tcp_wput() to handle all TPI requests.
*/
/* ARGSUSED */
static void
{
int len;
/*
* Try and ASSERT the minimum possible references on the
* conn early enough. Since we are executing on write side,
* the connection is obviously not detached and that means
* there is a ref each for TCP and IP. Since we are behind
* the squeue, the minimum references needed are 3. If the
* conn is in classifier hash list, there should be an
* extra ref for that (we check both the possibilities).
*/
if (type == T_EXDATA_REQ) {
if (len < 0) {
return;
}
/*
* Try to force urgent data out on the wire.
* Even if we have unsent data this will
* at least send the urgent flag.
* XXX does not handle more flag correctly.
*/
/* Bypass tcp protocol for fused tcp loopback */
return;
} else if (type != T_DATA_REQ) {
goto non_urgent_data;
}
/* TODO: options, flags, ... from user */
/* Set length to zero for reclamation below */
return;
} else {
"tcp_wput_proto, dropping one...");
}
return;
}
case T_SSL_PROXY_BIND_REQ: /* an SSL proxy endpoint bind request */
/*
* save the kssl_ent_t from the next block, and convert this
* back to a normal bind_req.
*/
}
sizeof (kssl_ent_t));
}
/* FALLTHROUGH */
case O_T_BIND_REQ: /* bind request */
case T_BIND_REQ: /* new semantics bind request */
break;
case T_UNBIND_REQ: /* unbind request */
break;
case O_T_CONN_RES: /* old connection response XXX */
case T_CONN_RES: /* connection response */
break;
case T_CONN_REQ: /* connection request */
break;
case T_DISCON_REQ: /* disconnect request */
break;
case T_CAPABILITY_REQ:
break;
case T_INFO_REQ: /* information request */
break;
case T_SVR4_OPTMGMT_REQ: /* manage options req */
&tcp_opt_obj, B_TRUE);
break;
case T_OPTMGMT_REQ:
/*
* Note: no support for snmpcom_req() through new
* T_OPTMGMT_REQ. See comments in ip.c
*/
/* Only IP is allowed to return meaningful value */
B_TRUE);
break;
case T_UNITDATA_REQ: /* unitdata request */
break;
case T_ORDREL_REQ: /* orderly release req */
if (tcp_xmit_end(tcp) != 0) {
/*
* We were crossing FINs and got a reset from
* the other side. Just ignore it.
*/
"tcp_wput_proto, T_ORDREL_REQ out of "
"state %s",
}
}
break;
case T_ADDR_REQ:
break;
default:
"tcp_wput_proto, bogus TPI msg, type %d",
}
/*
* We used to M_ERROR. Sending TNOTSUPPORT gives the user
* a chance to recover.
*/
break;
}
}
/*
* The TCP write service routine should never be called...
*/
/* ARGSUSED */
static void
{
}
/* Non overlapping byte exchanger */
static void
{
while (len-- > 0) {
}
}
/*
* Send out a control packet on the tcp connection specified. This routine
* is typically called where we need a simple ACK or RST generated.
*/
static void
{
int tcp_hdr_len;
int tcp_ip_hdr_len;
/*
* Save sum for use in source route later.
*/
/* If a text string is passed in with the request, pass it to strlog. */
"tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x",
}
BPRI_MED);
return;
}
} else {
}
/*
* Don't send TSopt w/ TH_RST packets per RFC 1323.
*/
if (tcp->tcp_snd_ts_ok &&
} else {
}
}
}
if (tcp->tcp_snd_ts_ok) {
}
/* Update the latest receive window size in TCP header. */
tcp->tcp_rack_cnt = 0;
}
/*
* Include the adjustment for a source route if any.
*/
}
/*
* If this routine returns B_TRUE, TCP can generate a RST in response
* to a segment. If it returns B_FALSE, TCP should not respond.
*/
static boolean_t
{
/*
* TCP needs to protect itself from generating too many RSTs.
* This can be a DoS attack by sending us random segments
* soliciting RSTs.
*
* What we do here is to have a limit of tcp_rst_sent_rate RSTs
* in each 1 second interval. In this way, TCP still generates
* RSTs in normal cases but when under attack, the impact is
* limited.
*/
if (tcps->tcps_rst_sent_rate_enabled != 0) {
/* lbolt can wrap around. */
1*SECONDS)) {
return (B_FALSE);
}
}
return (B_TRUE);
}
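/*
 * Illustrative sketch of the RST rate limiting above: allow at most "rate"
 * RSTs per one-second window, resetting the counter when a new window
 * starts.  Time is passed in as a tick count so the example stays
 * self-contained; lbolt wrap-around handling is omitted here for brevity,
 * and all names are hypothetical.
 */
static boolean_t
tcp_example_rst_allowed(uint32_t now_ticks, uint32_t hz, uint32_t rate,
    uint32_t *window_start, uint32_t *sent_in_window)
{
	if (now_ticks - *window_start >= hz) {
		*window_start = now_ticks;	/* new 1-second interval */
		*sent_in_window = 0;
	}
	if (*sent_in_window >= rate)
		return (B_FALSE);		/* suppress this RST */
	(*sent_in_window)++;
	return (B_TRUE);
}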
/*
* Send down the advice IP ioctl to tell IP to mark an IRE temporary.
*/
static void
{
&ipic);
} else {
&ipic);
}
return;
}
/*
* Return an IP advice ioctl mblk and set ipic to be the pointer
* to the advice structure.
*/
static mblk_t *
{
return (NULL);
return (NULL);
}
return (mp1);
}
/*
* Generate a reset based on an inbound packet, connp is set by caller
* when RST is in response to an unexpected inbound packet for which
* there is active tcp state in the system.
*
* IPSEC NOTE : Try to send the reply with the same protection as it came
* in. We still have the ipsec_mp that the packet was attached to. Thus
* the packet will go out at the same level of protection as it came in by
* converting the IPSEC_IN to IPSEC_OUT.
*/
static void
{
int i;
int addr_len;
void *addr;
/*
* For non-zero stackids the default queue isn't created
* until the first open, thus there can be a need to send
* a reset before then. But we can't do that, hence we just
* drop the packet. Later during boot, when the default queue
* has been setup, a retransmitted packet from the peer
* will result in a reset.
*/
return;
}
else
if (!tcp_send_rst_chk(tcps)) {
tcps->tcps_rst_unsent++;
return;
}
} else {
}
"tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, "
"flags 0x%x",
}
if (!mp) {
if (mctl_present)
return;
} else {
if (mctl_present) {
} else {
}
}
}
/*
* We skip reversing source route here.
* (for now we replace all IP options with EOL)
*/
for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
/*
* Make sure that src address isn't flagrantly invalid.
* Not all broadcast address checking for the src address
* is possible, since we don't know the netmask of the src
* addr. No check for destination address is done, since
* IP will not pass up a packet with a broadcast dest
* address to TCP. Similar checks are done below for IPv6.
*/
return;
}
} else {
return;
}
/* Remove any extension headers assuming partial overlay */
if (ip_hdr_len > IPV6_HDR_LEN) {
}
}
return;
}
/* Swap addresses */
ipha->ipha_ident = 0;
} else {
/* No ip6i_t in this case */
/* Swap addresses */
}
}
/* IP trusts us to set up labels when required. */
int err;
else
if (mctl_present)
else
if (err != 0) {
return;
}
} else {
}
}
if (mctl_present) {
return;
}
}
/* Add the zoneid so ip_output routes it properly */
return;
}
/*
* NOTE: one might consider tracing a TCP packet here, but
* this function has no active TCP state and no tcp structure
* that has a trace buffer. If we traced here, we would have
* to keep a local trace buffer in tcp_record_trace().
*
* TSol note: The mblk that contains the incoming packet was
* reused by tcp_xmit_listener_reset, so it already contains
* the right credentials and we don't need to call mblk_setcred.
* Also the conn's cred is not right since it is associated
* with tcps_g_q.
*/
/*
* Tell IP to mark the IRE used for this destination temporary.
* This way, we can limit our exposure to DoS attack because IP
* creates an IRE for each destination. If there are too many,
* the time to do any routing lookup will be extremely long. And
* the lookup can be in interrupt context.
*
* Note that in normal circumstances, this marking should not
* affect anything. It would be nice if only 1 message is
* needed to inform IP that the IRE created for this RST should
* not be added to the cache table. But there is currently
* no such communication mechanism between TCP and IP. So
* the best we can do now is to send the advice ioctl to IP
* to mark the IRE temporary.
*/
}
}
/*
* Initiate closedown sequence on an active connection. (May be called as
* writer.) Return value zero for OK return, non-zero for error return.
*/
static int
{
/*
* Invalid state, only states TCPS_SYN_RCVD,
* TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
*/
return (-1);
}
/*
* If there is nothing more unsent, send the FIN now.
* Otherwise, it will go out with the last segment.
*/
if (tcp->tcp_unsent == 0) {
if (mp) {
} else {
/*
* Couldn't allocate msg. Pretend we got it out.
* Wait for rexmit timeout.
*/
}
/*
* If needed, update tcp_rexmit_snxt as tcp_snxt is
* changed.
*/
}
} else {
/*
* If tcp->tcp_cork is set, then the data will not get sent,
* so we have to check that and unset it first.
*/
}
/*
* If TCP does not get enough samples of RTT or tcp_rtt_updates
* is 0, don't update the cache.
*/
if (tcps->tcps_rtt_updates == 0 ||
return (0);
/*
* NOTE: should not update if source routed, i.e. if tcp_remote is
* different from the destination.
*/
return (0);
}
&ipic);
} else {
return (0);
}
&ipic);
}
/* Record route attributes in the IRE for use by future connections. */
return (0);
/*
* We do not have a good algorithm to update ssthresh at this time.
* So don't do any update.
*/
return (0);
}
/*
* Generate a "no listener here" RST in response to an "unknown" segment.
* connp is set by caller when RST is in response to an unexpected
* inbound packet for which there is active tcp state in the system.
* Note that we are reusing the incoming mp to construct the outgoing RST.
*/
void
{
ipsec_in_t *ii;
if (ii->ipsec_in_dont_check) {
if (!ii->ipsec_in_secure) {
}
}
}
} else {
}
if (check && policy_present) {
/*
* The conn_t parameter is NULL because we already know
* nobody's home.
*/
return;
}
char *, "Could not reply with RST to mp(1)",
ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n"));
return;
}
tcp_xmit_early_reset("no tcp, reset",
connp);
} else {
seg_len++;
} else {
/*
* Here we violate the RFC. Note that a normal
* TCP will never send a segment without the ACK
* flag, except for RST or SYN segment. This
* segment is neither. Just drop it on the
* floor.
*/
tcps->tcps_rst_unsent++;
return;
}
tcp_xmit_early_reset("no tcp, reset/ack",
}
}
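/*
 * Illustrative sketch of how the "no listener" reset above chooses its
 * sequence and acknowledgment numbers, following RFC 793: if the offending
 * segment carried an ACK, reply with RST and seq equal to that ACK value;
 * otherwise reply with RST|ACK, seq 0 and an ack that covers the segment
 * (SYN/FIN each count as one sequence number, hence the seg_len increment
 * seen above).  The struct and function names are examples only.
 */
struct tcp_example_rst {
	uint32_t seq;
	uint32_t ack;
	int	 flags;		/* TH_RST or TH_RST|TH_ACK */
};

static struct tcp_example_rst
tcp_example_rst_for(uint32_t seg_seq, uint32_t seg_ack, uint32_t seg_len,
    boolean_t had_ack)
{
	struct tcp_example_rst r;

	if (had_ack) {
		r.seq = seg_ack;
		r.ack = 0;
		r.flags = 0x04;			/* TH_RST */
	} else {
		r.seq = 0;
		r.ack = seg_seq + seg_len;	/* seg_len already counts SYN/FIN */
		r.flags = 0x04 | 0x10;		/* TH_RST | TH_ACK */
	}
	return (r);
}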
/*
* tcp_xmit_mp is called to return a pointer to an mblk chain complete with
* ip and tcp header ready to pass down to IP. If the mp passed in is
* non-NULL, then up to max_to_send bytes of data will be dup'ed off that
* mblk. (If sendall is not set the dup'ing will stop at an mblk boundary
* otherwise it will dup partial mblks.)
* Otherwise, an appropriate ACK packet will be generated. This
* routine is not usually called to send new data for the first time. It
* is mostly called out of the timer for retransmits, and to generate ACKs.
*
* If offset is not NULL, the returned mblk chain's first mblk's b_rptr will
* be adjusted by *offset. And after dupb(), the offset and the ending mblk
* of the original mblk chain will be returned in *offset and *end_mp.
*/
mblk_t *
{
int data_length;
int32_t num_sack_blk = 0;
int32_t sack_opt_len = 0;
/* Allocate for our maximum TCP header + link-level */
if (!mp1)
return (NULL);
data_length = 0;
/*
* Note that tcp_mss has been adjusted to take into account the
* timestamp option if applicable. Because SACK options do not
* appear in every TCP segments and they are of variable lengths,
* they cannot be included in tcp_mss. Thus we need to calculate
* the actual segment length when we need to send a segment which
* includes SACK options.
*/
}
/* We use offset as an indicator that end_mp is not NULL. */
}
/* This could be faster with cooperation from downstream */
/*
* Don't send the next mblk since the whole mblk
* does not fit.
*/
break;
if (!mp2) {
return (NULL);
}
if (data_length > max_to_send) {
break;
} else {
off = 0;
}
}
}
*seg_len = data_length;
}
/* Update the latest receive window size in TCP header. */
/*
* Using tcp_unsent to determine whether the PUSH bit should be set
* assumes that this function was called from tcp_wput_data. Thus, when
* called to retransmit data, the setting of the PUSH bit may appear
* somewhat random in that it might get set when it should not. This
* should not pose any performance issues.
*/
} else {
}
if (tcp->tcp_ecn_ok) {
if (tcp->tcp_ecn_echo_on)
/*
* Only set ECT bit and ECN_CWR if a segment contains new data.
* There is no TCP flow control for non-data segments, and
* only data segments are transmitted reliably.
*/
if (data_length > 0 && !rexmit) {
}
}
}
if (tcp->tcp_valid_bits) {
/*
* If TCP_ISS_VALID and the seq number is tcp_iss,
* TCP can only be in SYN-SENT, SYN-RCVD or
* FIN-WAIT-1 state. It can be FIN-WAIT-1 if
* our SYN is not ack'ed but the app closes this
* TCP connection.
*/
/*
* Tack on the MSS option. It is always needed
* for both active and passive open.
*
* The MSS option value should be the interface MTU minus the
* minimum TCP/IP header, as that is the maximum segment
* size TCP can receive. But to get around some broken
* middle boxes/end hosts out there, we allow the option
* value to be the same as the MSS option size on the
* peer side.
* In this way, the other side will not send
* anything larger than they can receive.
*
* Note that for SYN_SENT state, the ndd param
* tcp_use_smss_as_mss_opt has no effect as we
* don't know the peer's MSS option value. So
* the only case we need to take care of is in
* SYN_RCVD state, which is done later.
*/
wptr[0] = TCPOPT_MAXSEG;
wptr += 2;
/* Update the offset to cover the additional word */
/*
* Note that the following way of filling in
* TCP options is not optimal. Some NOPs can
* be saved. But there is no need at this time
* to optimize it. When it is needed, we will
* do it.
*/
case TCPS_SYN_SENT:
if (tcp->tcp_snd_ts_ok) {
wptr[0] = TCPOPT_NOP;
wptr += 4;
wptr += 4;
U32_TO_BE32(0L, wptr);
tcph->th_offset_and_rsrvd[0] +=
(3 << 4);
}
/*
* Set up all the bits to tell other side
* we are ECN capable.
*/
if (tcp->tcp_ecn_ok) {
}
break;
case TCPS_SYN_RCVD:
/*
* Reset the MSS option value to be SMSS
* We should probably add back the bytes
* for timestamp option and IPsec. We
* don't do that as this is a workaround, and it
* is better for us to be more cautious.
* They may not take these things into
* account in their SMSS calculation. Thus
* the peer's calculated SMSS may be smaller
* than what it can be. This should be OK.
*/
if (tcps->tcps_use_smss_as_mss_opt) {
}
/*
* If the other side is ECN capable, reply
* that we are also ECN capable.
*/
if (tcp->tcp_ecn_ok)
break;
default:
/*
* The above ASSERT() makes sure that this
* must be FIN-WAIT-1 state. Our SYN has
* not been ack'ed so retransmit it.
*/
break;
}
if (tcp->tcp_snd_ws_ok) {
wptr[0] = TCPOPT_NOP;
}
if (tcp->tcp_snd_sack_ok) {
wptr[0] = TCPOPT_NOP;
}
/* allocb() of adequate mblk assures space */
/*
* Get IP set to checksum on our behalf
* Include the adjustment for a source route if any.
*/
}
if (!tcp->tcp_fin_acked) {
}
if (!tcp->tcp_fin_sent) {
case TCPS_SYN_RCVD:
case TCPS_ESTABLISHED:
break;
case TCPS_CLOSE_WAIT:
break;
}
}
}
/*
* Note the trick here. u1 is unsigned. When tcp_urg
* is smaller than seq, u1 will become a very huge value.
* So the comparison will fail. Also note that tcp_urp
* should be positive, see RFC 793 page 17.
*/
}
}
tcp->tcp_rack_cnt = 0;
if (tcp->tcp_snd_ts_ok) {
}
}
if (num_sack_blk > 0) {
int32_t i;
wptr[0] = TCPOPT_NOP;
sizeof (sack_blk_t);
for (i = 0; i < num_sack_blk; i++) {
}
}
} else {
sizeof (ip6i_t) : 0));
}
/*
* Prime pump for IP
* Include the adjustment for a source route if any.
*/
if (tcp->tcp_ip_forward_progress) {
}
return (mp1);
}
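/*
 * Illustrative sketch of the MSS option bytes written for a SYN above:
 * kind 2, length 4, followed by the 16-bit MSS value in network byte
 * order; the extra word also grows the data offset by one 32-bit word.
 * Standalone example with a hypothetical name; the real code writes
 * through wptr into the header mblk.
 */
static void
tcp_example_put_mss_opt(uint8_t *wptr, uint16_t mss)
{
	wptr[0] = 2;			/* TCPOPT_MAXSEG */
	wptr[1] = 4;			/* option length, in bytes */
	wptr[2] = (uint8_t)(mss >> 8);	/* MSS value, big-endian */
	wptr[3] = (uint8_t)(mss & 0xff);
}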
/* This function handles the push timeout. */
void
tcp_push_timer(void *arg)
{
/*
* We need to plug synchronous streams during our drain to prevent
* a race with tcp_fuse_rrw() or tcp_fusion_rinfop().
*/
tcp->tcp_push_tid = 0;
/* sod_wakeup() does the mutex_exit() */
}
if (flags == TH_ACK_NEEDED)
}
/*
* This function handles delayed ACK timeout.
*/
static void
tcp_ack_timer(void *arg)
{
tcp->tcp_ack_tid = 0;
return;
/*
* Do not send ACK if there is no outstanding unack'ed data.
*/
return;
}
/*
* Make sure we don't allow deferred ACKs to result in
* timer-based ACKing. If we have held off an ACK
* when there was more than an mss here, and the timer
* goes off, we have to worry about the possibility
* that the sender isn't doing slow-start, or is out
* of step with us for some other reason. We fall
* permanently back in the direction of
* ACK-every-other-packet as suggested in RFC 1122.
*/
tcp->tcp_rack_abs_max--;
}
}
}
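/*
 * Illustrative sketch of the fallback described above: each time the
 * delayed-ACK timer fires while more than an mss of data is still
 * unacknowledged, shrink the deferred-ACK ceiling, moving the connection
 * permanently toward ACK-every-other-packet as suggested in RFC 1122.
 * Hypothetical helper; the real code adjusts tcp_rack_abs_max in place.
 */
static uint32_t
tcp_example_shrink_ack_ceiling(uint32_t rack_abs_max)
{
	/* never go below acknowledging every second segment */
	return (rack_abs_max > 2 ? rack_abs_max - 1 : 2);
}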
/* Generate an ACK-only (no data) segment for a TCP endpoint */
static mblk_t *
{
/*
* There are a few cases to be considered while setting the sequence no.
* Essentially, we can come here while processing an unacceptable pkt
* in the TCPS_SYN_RCVD state, in which case we set the sequence number
* to snxt (per RFC 793), note the swnd wouldn't have been set yet.
* If we are here for a zero window probe, stick with suna. In all
* other cases, we check if suna + swnd encompasses snxt and set
* the sequence number to snxt, if so. If snxt falls outside the
* window (the receiver probably shrunk its window), we will go with
* suna + swnd, otherwise the sequence no will be unacceptable to the
* receiver.
*/
if (tcp->tcp_zero_win_probe) {
} else {
}
if (tcp->tcp_valid_bits) {
/*
* For the complex case where we have to send some
* controls (FIN or SYN), let tcp_xmit_mp do it.
*/
} else {
/* Generate a simple ACK */
int data_length;
int32_t num_sack_blk = 0;
/*
* Allocate space for TCP + IP headers
* and link-level header
*/
} else {
}
if (!mp1)
return (NULL);
/* Update the latest receive window size in TCP header. */
/* copy in prototype TCP + IP header */
/* Set the TCP sequence number. */
/* Set up the TCP flag field. */
if (tcp->tcp_ecn_echo_on)
tcp->tcp_rack_cnt = 0;
/* fill in timestamp option if in use */
if (tcp->tcp_snd_ts_ok) {
}
/* Fill in SACK options */
if (num_sack_blk > 0) {
int32_t i;
wptr[0] = TCPOPT_NOP;
sizeof (sack_blk_t);
for (i = 0; i < num_sack_blk; i++) {
}
<< 4);
}
} else {
/* Check for ip6i_t header in sticky hdrs */
sizeof (ip6i_t) : 0));
}
/*
* Prime pump for checksum calculation in IP. Include the
* adjustment for a source route if any.
*/
if (tcp->tcp_ip_forward_progress) {
}
return (mp1);
}
}
/*
* To create a temporary tcp structure for inserting into bind hash list.
* The parameter is assumed to be in network byte order, ready for use.
*/
/* ARGSUSED */
static tcp_t *
{
return (NULL);
/*
* Only initialize the necessary info in those structures. Note
* that since INADDR_ANY is all 0, we do not need to set
* tcp_bound_source to INADDR_ANY here.
*/
/* Just for place holding... */
return (tcp);
}
/*
* To remove a port range specified by lo_port and hi_port from the
* reserved port ranges. This is one of the three public functions of
* the reserved port interface. Note that a port range has to be removed
* as a whole. Ports in a range cannot be removed individually.
*
* Params:
* in_port_t lo_port: the beginning port of the reserved port range to
* be deleted.
* in_port_t hi_port: the ending port of the reserved port range to
* be deleted.
*
* Return:
* B_TRUE if the deletion is successful, B_FALSE otherwise.
*
* Assumes that nca is only for zoneid=0
*/
{
int i, j;
int size;
/* First make sure that the port range is indeed reserved. */
for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
break;
}
}
if (i == tcps->tcps_reserved_port_array_size) {
return (B_FALSE);
}
/*
* Remove the range from the array. This simple loop is possible
* because port ranges are inserted in ascending order.
*/
}
/* Remove all the temporary tcp structures. */
while (size > 0) {
size--;
}
return (B_TRUE);
}
/*
* Macro to remove temporary tcp structure from the bind hash list. The
* first parameter is the list of tcp to be removed. The second parameter
* is the number of tcps in the array.
*/
{ \
while ((num) > 0) { \
if (tcpnext) { \
tcpnext->tcp_ptpbhn = \
tcp->tcp_ptpbhn; \
} \
(num)--; \
} \
}
/*
* The public interface for other modules to call to reserve a port range
* in TCP. The caller passes in how large a port range it wants. TCP
* will try to find a range and return it via lo_port and hi_port. This is
* used by NCA's nca_conn_init.
* NCA can only be used in the global zone so this only affects the global
* zone's ports.
*
* Params:
* int size: the size of the port range to be reserved.
* in_port_t *lo_port (referenced): returns the beginning port of the
* reserved port range added.
* in_port_t *hi_port (referenced): returns the ending port of the
* reserved port range added.
*
* Return:
* B_TRUE if the port reservation is successful, B_FALSE otherwise.
*
* Assumes that nca is only for zoneid=0
*/
{
int i, j;
/* Sanity check. */
return (B_FALSE);
}
if (tcps->tcps_reserved_port_array_size ==
return (B_FALSE);
}
/*
* Find the starting port to try. Since the port ranges are ordered
* in the reserved port array, we can do a simple search here.
*/
for (i = 0; i < tcps->tcps_reserved_port_array_size;
break;
}
}
/* No available port range. */
if (i == tcps->tcps_reserved_port_array_size &&
return (B_FALSE);
}
if (temp_tcp_array == NULL) {
return (B_FALSE);
}
/* Go thru the port range to see if some ports are already bound. */
/*
* A port is already bound. Search again
* starting from port + 1. Release all
* temporary tcps.
*/
tcps);
cur_size = -1;
break;
}
}
if (!used) {
NULL) {
/*
* Allocation failure. Just fail the request.
* Need to remove all those temporary tcp
* structures.
*/
tcps);
sizeof (tcp_t *));
return (B_FALSE);
}
}
}
/*
* The current range is not large enough. We can actually do another
* search if this search is done between 2 reserved port ranges. But
* for first release, we just stop here and return saying that no port
* range is available.
*/
return (B_FALSE);
}
/*
* Insert range into array in ascending order. Since this function
* must not be called often, we choose to use the simplest method.
* The above array should not consume excessive stack space as
* the size must be very small. If in future releases, we find
* that we should provide more reserved port ranges, this function
* has to be modified to be more efficient.
*/
if (tcps->tcps_reserved_port_array_size == 0) {
} else {
for (i = 0, j = 0; i < tcps->tcps_reserved_port_array_size;
i++, j++) {
i == j) {
j++;
}
tmp_ports[j].temp_tcp_array =
}
if (j == i) {
}
}
return (B_TRUE);
}
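/*
 * Illustrative sketch of the ordered insertion described above: because the
 * reserved-port array is kept sorted by starting port, a new range is
 * placed by shifting the later entries up one slot.  The structure and
 * function names are examples, the caller is assumed to have checked that
 * there is room, and the real array also carries the temporary tcp_t
 * pointers for each range.
 */
struct tcp_example_port_range {
	uint16_t lo_port;
	uint16_t hi_port;
};

static void
tcp_example_insert_range(struct tcp_example_port_range *arr, int *nranges,
    uint16_t lo, uint16_t hi)
{
	int i, j;

	/* find the first entry that starts after the new range */
	for (i = 0; i < *nranges && arr[i].lo_port < lo; i++)
		;
	/* shift the tail up to make room, then drop the new range in */
	for (j = *nranges; j > i; j--)
		arr[j] = arr[j - 1];
	arr[i].lo_port = lo;
	arr[i].hi_port = hi;
	(*nranges)++;
}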
/*
* Check to see if a port is in any reserved port range.
*
* Params:
* in_port_t port: the port to be verified.
*
* Return:
* B_TRUE if the port is inside a reserved port range, B_FALSE otherwise.
*/
{
int i;
for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* To list all reserved port ranges. This is the function to handle
* ndd tcp_reserved_port_list.
*/
/* ARGSUSED */
static int
{
int i;
if (tcps->tcps_reserved_port_array_size > 0)
else
for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
}
return (0);
}
/*
* Hash list insertion routine for tcp_t structures.
* Inserts entries with the ones bound to a specific IP address first
* followed by those bound to INADDR_ANY.
*/
static void
{
}
if (!caller_holds_lock) {
} else {
}
if (tcpnext) {
/*
* If the new tcp bound to the INADDR_ANY address
* and the first one in the list is not bound to
* INADDR_ANY we skip all entries until we find the
* first one bound to INADDR_ANY.
* This makes sure that applications binding to a
* specific address get preference over those binding to
* INADDR_ANY.
*/
if (tcpnext)
} else
}
if (!caller_holds_lock)
}
/*
* Hash list removal routine for tcp_t structures.
*/
static void
{
return;
/*
* Extract the lock pointer in case there are concurrent
* hash_remove's for this instance.
*/
if (tcp->tcp_ptpbhn) {
if (tcpnext) {
}
}
}
/*
* Hash list lookup routine for tcp_t structures.
* Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
*/
static tcp_t *
{
return (tcp);
}
}
return (NULL);
}
/*
* Hash list insertion routine for tcp_t structures.
*/
void
{
if (tcpnext)
}
/*
* Hash list removal routine for tcp_t structures.
*/
static void
{
/*
* Extract the lock pointer in case there are concurrent
* hash_remove's for this instance.
*/
return;
if (tcp->tcp_ptpahn) {
if (tcpnext) {
}
}
}
/* ARGSUSED */
static int
{
int error = 0;
int retval;
char *end;
/*
* If the following variables are still zero after parsing the input
* string, the user didn't specify them and we don't change them in
* the HSP.
*/
long sendspace = 0; /* Send buffer size */
long recvspace = 0; /* Receive buffer size */
long timestamp = 0; /* Originate TCP TSTAMP option, 1 = yes */
/* Parse and validate address */
if (retval == 1)
} else {
goto done;
}
if (retval == 0) {
goto done;
}
value++;
/* Parse individual keywords, set variables if found */
while (*value) {
/* Skip leading blanks */
value++;
/* If at end of string, we're done */
if (!*value)
break;
/* We have a word, figure out what it is */
value += 4;
value++;
/* Parse subnet mask */
if (retval == 1) {
}
}
if (retval != 1) {
goto done;
}
value++;
value += 9;
goto done;
}
value += 9;
goto done;
}
value += 9;
goto done;
}
/*
* We increment timestamp so we know it's been set;
* this is undone when we put it in the HSP
*/
timestamp++;
value += 6;
} else {
goto done;
}
}
/* Hash address for lookup */
if (delete) {
/*
* Note that deletes don't return an error if the thing
* we're trying to delete isn't there.
*/
goto done;
if (hsp) {
&v6addr)) {
} else {
if (IN6_ARE_ADDR_EQUAL(
break;
}
}
}
}
} else {
/*
* If the hash table does not exist yet, allocate it.
*/
if (!tcps->tcps_hsp_hash) {
if (!tcps->tcps_hsp_hash) {
goto done;
}
}
/* Get head of hash chain */
/* Try to find pre-existing hsp on hash chain */
/* Doesn't handle CIDR prefixes. */
while (hsp) {
break;
}
/*
* If we didn't, create one with default values and put it
* at head of hash chain
*/
if (!hsp) {
if (!hsp) {
goto done;
}
}
/* Set values that the user asked us to change */
if (IN6_IS_ADDR_V4MAPPED(&v6addr))
else
if (sendspace > 0)
if (recvspace > 0)
if (timestamp > 0)
}
done:
return (error);
}
/* Set callback routine passed to nd_load by tcp_param_register. */
/* ARGSUSED */
static int
{
}
/* ARGSUSED */
static int
{
}
/* TCP host parameters report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
static int
{
int i;
(void) mi_mpprintf(mp,
"Hash HSP " MI_COL_HDRPAD_STR
"Address Subnet Mask Send Receive TStamp");
if (tcps->tcps_hsp_hash) {
for (i = 0; i < TCP_HSP_HASH_SIZE; i++) {
while (hsp) {
&hsp->tcp_hsp_addr,
} else {
}
(void) mi_mpprintf(mp,
" %03d " MI_COL_PTRFMT_STR
"%s %s %010d %010d %d",
i,
(void *)hsp,
}
}
}
return (0);
}
/* Data for fast netmask macro used by tcp_hsp_lookup */
};
/*
* XXX This routine should go away and instead we should use the metrics
* associated with the routes to determine the default sndspace and rcvspace.
*/
static tcp_hsp_t *
{
/* Quick check without acquiring the lock. */
return (NULL);
/* This routine finds the best-matching HSP for address addr. */
if (tcps->tcps_hsp_hash) {
int i;
/* We do three passes: host, network, and subnet. */
for (i = 1; i <= 3; i++) {
/* Look for exact match on srchaddr */
while (hsp) {
break;
}
/*
* If this is the first pass:
* If we found a match, great, return it.
* If not, search for the network on the second pass.
*/
if (i == 1)
if (hsp)
break;
else
{
continue;
}
/*
* If this is the second pass:
* If we found a match, but there's a subnet mask,
* save the match but try again using the subnet
* mask on the third pass.
* Otherwise, return whatever we found.
*/
if (i == 2) {
continue;
} else {
break;
}
}
/*
* This must be the third pass. If we didn't find
* anything, return the saved network HSP instead.
*/
if (!hsp)
}
}
return (hsp);
}
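/*
 * Illustrative sketch of the three-pass lookup above: try the exact host
 * address first, then the address masked to its network, then the address
 * masked to the subnet, falling back to the network match if the subnet
 * pass finds nothing.  To keep the sketch self-contained, the hash lookup
 * is abstracted behind a function pointer and the subnet mask is passed in
 * rather than read from the network entry; all names are hypothetical.
 */
static const void *
tcp_example_hsp_three_pass(uint32_t addr, uint32_t netmask, uint32_t subnet,
    const void *(*lookup)(uint32_t))
{
	const void *hsp, *net_hsp;

	if ((hsp = lookup(addr)) != NULL)		/* pass 1: host */
		return (hsp);
	net_hsp = lookup(addr & netmask);		/* pass 2: network */
	if (net_hsp == NULL || subnet == 0)
		return (net_hsp);
	hsp = lookup(addr & subnet);			/* pass 3: subnet */
	return (hsp != NULL ? hsp : net_hsp);
}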
/*
* XXX Equally broken as the IPv4 routine. Doesn't handle longest
* match lookup.
*/
static tcp_hsp_t *
{
/* Quick check without acquiring the lock. */
return (NULL);
/* This routine finds the best-matching HSP for address addr. */
if (tcps->tcps_hsp_hash) {
int i;
/* We do three passes: host, network, and subnet. */
v6srchaddr = *v6addr;
for (i = 1; i <= 3; i++) {
/* Look for exact match on srchaddr */
while (hsp) {
&v6srchaddr))
break;
}
/*
* If this is the first pass:
* If we found a match, great, return it.
* If not, search for the network on the second pass.
*/
if (i == 1)
if (hsp)
break;
else {
/* Assume a 64 bit mask */
v6srchaddr.s6_addr32[0] =
continue;
}
/*
* If this is the second pass:
* If we found a match, but there's a subnet mask,
* save the match but try again using the subnet
* mask on the third pass.
* Otherwise, return whatever we found.
*/
if (i == 2) {
if (hsp &&
&hsp->tcp_hsp_subnet_v6)) {
continue;
} else {
break;
}
}
/*
* This must be the third pass. If we didn't find
* anything, return the saved network HSP instead.
*/
if (!hsp)
}
}
return (hsp);
}
/*
* Type three generator adapted from the random() function in 4.4 BSD:
*/
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Type 3 -- x**31 + x**3 + 1 */
#define DEG_3 31
#define SEP_3 3
/* Protected by tcp_random_lock */
void
tcp_random_init(void)
{
int i;
/*
* Use high-res timer and current time for seed. Gethrtime() returns
* a longlong, which may contain resolution down to nanoseconds.
* The current time will either be a 32-bit or a 64-bit quantity.
* XOR the two together in a 64-bit result variable.
* Convert the result to a 32-bit value by multiplying the high-order
* 32-bits by the low-order 32-bits.
*/
(result & 0xffffffff);
for (i = 1; i < DEG_3; i++)
+ 12345;
tcp_random_rptr = &tcp_random_state[0];
for (i = 0; i < 10 * DEG_3; i++)
(void) tcp_random();
}
/*
* tcp_random: Return a random number in the range [1, 128K + 1].
* This range is selected to be approximately centered on TCP_ISS / 2,
* and easy to compute. We get this value by generating a 32-bit random
* number, selecting out the high-order 17 bits, and then adding one so
* that we never return zero.
*/
int
tcp_random(void)
{
int i;
/*
* The high-order bits are more random than the low-order bits,
* so we select out the high-order 17 bits and add one so that
* we never return zero.
*/
if (++tcp_random_fptr >= tcp_random_end_ptr) {
} else if (++tcp_random_rptr >= tcp_random_end_ptr)
return (i);
}
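/*
 * Illustrative sketch of the value extraction described above: take a raw
 * 32-bit generator output, keep the more-random high-order 17 bits, and
 * add one so the result lies in [1, 128K + 1] and is never zero.  The
 * generator state management is omitted and the name is an example only.
 */
static int
tcp_example_random_narrow(uint32_t raw)
{
	return ((int)((raw >> 15) & 0x1ffff) + 1);
}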
/*
* XXX This will go away when TPI is extended to send
* Given a queue, set the max packet size for the write
* side of the queue below stream head. This value is
* cached on the stream head.
* Returns 1 on success, 0 otherwise.
*/
static int
{
/*
* At this point change of a queue parameter is not allowed
* when a multiplexor is sitting on top.
*/
return (0);
return (1);
}
static int
int *t_errorp, int *sys_errorp)
{
int error;
int is_absreq_failure;
int prim_type;
struct T_conn_req *tcreqp;
struct T_conn_res *tcresp;
prim_type == T_CONN_RES);
switch (prim_type) {
case T_CONN_REQ:
break;
case O_T_CONN_RES:
case T_CONN_RES:
break;
}
*t_errorp = 0;
*sys_errorp = 0;
*do_disconnectp = 0;
switch (error) {
case 0: /* no error */
ASSERT(is_absreq_failure == 0);
return (0);
case ENOPROTOOPT:
break;
case EACCES:
break;
default:
break;
}
if (is_absreq_failure != 0) {
/*
* The connection request should get the local ack
* T_OK_ACK and then a T_DISCON_IND.
*/
*do_disconnectp = 1;
}
return (-1);
}
/*
* Split this function out so that if the secret changes, I'm okay.
*
* Initialize the tcp_iss_cookie and tcp_iss_key.
*/
static void
{
struct {
time_t t;
/*
* Start with the current absolute time.
*/
(void) drv_getparm(TIME, &t);
/*
* XXX - Need a more random number per RFC 1750, not this crap.
* OTOH, if what follows is pretty random, then I'm in better shape.
*/
/*
* The cpu_type_info is pretty non-random. Ugggh. It does serve
* as a good template.
*/
/*
* The pass-phrase. Normally this is supplied by user-called NDD.
*/
/*
* See 4010593 if this section becomes a problem again,
* but the local ethernet address is useful here.
*/
(void) localetheraddr(NULL,
/*
* Hash 'em all together. The MD5Final is called per-connection.
*/
sizeof (tcp_iss_cookie));
}
/*
* Set the RFC 1948 pass phrase
*/
/* ARGSUSED */
static int
{
/*
* Basically, value contains a new pass phrase. Pass it along!
*/
return (0);
}
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static int
{
return (0);
}
/*
* Make sure we wait until the default queue is set up, yet allow
* tcp_g_q_create() to open a TCP stream.
* We need to allow tcp_g_q_create() to do an open
* of tcp, hence we compare curthread.
* All others have to wait until the tcps_g_q has been
* setup.
*/
void
{
return;
}
/* This thread will set it up */
return;
}
/* Everybody but the creator has to wait */
}
}
#define IP "ip"
/*
* Create a default tcp queue here instead of in strplumb
*/
void
{
int error;
int rval;
#ifdef NS_DEBUG
(void) printf("tcp_g_q_create()\n");
#endif
if (error) {
#ifdef DEBUG
printf("tcp_g_q_create: lyr ident get failed error %d\n",
error);
#endif
return;
}
/*
* We set the tcp default queue to IPv6 because IPv4 falls
* back to IPv6 when it can't find a client, but
* IPv6 does not fall back to IPv4.
*/
if (error) {
#ifdef DEBUG
printf("tcp_g_q_create: open of TCP6DEV failed error %d\n",
error);
#endif
goto out;
}
/*
* This ioctl causes the tcp framework to cache a pointer to
* this stream, so we don't want to close the stream after
* this operation.
* Use the kernel credentials that are for the zone we're in.
*/
if (error) {
#ifdef DEBUG
printf("tcp_g_q_create: ioctl TCP_IOC_DEFAULT_Q failed "
"error %d\n", error);
#endif
goto out;
}
out:
/* Close layered handles */
if (li)
/* Keep cred around until _inactive needs it */
}
/*
* We keep tcp_g_q set until all other tcp_t's in the zone
* have gone away, and then when tcp_g_q_inactive() is called
* we clear it.
*/
void
{
#ifdef NS_DEBUG
(void) printf("tcp_g_q_destroy()for stack %d\n",
#endif
return; /* Nothing to cleanup */
}
/*
* Drop reference corresponding to the default queue.
* This reference was added from tcp_open when the default queue
* was created, hence we compensate for this extra drop in
* tcp_g_q_close. If the refcnt drops to zero here it means
* the default queue was the last one to be open, in which
* case tcp_g_q_inactive will be
* called as a result of the refrele.
*/
}
/*
* Called when last tcp_t drops reference count using TCPS_REFRELE.
* Run by tcp_g_q_inactive() using a taskq.
*/
static void
tcp_g_q_close(void *arg)
{
int error;
#ifdef NS_DEBUG
(void) printf("tcp_g_q_inactive() for stack %d refcnt %d\n",
#endif
return; /* Nothing to cleanup */
if (error) {
#ifdef DEBUG
printf("tcp_g_q_inactive: lyr ident get failed error %d\n",
error);
#endif
return;
}
/*
* Make sure we can break the recursion when tcp_close decrements
* the reference count causing g_q_inactive to be called again.
*/
/* close the default queue */
/*
* At this point in time tcps and the rest of netstack_t might
* have been deleted.
*/
/* Close layered handles */
}
/*
* Called when last tcp_t drops reference count using TCPS_REFRELE.
*
* Have to ensure that the ldi routines are not used by an
* interrupt thread by using a taskq.
*/
void
{
return; /* Nothing to cleanup */
if (servicing_interrupt()) {
} else {
}
}
/*
* Called by IP when IP is loaded into the kernel
*/
void
tcp_ddi_g_init(void)
{
sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
sizeof (tcp_sack_info_t), 0,
/* Initialize the random number generator */
/* A single callback independent of how many netstacks we have */
/*
* We want to be informed each time a stack is created or
* destroyed in the kernel, so we can maintain the
* set of tcp_stack_t's.
*/
}
/*
* Initialize the TCP stack instance.
*/
static void *
{
tcpparam_t *pa;
int i;
/* Initialize locks */
for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
}
for (i = 0; i < TCP_FANOUT_SIZE; i++) {
}
/* TCP's IPsec code calls the packet dropper. */
/*
* Note: To really walk the device tree you need the devinfo framework;
* the following is safe only because it uses ddi_root_node()
*/
/*
* Initialize RFC 1948 secret values. This will probably be reset once
* by the boot scripts.
*
* Use NULL name, as the name is caught by the new lockstats.
*
* Initialize with some random, non-guessable string, like the global
* T_INFO_ACK.
*/
sizeof (tcp_g_t_info_ack), tcps);
return (tcps);
}
/*
* Called when the IP module is about to be unloaded.
*/
void
tcp_ddi_g_destroy(void)
{
tcp_g_kstat = NULL;
}
/*
* Shut down the TCP stack instance.
*/
/* ARGSUSED */
static void
{
}
/*
* Free the TCP stack instance.
*/
static void
{
int i;
for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
}
for (i = 0; i < TCP_FANOUT_SIZE; i++) {
}
}
/*
* Generate ISS, taking into account NDD changes may happen halfway through.
* (If the iss is not zero, set it.)
*/
static void
{
switch (tcps->tcps_strong_iss) {
case 2:
} else {
}
/*
* Now that we've hashed into a unique per-connection sequence
* space, add a random increment per strong_iss == 1. So I
* guess we'll have to...
*/
/* FALLTHRU */
case 1:
break;
default:
break;
}
}
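/*
 * Illustrative sketch of the ISS composition used above and in the
 * TIME_WAIT comparison later: every new ISS starts from a monotonically
 * advancing time component plus half of the per-connection increment, and
 * strong_iss adds either a random increment (level 1) or a
 * connection-keyed hash (level 2, RFC 1948 style).  The parameters and the
 * function name are stand-ins, not the values used by the real code.
 */
static uint32_t
tcp_example_iss(uint32_t time_component, uint32_t half_incr,
    uint32_t extra, int strong_iss, uint32_t rand17, uint32_t conn_hash)
{
	uint32_t iss = time_component + half_incr + extra;

	if (strong_iss == 1)
		iss += rand17;		/* random increment */
	else if (strong_iss == 2)
		iss += conn_hash;	/* per-connection MD5-style hash */
	return (iss);
}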
/*
* Exported routine for extracting active tcp connection status.
*
* This is used by the Solaris Cluster Networking software to
* gather a list of connections that need to be forwarded to
* specific nodes in the cluster when configuration changes occur.
*
* The callback is invoked for each tcp_t structure. Returning
* non-zero from the callback routine terminates the search.
*/
int
void *arg)
{
netstack_t *ns;
int ret = 0;
ns->netstack_tcp);
}
return (ret);
}
static int
{
int i;
for (i = 0; i < CONN_G_HASH_SIZE; i++) {
while ((connp =
/*
* The macros tcp_laddr and tcp_faddr give the IPv4
* addresses. They are copied implicitly below as
* mapped addresses.
*/
} else {
}
/*
* If the callback returns non-zero
* we terminate the traversal.
*/
return (1);
}
}
}
return (0);
}
/*
* Macros used for accessing the different types of sockaddr
* structures inside a tcp_ioc_abort_conn_t.
*/
/*
* Return the correct error code to mimic the behavior
* of a connection reset.
*/
switch ((state)) { \
case TCPS_SYN_SENT: \
case TCPS_SYN_RCVD: \
(err) = ECONNREFUSED; \
break; \
case TCPS_ESTABLISHED: \
case TCPS_FIN_WAIT_1: \
case TCPS_FIN_WAIT_2: \
case TCPS_CLOSE_WAIT: \
(err) = ECONNRESET; \
break; \
case TCPS_CLOSING: \
case TCPS_LAST_ACK: \
case TCPS_TIME_WAIT: \
(err) = 0; \
break; \
default: \
} \
}
/*
* Check if a tcp structure matches the info in acp.
*/
(TCP_AC_V4LPORT((acp)) == 0 || \
(TCP_AC_V4RPORT((acp)) == 0 || \
&(tcp)->tcp_ip_src_v6)) && \
&(tcp)->tcp_remote_v6)) && \
(TCP_AC_V6LPORT((acp)) == 0 || \
(TCP_AC_V6RPORT((acp)) == 0 || \
/*
* Build a message containing a tcp_ioc_abort_conn_t structure
* which is filled in with information from acp and tp.
*/
static mblk_t *
{
return (NULL);
sizeof (uint32_t));
} else {
}
return (mp);
}
/*
* Print a tcp_ioc_abort_conn_t structure.
*/
static void
{
char lbuf[128];
char rbuf[128];
lbuf, 128);
rbuf, 128);
} else {
lbuf, 128);
rbuf, 128);
}
/*
* Don't print this message to the console if the operation was done
* to a non-global zone.
*/
logflags |= SL_CONSOLE;
"TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, "
}
/*
* Called inside tcp_rput when a message built using
* tcp_ioctl_abort_build_msg is put into a queue.
* Note that when we get here there is no wildcard in acp any more.
*/
static void
{
/*
* If we get here, we are already on the correct
* squeue. This ioctl follows the following path
* tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn
* ->tcp_ioctl_abort->squeue_fill (if on a
* different squeue)
*/
int errcode;
}
}
/*
* Abort all matching connections on a hash chain.
*/
static int
{
nmatch = 0;
break;
}
} else {
}
nmatch++;
if (exact)
break;
}
/* Avoid holding lock for too long. */
if (nmatch >= 500)
break;
}
/* Pass mp into the correct tcp */
}
goto startover;
return (err);
}
/*
* Abort all connections that matches the attributes specified in acp.
*/
static int
{
int index = -1;
}
} else {
}
}
/*
* For cases where remote addr, local port, and remote port are non-
* wildcards, tcp_ioctl_abort_bucket will only be called once.
*/
if (index != -1) {
} else {
/*
* loop through all entries for wildcard case
*/
for (index = 0;
index++) {
if (err != 0)
break;
}
}
/*
* Don't print this message to the console if the operation was done
* to a non-global zone.
*/
logflags |= SL_CONSOLE;
return (err);
}
/*
* Process the TCP_IOC_ABORT_CONN ioctl request.
*/
static void
{
int err;
goto out;
}
/* check permissions */
goto out;
}
}
/* check that a zone with the supplied zoneid exists */
} else {
goto out;
}
}
/*
* For exclusive stacks we set the zoneid to zero
* to make TCP operate as if in the global zone.
*/
goto out;
}
out:
}
if (err != 0)
else
}
/*
* tcp_time_wait_processing() handles processing of incoming packets when
* the tcp is in the TIME_WAIT state.
* A TIME_WAIT tcp that has an associated open TCP stream is never put
* on the time wait list.
*/
void
{
if (tcp->tcp_snd_ts_ok) {
goto done;
}
}
if (gap < 0) {
goto done;
}
/*
* When TCP receives a duplicate FIN in
* TIME_WAIT state, restart the 2 MSL timer.
* See page 73 in RFC 793. Make sure this TCP
* is already on the TIME_WAIT list. If not,
* just restart the timer.
*/
if (TCP_IS_DETACHED(tcp)) {
B_TRUE) {
}
} else {
}
goto done;
}
flags |= TH_ACK_NEEDED;
seg_len = 0;
goto process_ack;
}
/* Fix seg_seq, and chew the gap off the front. */
}
/*
 * Make sure that when we accept the connection, we pick
* an ISS greater than (tcp_snxt + ISS_INCR/2) for the
* old connection.
*
* The next ISS generated is equal to tcp_iss_incr_extra
* + ISS_INCR/2 + other components depending on the
* value of tcp_strong_iss. We pre-calculate the new
* ISS here and compare with tcp_snxt to determine if
* we need to make adjustment to tcp_iss_incr_extra.
*
* The above calculation is ugly and is a
* waste of CPU cycles...
*/
switch (tcps->tcps_strong_iss) {
case 2: {
/* Add time and MD5 components. */
struct {
} arg;
/* We use MAPPED addresses in tcp_iss_init */
} else {
}
sizeof (arg));
break;
}
case 1:
/* Add time component and min random (i.e. 1). */
break;
default:
/* Add only time component. */
break;
}
/*
 * The new ISS is not guaranteed to be ISS_INCR/2
* ahead of the current tcp_snxt, so add the
* difference to tcp_iss_incr_extra.
*/
}
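	/*
	 * Illustrative sketch (not part of the original source), compiled
	 * out: the adjustment described above.  If the pre-computed ISS
	 * (new_iss, a name assumed here) does not clear the old
	 * connection's tcp_snxt, fold the shortfall into the stack-wide
	 * increment so the eventual ISS will.
	 */
#if 0
	if (SEQ_GT(tcp->tcp_snxt, new_iss))
		tcps->tcps_iss_incr_extra += (tcp->tcp_snxt - new_iss);
#endif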
/*
 * If tcp_clean_death() cannot perform the task now,
* drop the SYN packet and let the other side re-xmit.
* Otherwise pass the SYN packet back in, since the
* old tcp state has been cleaned up or freed.
*/
goto done;
/*
* We will come back to tcp_rput_data
* on the global queue. Packets destined
* for the global queue will be checked
* with global policy. But the policy for
* this packet has already been checked as
* this was destined for the detached
* connection. We need to bypass policy
* check this time by attaching a dummy
* ipsec_in with ipsec_in_dont_check set.
*/
return;
}
goto done;
}
/*
 * rgap is the amount of receive window remaining after this segment.
 * A negative value means the segment extends that many bytes past the
 * right edge of the window.
*/
if (rgap < 0) {
/* Fix seg_len and make sure there is something left. */
if (seg_len <= 0) {
goto done;
}
flags |= TH_ACK_NEEDED;
seg_len = 0;
goto process_ack;
}
}
/*
* Check whether we can update tcp_ts_recent. This test is
* NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP
* Extensions for High Performance: An Update", Internet Draft.
*/
if (tcp->tcp_snd_ts_ok &&
}
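	/*
	 * Illustrative sketch (not part of the original source), compiled
	 * out: the ts_recent update rule referred to above -- accept the
	 * peer's timestamp if it is no older than the one we hold and the
	 * segment does not start beyond the last sequence we have ACKed.
	 * The tcpopt and seg_seq names are assumptions about locals.
	 */
#if 0
	if (TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
	    SEQ_LEQ(seg_seq, tcp->tcp_rack))
		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
#endif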
/* Always ack out of order packets */
flags |= TH_ACK_NEEDED;
seg_len = 0;
} else if (seg_len > 0) {
}
goto done;
}
/*
* Do not delete the TCP structure if it is in
* TIME_WAIT state. Refer to RFC 1122, 4.2.2.13.
*/
goto done;
}
if (bytes_acked <= 0) {
if (bytes_acked == 0 && seg_len == 0 &&
} else {
/* Acks something not sent */
flags |= TH_ACK_NEEDED;
}
}
if (flags & TH_ACK_NEEDED) {
/*
* Time to send an ack for some reason.
*/
}
done:
DB_CKSUMSTART(mp) = 0;
}
}
/*
* Allocate a T_SVR4_OPTMGMT_REQ.
* The caller needs to increment tcp_drop_opt_ack_cnt when sending these so
* that tcp_rput_other can drop the acks.
*/
static mblk_t *
{
struct T_optmgmt_req *tor;
char *optptr;
return (NULL);
if (optlen != 0) {
}
return (mp);
}
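/*
 * Illustrative sketch (not part of the original source): the typical layout
 * of a T_SVR4_OPTMGMT_REQ mblk -- a TPI header followed by an opthdr and the
 * option value.  The function name and the T_NEGOTIATE flag choice are
 * assumptions.
 */
static mblk_t *
tcp_optmgmt_mp_sketch(t_uscalar_t level, t_uscalar_t name, const void *opt,
    t_uscalar_t optlen)
{
	mblk_t			*mp;
	struct T_optmgmt_req	*tor;
	struct opthdr		*oh;

	mp = allocb(sizeof (*tor) + sizeof (*oh) + optlen, BPRI_MED);
	if (mp == NULL)
		return (NULL);
	DB_TYPE(mp) = M_PROTO;

	tor = (struct T_optmgmt_req *)mp->b_rptr;
	tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
	tor->MGMT_flags = T_NEGOTIATE;
	tor->OPT_length = sizeof (*oh) + optlen;
	tor->OPT_offset = sizeof (*tor);

	oh = (struct opthdr *)&tor[1];
	oh->level = level;
	oh->name = name;
	oh->len = optlen;
	if (optlen != 0)
		bcopy(opt, &oh[1], optlen);

	mp->b_wptr = mp->b_rptr + sizeof (*tor) + sizeof (*oh) + optlen;
	return (mp);
}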
/*
* TCP Timers Implementation.
*/
{
} else {
}
return ((timeout_id_t)mp);
}
static void
tcp_timer_callback(void *arg)
{
}
static void
{
/*
* If the TCP has reached the closed state, don't proceed any
* further. This TCP logically does not exist on the system.
 * tcpt_proc could, for example, access queues that have already
 * been qprocsoff'ed. Also see the comments at the start of tcp_input.
*/
} else {
tcp->tcp_timer_tid = 0;
}
}
/*
 * There is a potential race between untimeout and the handler firing at the
 * same time. The mblk may be freed by the handler while we are trying to use
 * it. But since both should execute on the same squeue, this race should not
* occur.
*/
{
return (-1);
if (delta >= 0) {
}
return (delta);
}
/*
 * Allocate space for the timer event. The allocation looks like an mblk,
 * but it is not a proper mblk. To avoid confusion we set b_wptr to NULL.
*
* Dealing with failures: If we can't allocate from the timer cache we try
 * allocating from the dblk caches using allocb_tryhard(). In this case b_wptr
* points to b_rptr.
* If we can't allocate anything using allocb_tryhard(), we perform a last
* attempt and use kmem_alloc_tryhard(). In this case we set b_wptr to -1 and
* save the actual allocation size in b_datap.
*/
mblk_t *
tcp_timermp_alloc(int kmflags)
{
/*
* Failed to allocate memory for the timer. Try allocating from
 * dblk caches.
*/
/* ipclassifier calls this from a constructor - hence no tcps */
/*
* Memory is really low. Try tryhard allocation.
*
* ipclassifier calls this from a constructor -
* hence no tcps
*/
}
}
/* ipclassifier calls this from a constructor - hence no tcps */
return (mp);
}
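/*
 * Illustrative sketch (not part of the original source): the fallback chain
 * described above.  tcp_timercache, allocb_tryhard() and
 * kmem_alloc_tryhard() are named in the surrounding comments; the timer
 * payload type (tcp_timer_t) and the function name are assumptions.
 */
static mblk_t *
tcp_timermp_alloc_sketch(int kmflags)
{
	mblk_t	*mp;
	size_t	size = sizeof (mblk_t) + sizeof (tcp_timer_t);

	/* 1. Preferred: the dedicated timer kmem cache. */
	mp = kmem_cache_alloc(tcp_timercache, kmflags);
	if (mp != NULL) {
		mp->b_wptr = NULL;			/* cache marker */
		return (mp);
	}

	/* 2. Fallback: a real mblk from the dblk caches. */
	mp = allocb_tryhard(sizeof (tcp_timer_t));
	if (mp != NULL) {
		mp->b_wptr = mp->b_rptr;		/* allocb marker */
		return (mp);
	}

	/* 3. Last resort: raw kmem; remember the size for the free path. */
	mp = kmem_alloc_tryhard(size, &size, kmflags);
	if (mp != NULL) {
		mp->b_rptr = (uchar_t *)&mp[1];
		mp->b_wptr = (uchar_t *)-1;		/* raw kmem marker */
		mp->b_datap = (dblk_t *)size;		/* stash actual size */
	}
	return (mp);
}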
/*
 * Free the per-tcp timer cache.
* It can only contain entries from tcp_timercache.
*/
void
{
}
}
/*
 * Free a timer event. Put it on the per-tcp timer cache if there are not too
 * many events there already (currently at most two events are cached).
 * If the event was not allocated from the timer cache, free it right away.
*/
static void
{
/*
 * This allocation is not from the timer cache; free it
 * right away.
*/
else
/* Cache this timer block for future allocations */
} else {
}
}
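/*
 * Illustrative sketch (not part of the original source): the caching policy
 * described above.  The markers match the allocation sketch earlier;
 * `cachep' stands in for the per-tcp cache list head, and the function name
 * is hypothetical.
 */
static void
tcp_timer_free_sketch(mblk_t **cachep, mblk_t *mp)
{
	if (mp->b_wptr != NULL) {
		/* Not from tcp_timercache; free it right away. */
		if (mp->b_wptr != (uchar_t *)-1)
			freeb(mp);
		else
			kmem_free(mp, (size_t)mp->b_datap);
		return;
	}
	if (*cachep == NULL || (*cachep)->b_next == NULL) {
		/* Fewer than two cached events; keep this one. */
		mp->b_next = *cachep;
		*cachep = mp;
	} else {
		kmem_cache_free(tcp_timercache, mp);
	}
}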
/*
* End of TCP Timers implementation.
*/
/*
* tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q. Note that the caller may make the
 * decision to call based on the tcp_t.tcp_flow_stopped value, which,
 * when checked outside the q's lock, is only an advisory check.
*/
void
{
mutex_enter(QLOCK(q));
/* still need to set QFULL */
mutex_exit(QLOCK(q));
} else {
mutex_exit(QLOCK(q));
}
}
}
void
{
mutex_enter(QLOCK(q));
mutex_exit(QLOCK(q));
qbackenable(q, 0);
} else {
mutex_exit(QLOCK(q));
}
}
}
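/*
 * Illustrative sketch (not part of the original source): the shape of the
 * set side of the pattern described above.  The unlocked q_flag test is the
 * advisory check; the flag is re-examined under QLOCK before being changed.
 * The function name is hypothetical.
 */
static void
tcp_setqfull_sketch(tcp_t *tcp, queue_t *q)
{
	if (q->q_flag & QFULL)
		return;				/* advisory fast-path check */

	mutex_enter(QLOCK(q));
	if (!(q->q_flag & QFULL)) {
		/* still need to set QFULL */
		q->q_flag |= QFULL;
		tcp->tcp_flow_stopped = B_TRUE;
		mutex_exit(QLOCK(q));
	} else {
		mutex_exit(QLOCK(q));
	}
}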
/*
 * kstats related to squeues, i.e. not per-IP-instance
*/
static void *
{
tcp_g_stat_t template = {
{ "tcp_timermp_alloced", KSTAT_DATA_UINT64 },
{ "tcp_timermp_allocfail", KSTAT_DATA_UINT64 },
{ "tcp_timermp_allocdblfail", KSTAT_DATA_UINT64 },
{ "tcp_freelist_cleanup", KSTAT_DATA_UINT64 },
};
return (NULL);
return (ksp);
}
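/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * for publishing a named-kstat template like the one above.  The module,
 * instance, name, and class strings are assumptions.
 */
static kstat_t *
tcp_kstat_create_sketch(const kstat_named_t *template, uint_t ndata)
{
	kstat_t	*ksp;

	ksp = kstat_create("tcp", 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, ndata, 0);
	if (ksp == NULL)
		return (NULL);
	/* Copy the statically initialized template into the kstat's data. */
	bcopy(template, ksp->ks_data, ndata * sizeof (kstat_named_t));
	kstat_install(ksp);
	return (ksp);
}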
static void
{
}
}
static void *
{
tcp_stat_t template = {
{ "tcp_time_wait", KSTAT_DATA_UINT64 },
{ "tcp_time_wait_syn", KSTAT_DATA_UINT64 },
{ "tcp_time_wait_success", KSTAT_DATA_UINT64 },
{ "tcp_time_wait_fail", KSTAT_DATA_UINT64 },
{ "tcp_reinput_syn", KSTAT_DATA_UINT64 },
{ "tcp_ip_output", KSTAT_DATA_UINT64 },
{ "tcp_detach_non_time_wait", KSTAT_DATA_UINT64 },
{ "tcp_detach_time_wait", KSTAT_DATA_UINT64 },
{ "tcp_time_wait_reap", KSTAT_DATA_UINT64 },
{ "tcp_clean_death_nondetached", KSTAT_DATA_UINT64 },
{ "tcp_reinit_calls", KSTAT_DATA_UINT64 },
{ "tcp_eager_err1", KSTAT_DATA_UINT64 },
{ "tcp_eager_err2", KSTAT_DATA_UINT64 },
{ "tcp_eager_blowoff_calls", KSTAT_DATA_UINT64 },
{ "tcp_eager_blowoff_q", KSTAT_DATA_UINT64 },
{ "tcp_eager_blowoff_q0", KSTAT_DATA_UINT64 },
{ "tcp_not_hard_bound", KSTAT_DATA_UINT64 },
{ "tcp_no_listener", KSTAT_DATA_UINT64 },
{ "tcp_found_eager", KSTAT_DATA_UINT64 },
{ "tcp_wrong_queue", KSTAT_DATA_UINT64 },
{ "tcp_found_eager_binding1", KSTAT_DATA_UINT64 },
{ "tcp_found_eager_bound1", KSTAT_DATA_UINT64 },
{ "tcp_eager_has_listener1", KSTAT_DATA_UINT64 },
{ "tcp_open_alloc", KSTAT_DATA_UINT64 },
{ "tcp_open_detached_alloc", KSTAT_DATA_UINT64 },
{ "tcp_rput_time_wait", KSTAT_DATA_UINT64 },
{ "tcp_listendrop", KSTAT_DATA_UINT64 },
{ "tcp_listendropq0", KSTAT_DATA_UINT64 },
{ "tcp_wrong_rq", KSTAT_DATA_UINT64 },
{ "tcp_rsrv_calls", KSTAT_DATA_UINT64 },
{ "tcp_eagerfree2", KSTAT_DATA_UINT64 },
{ "tcp_eagerfree3", KSTAT_DATA_UINT64 },
{ "tcp_eagerfree4", KSTAT_DATA_UINT64 },
{ "tcp_eagerfree5", KSTAT_DATA_UINT64 },
{ "tcp_timewait_syn_fail", KSTAT_DATA_UINT64 },
{ "tcp_listen_badflags", KSTAT_DATA_UINT64 },
{ "tcp_timeout_calls", KSTAT_DATA_UINT64 },
{ "tcp_timeout_cached_alloc", KSTAT_DATA_UINT64 },
{ "tcp_timeout_cancel_reqs", KSTAT_DATA_UINT64 },
{ "tcp_timeout_canceled", KSTAT_DATA_UINT64 },
{ "tcp_timermp_freed", KSTAT_DATA_UINT64 },
{ "tcp_push_timer_cnt", KSTAT_DATA_UINT64 },
{ "tcp_ack_timer_cnt", KSTAT_DATA_UINT64 },
{ "tcp_ire_null1", KSTAT_DATA_UINT64 },
{ "tcp_ire_null", KSTAT_DATA_UINT64 },
{ "tcp_ip_send", KSTAT_DATA_UINT64 },
{ "tcp_ip_ire_send", KSTAT_DATA_UINT64 },
{ "tcp_wsrv_called", KSTAT_DATA_UINT64 },
{ "tcp_flwctl_on", KSTAT_DATA_UINT64 },
{ "tcp_timer_fire_early", KSTAT_DATA_UINT64 },
{ "tcp_timer_fire_miss", KSTAT_DATA_UINT64 },
{ "tcp_rput_v6_error", KSTAT_DATA_UINT64 },
{ "tcp_out_sw_cksum", KSTAT_DATA_UINT64 },
{ "tcp_out_sw_cksum_bytes", KSTAT_DATA_UINT64 },
{ "tcp_zcopy_on", KSTAT_DATA_UINT64 },
{ "tcp_zcopy_off", KSTAT_DATA_UINT64 },
{ "tcp_zcopy_backoff", KSTAT_DATA_UINT64 },
{ "tcp_zcopy_disable", KSTAT_DATA_UINT64 },
{ "tcp_mdt_pkt_out", KSTAT_DATA_UINT64 },
{ "tcp_mdt_pkt_out_v4", KSTAT_DATA_UINT64 },
{ "tcp_mdt_pkt_out_v6", KSTAT_DATA_UINT64 },
{ "tcp_mdt_discarded", KSTAT_DATA_UINT64 },
{ "tcp_mdt_conn_halted1", KSTAT_DATA_UINT64 },
{ "tcp_mdt_conn_halted2", KSTAT_DATA_UINT64 },
{ "tcp_mdt_conn_halted3", KSTAT_DATA_UINT64 },
{ "tcp_mdt_conn_resumed1", KSTAT_DATA_UINT64 },
{ "tcp_mdt_conn_resumed2", KSTAT_DATA_UINT64 },
{ "tcp_mdt_legacy_small", KSTAT_DATA_UINT64 },
{ "tcp_mdt_legacy_all", KSTAT_DATA_UINT64 },
{ "tcp_mdt_legacy_ret", KSTAT_DATA_UINT64 },
{ "tcp_mdt_allocfail", KSTAT_DATA_UINT64 },
{ "tcp_mdt_addpdescfail", KSTAT_DATA_UINT64 },
{ "tcp_mdt_allocd", KSTAT_DATA_UINT64 },
{ "tcp_mdt_linked", KSTAT_DATA_UINT64 },
{ "tcp_fusion_flowctl", KSTAT_DATA_UINT64 },
{ "tcp_fusion_backenabled", KSTAT_DATA_UINT64 },
{ "tcp_fusion_urg", KSTAT_DATA_UINT64 },
{ "tcp_fusion_putnext", KSTAT_DATA_UINT64 },
{ "tcp_fusion_unfusable", KSTAT_DATA_UINT64 },
{ "tcp_fusion_aborted", KSTAT_DATA_UINT64 },
{ "tcp_fusion_unqualified", KSTAT_DATA_UINT64 },
{ "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64 },
{ "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64 },
{ "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64 },
{ "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64 },
{ "tcp_sock_fallback", KSTAT_DATA_UINT64 },
{ "tcp_lso_enabled", KSTAT_DATA_UINT64 },
{ "tcp_lso_disabled", KSTAT_DATA_UINT64 },
{ "tcp_lso_times", KSTAT_DATA_UINT64 },
{ "tcp_lso_pkt_out", KSTAT_DATA_UINT64 },
};
return (NULL);
return (ksp);
}
static void
{
}
}
/*
* TCP Kstats implementation
*/
static void *
{
{ "rtoAlgorithm", KSTAT_DATA_INT32, 0 },
{ "rtoMin", KSTAT_DATA_INT32, 0 },
{ "rtoMax", KSTAT_DATA_INT32, 0 },
{ "maxConn", KSTAT_DATA_INT32, 0 },
{ "activeOpens", KSTAT_DATA_UINT32, 0 },
{ "passiveOpens", KSTAT_DATA_UINT32, 0 },
{ "attemptFails", KSTAT_DATA_UINT32, 0 },
{ "estabResets", KSTAT_DATA_UINT32, 0 },
{ "currEstab", KSTAT_DATA_UINT32, 0 },
{ "inSegs", KSTAT_DATA_UINT64, 0 },
{ "outSegs", KSTAT_DATA_UINT64, 0 },
{ "retransSegs", KSTAT_DATA_UINT32, 0 },
{ "connTableSize", KSTAT_DATA_INT32, 0 },
{ "outRsts", KSTAT_DATA_UINT32, 0 },
{ "outDataSegs", KSTAT_DATA_UINT32, 0 },
{ "outDataBytes", KSTAT_DATA_UINT32, 0 },
{ "retransBytes", KSTAT_DATA_UINT32, 0 },
{ "outAck", KSTAT_DATA_UINT32, 0 },
{ "outAckDelayed", KSTAT_DATA_UINT32, 0 },
{ "outUrg", KSTAT_DATA_UINT32, 0 },
{ "outWinUpdate", KSTAT_DATA_UINT32, 0 },
{ "outWinProbe", KSTAT_DATA_UINT32, 0 },
{ "outControl", KSTAT_DATA_UINT32, 0 },
{ "outFastRetrans", KSTAT_DATA_UINT32, 0 },
{ "inAckSegs", KSTAT_DATA_UINT32, 0 },
{ "inAckBytes", KSTAT_DATA_UINT32, 0 },
{ "inDupAck", KSTAT_DATA_UINT32, 0 },
{ "inAckUnsent", KSTAT_DATA_UINT32, 0 },
{ "inDataInorderSegs", KSTAT_DATA_UINT32, 0 },
{ "inDataInorderBytes", KSTAT_DATA_UINT32, 0 },
{ "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 },
{ "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 },
{ "inDataDupSegs", KSTAT_DATA_UINT32, 0 },
{ "inDataDupBytes", KSTAT_DATA_UINT32, 0 },
{ "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 },
{ "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 },
{ "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 },
{ "inDataPastWinBytes", KSTAT_DATA_UINT32, 0 },
{ "inWinProbe", KSTAT_DATA_UINT32, 0 },
{ "inWinUpdate", KSTAT_DATA_UINT32, 0 },
{ "inClosed", KSTAT_DATA_UINT32, 0 },
{ "rttUpdate", KSTAT_DATA_UINT32, 0 },
{ "rttNoUpdate", KSTAT_DATA_UINT32, 0 },
{ "timRetrans", KSTAT_DATA_UINT32, 0 },
{ "timRetransDrop", KSTAT_DATA_UINT32, 0 },
{ "timKeepalive", KSTAT_DATA_UINT32, 0 },
{ "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 },
{ "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 },
{ "listenDrop", KSTAT_DATA_UINT32, 0 },
{ "listenDropQ0", KSTAT_DATA_UINT32, 0 },
{ "halfOpenDrop", KSTAT_DATA_UINT32, 0 },
{ "outSackRetransSegs", KSTAT_DATA_UINT32, 0 },
{ "connTableSize6", KSTAT_DATA_INT32, 0 }
};
return (NULL);
return (ksp);
}
static void
{
}
}
static int
{
int i;
netstack_t *ns;
return (EIO);
if (rw == KSTAT_WRITE)
return (EACCES);
return (-1);
return (-1);
}
for (i = 0; i < CONN_G_HASH_SIZE; i++) {
while ((connp =
switch (tcp_snmp_state(tcp)) {
case MIB2_TCP_established:
case MIB2_TCP_closeWait:
break;
}
}
}
return (0);
}
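/*
 * Illustrative sketch (not part of the original source): the switch in the
 * loop above feeds a "current connections" style counter -- only connections
 * reported as established or close-wait by tcp_snmp_state() count, matching
 * the currEstab entry of the kstat template.  The helper name is
 * hypothetical.
 */
static boolean_t
tcp_counts_as_established_sketch(tcp_t *tcp)
{
	switch (tcp_snmp_state(tcp)) {
	case MIB2_TCP_established:
	case MIB2_TCP_closeWait:
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
}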
void
{
/* Already has an eager */
return;
}
case IPV4_VERSION:
break;
case IPV6_VERSION:
return;
}
break;
}
}
}
static squeue_func_t
tcp_squeue_switch(int val)
{
	squeue_func_t rval = squeue_fill;

	switch (val) {
	case 1:
		rval = squeue_enter_nodrain;
		break;
case 2:
rval = squeue_enter;
break;
default:
break;
}
return (rval);
}
/*
* This is called once for each squeue - globally for all stack
* instances.
*/
static void
{
sizeof (tcp_squeue_priv_t), KM_SLEEP);
if (tcp_free_list_max_cnt == 0) {
/*
 * Limit number of entries to 1% of available memory / tcp_ncpus
*/
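		/*
		 * Illustrative sketch (not part of the original source),
		 * compiled out: one way to derive that bound.  Using
		 * freemem as the measure of available memory and
		 * tcp_t + conn_t as the per-entry cost are assumptions.
		 */
#if 0
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * 100 * (sizeof (tcp_t) + sizeof (conn_t)));
#endif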
}
}