ip.h revision 48de1bd24caf1a4f9ff4414ba002d33c55cf37e2
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990 Mentat Inc. */
#ifndef _INET_IP_H
#define _INET_IP_H
#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/isa_defs.h>
#include <sys/hook_event.h>
#include <sys/hook_impl.h>
#include <inet/ip_stack.h>
#ifdef _KERNEL
#include <sys/multidata.h>
#ifdef DEBUG
#define ILL_DEBUG
#define IRE_DEBUG
#define NCE_DEBUG
#define CONN_DEBUG
#endif
#define IP_DEBUG
/*
* The mt-streams(9F) flags for the IP module; put here so that other
* "drivers" that are actually IP (e.g., ICMP, UDP) can use the same set
* of flags.
*/
#define IP_DEVMTFLAGS D_MP
#endif /* _KERNEL */
#define IP_MOD_NAME "ip"
#define IP_DEV_NAME "/dev/ip"
#define IP6_DEV_NAME "/dev/ip6"
#define UDP_MOD_NAME "udp"
#define UDP_DEV_NAME "/dev/udp"
#define UDP6_DEV_NAME "/dev/udp6"
#define TCP_MOD_NAME "tcp"
#define TCP_DEV_NAME "/dev/tcp"
#define TCP6_DEV_NAME "/dev/tcp6"
#define SCTP_MOD_NAME "sctp"
/* Minor numbers */
#define IPV4_MINOR 0
#define IPV6_MINOR 1
#define TCP_MINOR 2
#define TCP_MINOR6 3
#ifndef _IPADDR_T
#define _IPADDR_T
#endif
/* Number of bits in an address */
#define IP_ABITS 32
#define IPV6_ABITS 128
/*
* Flag to IP write side to indicate that the appln has sent in a pre-built
* IP header. Stored in ipha_ident (which is otherwise zero).
*/
#define IP_HDR_INCLUDED 0xFFFF
#define ILL_FRAG_HASH_TBL_COUNT ((unsigned int)64)
#define IPV4_ADDR_LEN 4
#define IP_ADDR_LEN IPV4_ADDR_LEN
#define IP_ARP_PROTO_TYPE 0x0800
#define IPV4_VERSION 4
#define IP_VERSION IPV4_VERSION
#define IP_SIMPLE_HDR_LENGTH_IN_WORDS 5
#define IP_SIMPLE_HDR_LENGTH 20
#define IP_MAX_HDR_LENGTH 60
/*
* 2 files should be cleaned up to remove all redundant definitions.
*/
#define IP_MAXPACKET 65535
#define IP_SIMPLE_HDR_VERSION \
#define UDPH_SIZE 8
/* Leave room for ip_newroute to tack on the src and target addresses */
#define OK_RESOLVER_MP(mp) \
/*
* Constants and type definitions to support IP IOCTL commands
*/
#define IP_IOC_IRE_DELETE 4
#define IP_IOC_IRE_DELETE_NO_REPLY 5
#define IP_IOC_IRE_ADVISE_NO_REPLY 6
#define IP_IOC_RTS_REQUEST 7
/* Common definitions used by IP IOCTL data structures */
typedef struct ipllcmd_s {
	/*
	 * NOTE(review): no members are visible in this revision; the body
	 * appears truncated (an empty struct is not valid C).  Restore
	 * from the complete header before use.
	 */
} ipllc_t;
/* IP IRE Change Command Structure. */
typedef struct ipic_s {
	/* NOTE(review): member list missing in this revision -- truncated. */
} ipic_t;
/* IP IRE Delete Command Structure. */
typedef struct ipid_s {
	/* NOTE(review): member list missing in this revision -- truncated. */
} ipid_t;
#ifdef _KERNEL
/*
* Temporary state for ip options parser.
*/
typedef struct ipoptp_s
{
	/*
	 * NOTE(review): the parser-state members (including ipoptp_flags,
	 * referenced by IPOPTP_ERROR below) are missing -- truncated.
	 */
} ipoptp_t;
/*
* Flag(s) for ipoptp_flags
*/
#define IPOPTP_ERROR 0x00000001
#endif /* _KERNEL */
/* Controls forwarding of IP packets, set via ndd */
#define IP_FORWARD_NEVER 0
#define IP_FORWARD_ALWAYS 1
#define IPH_HDR_LENGTH(ipha) \
#define IPH_HDR_VERSION(ipha) \
#ifdef _KERNEL
/*
* IP reassembly macros. We hide starting and ending offsets in b_next and
* b_prev of messages on the reassembly queue. The messages are chained using
* b_cont. These macros are used in ip_reassemble() so we don't have to see
* the ugly casts and assignments.
* Note that the offsets are <= 64k i.e. a uint_t is sufficient to represent
* them.
*/
#define IP_REASS_SET_START(mp, u) \
#define IP_REASS_SET_END(mp, u) \
#define IP_REASS_COMPLETE 0x1
#define IP_REASS_PARTIAL 0x2
#define IP_REASS_FAILED 0x4
/*
* Test to determine whether this is a module instance of IP or a
* driver instance of IP.
*/
/*
* The following two macros are used by IP to get the appropriate
* wq and rq for a conn. If it is a TCP conn, then we need
* from a conn directly if it knows that the conn is not TCP.
*/
#define CONNP_TO_WQ(connp) \
#define GRAB_CONN_LOCK(q) { \
}
#define RELEASE_CONN_LOCK(q) { \
}
/* "Congestion controlled" protocol */
/*
* Complete the pending operation. Usually an ioctl. Can also
* be a bind or option management request that got enqueued
* in an ipsq_t. Called on completion of the operation.
*/
#define CONN_OPER_PENDING_DONE(connp) { \
CONN_DEC_REF(connp); \
}
/* Get the credential of an IP queue of unknown type */
#define GET_QUEUE_CRED(wq) \
/*
* Flags for the various ip_fanout_* routines.
*/
/*
* Following flags are used by IPQoS to determine if policy processing is
* required.
*/
#ifndef IRE_DB_TYPE
#define IRE_DB_TYPE M_SIG
#endif
#ifndef IRE_DB_REQ_TYPE
#define IRE_DB_REQ_TYPE M_PCSIG
#endif
#ifndef IRE_ARPRESOLVE_TYPE
#define IRE_ARPRESOLVE_TYPE M_EVENT
#endif
/*
* Values for squeue switch:
*/
#define IP_SQUEUE_ENTER_NODRAIN 1
#define IP_SQUEUE_ENTER 2
/*
* This is part of the interface between Transport provider and
* IP which can be used to set policy information. This is usually
* accompanied with O_T_BIND_REQ/T_BIND_REQ. ip_bind assumes that
* only IPSEC_POLICY_SET is there when it is found in the chain.
* The information contained is an struct ipsec_req_t. On success
* or failure, either the T_BIND_ACK or the T_ERROR_ACK is returned.
* IPSEC_POLICY_SET is never returned.
*/
#define IPSEC_POLICY_SET M_SETOPTS
/* IP Fragmentation Reassembly Header */
typedef struct ipf_s {
struct ipf_s *ipf_hash_next; /* Next entry in the reassembly hash chain */
int ipf_hole_cnt; /* Number of holes (hard-case). */
int ipf_end; /* Tail end offset (0 -> hard-case). */
/* NOTE(review): the remaining ipf_t members appear truncated in this revision. */
} ipf_t;
/*
 * Direction tag recorded for a packet so the matching ire packet
 * counter can be bumped (see UPDATE_IB_PKT_COUNT/UPDATE_OB_PKT_COUNT).
 */
typedef enum {
	IB_PKT = 0x01,		/* inbound packet */
	OB_PKT = 0x02		/* outbound packet */
} ip_pkt_t;
#define UPDATE_IB_PKT_COUNT(ire)\
{ \
(ire)->ire_ib_pkt_count++; \
/* \
* forwarding packet \
*/ \
else \
} \
}
#define UPDATE_OB_PKT_COUNT(ire)\
{ \
(ire)->ire_ob_pkt_count++;\
} \
}
{ \
switch (ipha->ipha_protocol) { \
case IPPROTO_UDP: \
break; \
default: \
break; \
} \
}
/*
* NCE_EXPIRED is TRUE when we have a non-permanent nce that was
* found to be REACHABLE more than ip_ire_arp_interval ms ago.
* This macro is used to age existing nce_t entries. The
* nce's will get cleaned up in the following circumstances:
* - ip_ire_trash_reclaim will free nce's using ndp_cache_reclaim
* when memory is low,
* - ip_arp_news, when updates are received.
* - if the nce is NCE_EXPIRED(), it will be deleted, so that a new
* arp request will need to be triggered from an ND_INITIAL nce.
*
* Note that the nce state transition follows the pattern:
* ND_INITIAL -> ND_INCOMPLETE -> ND_REACHABLE
* after which the nce is deleted when it has expired.
*
* nce_last is the timestamp that indicates when the nce_res_mp in the
* nce_t was last updated to a valid link-layer address. nce_last gets updated:
* - when the nce is created
* - every time we get a sane arp response for the nce.
*/
#endif /* _KERNEL */
/* ICMP types */
#define ICMP_ECHO_REPLY 0
#define ICMP_DEST_UNREACHABLE 3
#define ICMP_SOURCE_QUENCH 4
#define ICMP_REDIRECT 5
#define ICMP_ECHO_REQUEST 8
#define ICMP_ROUTER_ADVERTISEMENT 9
#define ICMP_ROUTER_SOLICITATION 10
#define ICMP_TIME_EXCEEDED 11
#define ICMP_PARAM_PROBLEM 12
#define ICMP_TIME_STAMP_REQUEST 13
#define ICMP_TIME_STAMP_REPLY 14
#define ICMP_INFO_REQUEST 15
#define ICMP_INFO_REPLY 16
#define ICMP_ADDRESS_MASK_REQUEST 17
#define ICMP_ADDRESS_MASK_REPLY 18
/* ICMP_TIME_EXCEEDED codes */
#define ICMP_TTL_EXCEEDED 0
#define ICMP_REASSEMBLY_TIME_EXCEEDED 1
/* ICMP_DEST_UNREACHABLE codes */
#define ICMP_NET_UNREACHABLE 0
#define ICMP_HOST_UNREACHABLE 1
#define ICMP_PROTOCOL_UNREACHABLE 2
#define ICMP_PORT_UNREACHABLE 3
#define ICMP_FRAGMENTATION_NEEDED 4
#define ICMP_SOURCE_ROUTE_FAILED 5
#define ICMP_DEST_NET_UNKNOWN 6
#define ICMP_DEST_HOST_UNKNOWN 7
#define ICMP_SRC_HOST_ISOLATED 8
#define ICMP_DEST_NET_UNREACH_ADMIN 9
#define ICMP_DEST_HOST_UNREACH_ADMIN 10
#define ICMP_DEST_NET_UNREACH_TOS 11
#define ICMP_DEST_HOST_UNREACH_TOS 12
/* ICMP Header Structure */
typedef struct icmph_s {
/*
 * NOTE(review): this definition is malformed as shown.  The fixed
 * leading fields and the opening of the "icmph_u" union are missing,
 * and the per-message-type struct bodies are empty -- the stray
 * "} icmph_u;" below closes a union whose opening brace is not
 * present.  ICMPH_SIZE (8) below depends on the full layout; restore
 * from the complete header.
 */
union {
} u_echo;
struct { /* Destination unreachable structure */
} u_du;
struct { /* Parameter problem structure */
} u_pp;
struct { /* Redirect structure */
} u_rd;
} icmph_u;
} icmph_t;
#define ICMPH_SIZE 8
/*
* Minimum length of transport layer header included in an ICMP error
* message for it to be considered valid.
*/
#define ICMP_MIN_TP_HDR_LEN 8
/* Aligned IP header */
typedef struct ipha_s {
	/*
	 * NOTE(review): the IPv4 header fields are missing in this
	 * revision (code elsewhere references e.g. ipha_protocol and
	 * expects the 20-byte IP_SIMPLE_HDR_LENGTH layout) -- truncated.
	 */
} ipha_t;
/* ECN code points for IPv4 TOS byte and IPv6 traffic class octet. */
/* IP Mac info structure */
typedef struct ip_m_s {
int ip_m_type; /* From <net/if_types.h> */
uint32_t *, in6_addr_t *);
/*
 * NOTE(review): the line above is the tail of a stripped
 * function-pointer member (a link-layer mapping op per the comment
 * below) -- not valid C as shown; restore from the full header.
 */
} ip_m_t;
/*
* The following functions attempt to reduce the link layer dependency
* of the IP stack. The current set of link specific operations are:
* a. map from IPv4 class D (224.0/4) multicast address range to the link
* layer multicast address range.
* b. map from IPv6 multicast address range (ff00::/8) to the link
* layer multicast address range.
* c. derive the default IPv6 interface identifier from the link layer
* address.
*/
/* Router entry types */
/* net without any address mapping. */
/* net with resolver. */
/*
* IRE_MIPRTUN is only set on the ires in the ip_mrtun_table.
* This ire_type must not be set for ftable and ctable routing entries.
*/
/*
* If an IRE is marked with IRE_MARK_CONDEMNED, the last walker of
* the bucket should delete this IRE from this bucket.
*/
#define IRE_MARK_CONDEMNED 0x0001
/*
* If a broadcast IRE is marked with IRE_MARK_NORECV, ip_rput will drop the
* broadcast packets received on that interface. This is marked only
* on broadcast ires. Employed by IPMP, where we have multiple NICs on the
* same subnet receiving the same broadcast packet.
*/
#define IRE_MARK_NORECV 0x0002
/*
* IRE_CACHE marked this way won't be returned by ire_cache_lookup. Need
* to look specifically using MATCH_IRE_MARK_HIDDEN. Used by IPMP.
*/
/*
* ire with IRE_MARK_NOADD is created in ip_newroute_ipif, when outgoing
* interface is specified by IP_XMIT_IF socket option. This ire is not
* added in IRE_CACHE. For example, this is used by mipagent to prevent
* any entry to be added in the cache table. We do not want to add any
* entry for a mobile-node in the routing table for foreign agent originated
* packets. Adding routes in cache table in this case, may run the risks of
* incorrect routing path in case of private overlapping addresses.
*/
/*
* IRE marked with IRE_MARK_TEMPORARY means that this IRE has been used
* either for forwarding a packet or has not been used for sending
* traffic on TCP connections terminated on this system. In both
* cases, this IRE is the first to go when IRE is being cleaned up.
*/
#define IRE_MARK_TEMPORARY 0x0010
/*
* IRE marked with IRE_MARK_USESRC_CHECK means that while adding an IRE with
* this mark, additional atomic checks need to be performed. For eg: by the
* time an IRE_CACHE is created, sent up to ARP and then comes back to IP; the
* usesrc grouping could have changed in which case we want to fail adding
* the IRE_CACHE entry
*/
#define IRE_MARK_USESRC_CHECK 0x0020
/*
* IRE_MARK_PRIVATE_ADDR is used for IP_NEXTHOP. When IP_NEXTHOP is set, the
* routing table lookup for the destination is bypassed and the packet is
* sent directly to the specified nexthop. The associated IRE_CACHE entries
* should be marked with IRE_MARK_PRIVATE_ADDR flag so that they don't show up
* in regular ire cache lookups.
*/
#define IRE_MARK_PRIVATE_ADDR 0x0040
/*
* When we send an ARP resolution query for the nexthop gateway's ire,
* we use esballoc to create the ire_t in the AR_ENTRY_QUERY mblk
* chain, and mark its ire_marks with IRE_MARK_UNCACHED. This flag
* indicates that information from ARP has not been transferred to a
* permanent IRE_CACHE entry. The flag is reset only when the
* information is successfully transferred to an ire_cache entry (in
* ire_add()). Attempting to free the AR_ENTRY_QUERY mblk chain prior
* to ire_add (e.g., from arp, or from ip`ip_wput_nondata) will
* be cleaned up. The free callback routine (ire_freemblk()) checks
* for IRE_MARK_UNCACHED to see if any resources that are pinned down
* will need to be cleaned up or not.
*/
#define IRE_MARK_UNCACHED 0x0080
/*
* The comment below (and for other netstack_t references) refers
* to the fact that we only do netstack_hold in particular cases,
* such as the references from open streams (ill_t and conn_t's
* pointers). Internally within IP we rely on IP's ability to cleanup e.g.
* ire_t's when an ill goes away.
*/
typedef struct ire_expire_arg_s {
int iea_flush_flag;
/* Flags with ire_expire routine */
/* Arguments to ire_flush_cache() */
#define IRE_FLUSH_DELETE 0
#define IRE_FLUSH_ADD 1
/*
* These are kept in a separate field in the conn and the synchronization
* depends on the atomic 32 bit access to that field.
*/
/* Used to check connection state flags before caching the IRE */
#define CONN_CACHE_IRE(connp) \
/*
* Parameter to ip_output giving the identity of the caller.
* IP_WSRV means the packet was enqueued in the STREAMS queue
* due to flow control and is now being reprocessed in the context of
* the STREAMS service procedure, consequent to flow control relief.
* IRE_SEND means the packet is being reprocessed consequent to an
* ire cache creation and addition and this may or may not be happening
* in the service procedure context. Anything other than the above 2
* cases is identified as IP_WPUT. Most commonly this is the case of
* packets coming down from the application.
*/
#ifdef _KERNEL
/*
*/
#define MAX_FILTER_SIZE 64
typedef struct slist_s {
int sl_numsrc; /* number of source addresses in the list */
/*
 * NOTE(review): the source-address array itself is missing here --
 * presumably bounded by MAX_FILTER_SIZE above; TODO confirm.
 */
} slist_t;
/*
* Following struct is used to maintain retransmission state for
* a multicast group. One rtx_state_t struct is an in-line field
* of the ilm_t struct; the slist_ts in the rtx_state_t struct are
* alloc'd as needed.
*/
typedef struct rtx_state_s {
int rtx_cnt; /* retrans count */
int rtx_fmode_cnt; /* retrans count for fmode change */
/*
 * NOTE(review): the slist_t pointers described as "alloc'd as needed"
 * in the comment above are not visible here -- truncated.
 */
} rtx_state_t;
/*
* Used to construct list of multicast address records that will be
* sent in a single listener report.
*/
typedef struct mrec_s {
	/* NOTE(review): member list missing in this revision -- truncated. */
} mrec_t;
/* Group membership list per upper conn */
/*
* XXX can we make ilg survive an ifconfig unplumb + plumb
*
* ilg_ipif is used by IPv4 as multicast groups are joined using an interface
* address (ipif).
* ilg_ill is used by IPv6 as multicast groups are joined using an interface
* index (phyint->phyint_ifindex).
* ilg_ill is NULL for IPv4 and ilg_ipif is NULL for IPv6.
*
* ilg records the state of multicast memberships of a socket end point.
* ilm records the state of multicast memberships with the driver and is
* maintained per interface.
*
* Notes :
*
* 1) There is no direct link between a given ilg and ilm. If the
* application has joined a group G with ifindex I, we will have
* an ilg with ilg_v6group and ilg_ill. There will be a corresponding
* ilm with ilm_ill/ilm_v6addr recording the multicast membership.
* To delete the membership,
*
* a) Search for ilg matching on G and I with ilg_v6group
* and ilg_ill. Delete ilg_ill.
* b) Search the corresponding ilm matching on G and I with
* ilm_v6addr and ilm_ill. Delete ilm.
*
* In IPv4, the only difference is, we look using ipifs instead of
* ills.
*
* 2) With IP multipathing, we want to keep receiving even after the
* interface has failed. We do this by moving multicast memberships
* to a new_ill within the group. This is achieved by sending
* need to be able to delete memberships which will still come down
* with the ifindex of the old ill which is what the application
* knows of. Thus we store the ilm_/ilg_orig_ifindex to keep track
* of where we joined initially so that we can lookup even after we
* moved the membership. It is also used for moving back the membership
* when the old ill has been repaired. This is done by looking up for
* ilms with ilm_orig_ifindex matching on the old ill's ifindex. Only
* ilms actually move from old ill to new ill. ilgs don't move (just
* the ilg_ill is changed when it moves) as it just records the state
* of the application that has joined a group G where as ilm records
* the state joined with the driver. Thus when we send DL_XXXMULTI_REQs
* we also need to keep the ilm in the right ill.
*
* In IPv4, as ipifs move from old ill to new_ill, ilgs and ilms move
* implicitly as we use only ipifs in IPv4. Thus, one can always lookup
* orig_ifindex. We move ilms still to record the driver state as
* mentioned above.
*/
/*
* The ilg_t and ilm_t members are protected by ipsq. They can be changed only
* multicast group has to execute in the ipsq.
*/
typedef struct ilg_s {
int ilg_orig_ifindex; /* Interface originally joined on */
/*
 * NOTE(review): ilg_v6group, ilg_ill and ilg_ipif, described in the
 * block comment above, are missing here -- truncated.
 */
} ilg_t;
/*
* Multicast address list entry for lower ill.
* ilm_ipif is used by IPv4 as multicast groups are joined using ipif.
* ilm_ill is used by IPv6 as multicast groups are joined using ill.
* ilm_ill is NULL for IPv4 and ilm_ipif is NULL for IPv6.
*
* The comment below (and for other netstack_t references) refers
* to the fact that we only do netstack_hold in particular cases,
* such as the references from open streams (ill_t and conn_t's
* pointers). Internally within IP we rely on IP's ability to cleanup e.g.
* ire_t's when an ill goes away.
*/
typedef struct ilm_s {
int ilm_refcnt; /* reference count on this membership */
int ilm_orig_ifindex; /* V6_MULTICAST_IF/ilm_ipif index */
int ilm_no_ilg_cnt; /* number of joins w/ no ilg */
/*
 * NOTE(review): ilm_ill/ilm_ipif/ilm_v6addr, described in the comment
 * above, are missing here -- truncated.
 */
} ilm_t;
/*
* ilm_walker_cleanup needs to execute when the ilm_walker_cnt goes down to
* zero. In addition it needs to block new walkers while it is unlinking ilm's
* from the list. Thus simple atomics for the ill_ilm_walker_cnt don't suffice.
*/
#define ILM_WALKER_HOLD(ill) { \
ill->ill_ilm_walker_cnt++; \
}
#define ILM_WALKER_RELE(ill) { \
(ill)->ill_ilm_walker_cnt--; \
}
/*
* Soft reference to an IPsec SA.
*
* In relative terms, conn's can be persistent (living as long as the
* processes which create them), while SA's are ephemeral (dying when
* they hit their time-based or byte-based lifetimes).
*
* We could hold a hard reference to an SA from an ipsec_latch_t,
* but this would cause expired SA's to linger for a potentially
* unbounded time.
*
* Instead, we remember the hash bucket number and bucket generation
* in addition to the pointer. The bucket generation is incremented on
* each deletion.
*/
typedef struct ipsa_ref_s
{
struct isaf_s *ipsr_bucket; /* SA hash bucket (soft reference) */
/*
 * NOTE(review): the bucket-generation member described in the comment
 * above is missing here -- truncated.
 */
} ipsa_ref_t;
/*
* IPsec "latching" state.
*
* In the presence of IPsec policy, fully-bound conn's bind a connection
* to more than just the 5-tuple, but also a specific IPsec action and
* identity-pair.
*
* As an optimization, we also cache soft references to IPsec SA's
* here so that we can fast-path around most of the work needed for
* outbound IPsec SA selection.
*
* Were it not for TCP's detached connections, this state would be
* in-line in conn_t; instead, this is in a separate structure so it
* can be handed off to TCP when a connection is detached.
*/
typedef struct ipsec_latch_s
{
struct ipsid_s *ipl_local_cid;
struct ipsid_s *ipl_remote_cid;
unsigned int
ipl_ids_latched : 1,
ipl_pad_to_bit_31 : 27;
#define IPLATCH_REFHOLD(ipl) { \
}
membar_exit(); \
}
/*
* peer identity structure.
*/
/*
* The old IP client structure "ipc_t" is gone. All the data is stored in the
* connection structure "conn_t" now. The mapping of old and new fields looks
* like this:
*
* ipc_ulp conn_ulp
* ipc_rq conn_rq
* ipc_wq conn_wq
*
* ipc_laddr conn_src
* ipc_faddr conn_rem
* ipc_v6laddr conn_srcv6
* ipc_v6faddr conn_remv6
*
* ipc_lport conn_lport
* ipc_fport conn_fport
* ipc_ports conn_ports
*
* ipc_policy conn_policy
* ipc_latch conn_latch
*
* ipc_irc_lock conn_lock
* ipc_ire_cache conn_ire_cache
*
* ipc_state_flags conn_state_flags
* ipc_outgoing_ill conn_outgoing_ill
*
* ipc_dontroute conn_dontroute
* ipc_loopback conn_loopback
* ipc_broadcast conn_broadcast
* ipc_reuseaddr conn_reuseaddr
*
* ipc_multicast_loop conn_multicast_loop
* ipc_multi_router conn_multi_router
* ipc_priv_stream conn_priv_stream
* ipc_draining conn_draining
*
* ipc_did_putbq conn_did_putbq
* ipc_unspec_src conn_unspec_src
* ipc_policy_cached conn_policy_cached
*
* ipc_in_enforce_policy conn_in_enforce_policy
* ipc_out_enforce_policy conn_out_enforce_policy
* ipc_af_isv6 conn_af_isv6
* ipc_pkt_isv6 conn_pkt_isv6
*
* ipc_ipv6_recvpktinfo conn_ipv6_recvpktinfo
*
* ipc_ipv6_recvhoplimit conn_ipv6_recvhoplimit
* ipc_ipv6_recvhopopts conn_ipv6_recvhopopts
* ipc_ipv6_recvdstopts conn_ipv6_recvdstopts
*
* ipc_ipv6_recvrthdr conn_ipv6_recvrthdr
* ipc_ipv6_recvrtdstopts conn_ipv6_recvrtdstopts
* ipc_fully_bound conn_fully_bound
*
* ipc_recvif conn_recvif
*
* ipc_recvslla conn_recvslla
* ipc_acking_unbind conn_acking_unbind
* ipc_pad_to_bit_31 conn_pad_to_bit_31
*
* ipc_xmit_if_ill conn_xmit_if_ill
* ipc_nofailover_ill conn_nofailover_ill
*
* ipc_proto conn_proto
* ipc_incoming_ill conn_incoming_ill
* ipc_outgoing_pill conn_outgoing_pill
* ipc_pending_ill conn_pending_ill
* ipc_unbind_mp conn_unbind_mp
* ipc_ilg conn_ilg
* ipc_ilg_allocated conn_ilg_allocated
* ipc_ilg_inuse conn_ilg_inuse
* ipc_ilg_walker_cnt conn_ilg_walker_cnt
* ipc_refcv conn_refcv
* ipc_multicast_ipif conn_multicast_ipif
* ipc_multicast_ill conn_multicast_ill
* ipc_orig_bound_ifindex conn_orig_bound_ifindex
* ipc_orig_multicast_ifindex conn_orig_multicast_ifindex
* ipc_orig_xmit_ifindex conn_orig_xmit_ifindex
* ipc_drain_next conn_drain_next
* ipc_drain_prev conn_drain_prev
* ipc_idl conn_idl
*/
/*
* policy.
*/
typedef struct ipsec_selector {
/*
* Note that we put v4 addresses in the *first* 32-bit word of the
* in spd.c
*/
/* Values used in IP by IPSEC Code */
#define IPSEC_OUTBOUND B_TRUE
#define IPSEC_INBOUND B_FALSE
/*
* There are two variants in policy failures. The packet may come in
* secure when not needed (IPSEC_POLICY_???_NOT_NEEDED) or it may not
* have the desired level of protection (IPSEC_POLICY_MISMATCH).
*/
#define IPSEC_POLICY_NOT_NEEDED 0
#define IPSEC_POLICY_MISMATCH 1
#define IPSEC_POLICY_AUTH_NOT_NEEDED 2
#define IPSEC_POLICY_ENCR_NOT_NEEDED 3
#define IPSEC_POLICY_SE_NOT_NEEDED 4
/*
* The following macro is used whenever the code does not know whether there
* is a M_CTL present in the front and it needs to examine the actual mp
* i.e the IP header. As a M_CTL message could be in the front, this
* extracts the packet into mp and the M_CTL mp into first_mp. If M_CTL
* mp is not present, both first_mp and mp point to the same message.
*/
(mctl_present) = B_TRUE; \
} else { \
(mctl_present) = B_FALSE; \
}
/*
* Check with IPSEC inbound policy if
*
* 1) per-socket policy is present - indicated by conn_in_enforce_policy.
* 2) Or if we have not cached policy on the conn and the global policy is
* non-empty.
*/
((connp)->conn_in_enforce_policy || \
(!((connp)->conn_policy_cached) && \
((connp)->conn_in_enforce_policy || \
(!(connp)->conn_policy_cached && \
((connp)->conn_out_enforce_policy || \
(!((connp)->conn_policy_cached) && \
((connp)->conn_out_enforce_policy || \
(!(connp)->conn_policy_cached && \
/*
* Information cached in IRE for upper layer protocol (ULP).
*
* Notice that ire_max_frag is not included in the iulp_t structure, which
* it may seem that it should. But ire_max_frag cannot really be cached. It
* is fixed for each interface. For MTU found by PMTUd, we may want to cache
* it. But currently, we do not do that.
*/
typedef struct iulp_s {
/*
 * NOTE(review): truncated -- the cached-ULP members are missing and
 * the bit-field below lacks a base type; not valid C as shown.
 */
iulp_not_used : 28;
} iulp_t;
/* Zero iulp_t. */
extern const iulp_t ire_uinfo_null;
/*
* The conn drain list structure.
* The list is protected by idl_lock. Each conn_t inserted in the list
* points back at this idl_t using conn_idl. IP primes the draining of the
* conns queued in these lists, by qenabling the 1st conn of each list. This
* occurs when STREAMS backenables ip_wsrv on the IP module. Each conn instance
* of ip_wsrv successively qenables the next conn in the list.
* idl_lock protects all other members of idl_t and conn_drain_next
* and conn_drain_prev of conn_t. The conn_lock protects IPCF_DRAIN_DISABLED
* flag of the conn_t and conn_idl.
*/
typedef struct idl_s {
/*
 * NOTE(review): truncated -- idl_lock and the conn drain-list head
 * described in the comment above are missing, and the bit-field below
 * lacks a base type; not valid C as shown.
 */
/* drain list again */
idl_unused : 31;
} idl_t;
/*
* Interface route structure which holds the necessary information to recreate
* routes that are tied to an interface (namely where ire_ipif != NULL).
* These routes which were initially created via a routing socket or via the
* SIOCADDRT ioctl may be gateway routes (RTF_GATEWAY being set) or may be
* traditional interface routes. When an interface comes back up after being
* marked down, this information will be used to recreate the routes. These
* are part of an mblk_t chain that hangs off of the IPIF (ipif_saved_ire_mp).
*/
typedef struct ifrt_s {
	/* NOTE(review): member list missing in this revision -- truncated. */
} ifrt_t;
/* Number of IP addresses that can be hosted on a physical interface */
#define MAX_ADDRS_PER_IF 8192
/*
* Number of Source addresses to be considered for source address
* selection. Used by ipif_select_source[_v6].
*/
#define MAX_IPIF_SELECT_SOURCE 50
#ifdef IP_DEBUG
/*
* Tracing refholds and refreleases for debugging. The existing tracing
* mechanism should be revisited once dtrace is available.
*/
#define IP_STACK_DEPTH 15
typedef struct tr_buf_s {
int tr_depth; /* number of captured stack frames (<= IP_STACK_DEPTH, presumably) */
/* NOTE(review): the stack-address array member is missing -- truncated. */
} tr_buf_t;
typedef struct th_trace_s {
struct th_trace_s *th_next; /* next trace record in the chain */
struct th_trace_s **th_prev; /* back-link for unlinking */
int th_refcnt;
#define TR_BUF_MAX 38
/*
 * NOTE(review): the tr_buf_t ring (TR_BUF_MAX entries, presumably)
 * and other members are missing; the #define embedded mid-struct also
 * suggests stripped lines -- confirm against the full header.
 */
} th_trace_t;
#endif
/* The following are ipif_state_flags */
/* IP interface structure, one per local address */
typedef struct ipif_s {
/*
 * NOTE(review): this struct is heavily truncated in this revision --
 * most members are gone, leaving orphaned trailing comments, an
 * untyped "ipif_zoneid;" declarator and an "unsigned int" with only
 * its pad bit-field.  Restore from the full header; the protection
 * table following this definition lists the expected members.
 */
int ipif_id; /* Logical unit number */
/* prevent awkward out of mem */
/* condition later */
/* on this interface so that they */
/* can survive ifconfig down. */
/* will be reported on. Used when */
/* handling an igmp timeout. */
/*
 * The packet counts in the ipif contain the sum of the
 * packet counts in dead IREs that were affiliated with
 * this ipif.
 */
/* Exclusive bit fields, protected by ipsq_t */
unsigned int
ipif_pad_to_31 : 26;
int ipif_orig_ifindex; /* ifindex before SLIFFAILOVER */
ipif_zoneid; /* zone ID number */
#ifdef ILL_DEBUG
#define IP_TR_HASH_MAX 64
#endif
} ipif_t;
/*
* The following table lists the protection levels of the various members
* of the ipif_t. The following notation is used.
*
* Write once - Written to only once at the time of bringing up
* the interface and can be safely read after the bringup without any lock.
*
* ipsq - Need to execute in the ipsq to perform the indicated access.
*
* ill_lock - Need to hold this mutex to perform the indicated access.
*
* write access respectively.
*
* down ill - Written to only when the ill is down (i.e all ipifs are down)
* up ill - Read only when the ill is up (i.e. at least 1 ipif is up)
*
* Table of ipif_t members and their protection
*
* ipif_next ipsq + ill_lock + ipsq OR ill_lock OR
* ill_g_lock ill_g_lock
* ipif_ill ipsq + down ipif write once
* ipif_id ipsq + down ipif write once
* ipif_mtu ipsq
* ipif_v6lcl_addr ipsq + down ipif up ipif
* ipif_v6src_addr ipsq + down ipif up ipif
* ipif_v6subnet ipsq + down ipif up ipif
* ipif_v6net_mask ipsq + down ipif up ipif
*
* ipif_v6brd_addr
* ipif_v6pp_dst_addr
* ipif_flags ill_lock ill_lock
* ipif_metric
* ipif_ire_type ipsq + down ill up ill
*
* ipif_arp_del_mp ipsq ipsq
* ipif_saved_ire_mp ipif_saved_ire_lock ipif_saved_ire_lock
* ipif_igmp_rpt ipsq ipsq
*
* ipif_fo_pkt_count Approx
* ipif_ib_pkt_count Approx
* ipif_ob_pkt_count Approx
*
* bit fields ill_lock ill_lock
*
* ipif_orig_ifindex ipsq None
* ipif_orig_ipifid ipsq None
* ipif_seqid ipsq Write once
*
* ipif_state_flags ill_lock ill_lock
* ipif_refcnt ill_lock ill_lock
* ipif_ire_cnt ill_lock ill_lock
* ipif_saved_ire_cnt
*/
#ifdef ILL_DEBUG
#else
#define IPIF_TRACE_REF(ipif)
#define ILL_TRACE_REF(ill)
#define IPIF_UNTRACE_REF(ipif)
#define ILL_UNTRACE_REF(ill)
#define ILL_TRACE_CLEANUP(ill)
#define IPIF_TRACE_CLEANUP(ipif)
#endif
/* IPv4 compatability macros */
/* Macros for easy backreferences to the ill. */
/*
* mode value for ip_ioctl_finish for finishing an ioctl
*/
/*
* The IP-MT design revolves around the serialization object ipsq_t.
* It is associated with an IPMP group. If IPMP is not enabled, there is
* 1 ipsq_t per phyint. Eg. an ipsq_t would cover both hme0's IPv4 stream
*
* ipsq_lock protects
* ipsq_reentry_cnt, ipsq_writer, ipsq_xopq_mphead, ipsq_xopq_mptail,
* ipsq_mphead, ipsq_mptail, ipsq_split
*
* ipsq_pending_ipif, ipsq_current_ipif, ipsq_pending_mp, ipsq_flags,
* ipsq_waitfor
*
* The fields in the last line above below are set mostly by a writer thread
* But there is an exception in the last call to ipif_ill_refrele_tail which
* could also race with a conn close which could be cleaning up the
* fields. So we choose to protect using ipsq_lock instead of depending on
* the property of the writer.
* ill_g_lock protects
* ipsq_refs, ipsq_phyint_list
*/
typedef struct ipsq_s {
/*
 * NOTE(review): truncated -- ipsq_lock, ipsq_writer, ipsq_xopq_* and
 * the mp chains listed in the locking comment above are missing, and
 * the orphaned "response from another module" comment below has lost
 * the member it annotated.
 */
int ipsq_reentry_cnt;
int ipsq_flags;
int ipsq_current_ioctl; /* current ioctl, or 0 if no ioctl */
/* response from another module */
int ipsq_waitfor; /* Values encoded below */
#ifdef ILL_DEBUG
int ipsq_depth; /* debugging aid */
#endif
} ipsq_t;
/* ipsq_flags */
/*
* ipsq_waitfor:
*
* IPIF_DOWN 1 ipif_down waiting for refcnts to drop
* ILL_DOWN 2 ill_down waiting for refcnts to drop
* IPIF_FREE 3 ipif_free waiting for refcnts to drop
* ILL_FREE 4 ill unplumb waiting for refcnts to drop
* ILL_MOVE_OK 5 failover waiting for refcnts to drop
*/
/* Flags passed to ipsq_try_enter */
#define CUR_OP 0 /* Current ioctl continuing again */
/*
* phyint represents state that is common to both IPv4 and IPv6 interfaces.
* There is a separate ill_t representing IPv4 and IPv6 which has a
* backpointer to the phyint structure for accessing common state.
*
* NOTE : It just stores the group name as there is only one name for
* IPv4 and IPv6, i.e. it is an underlying link property. Actually
* IPv4 and IPv6 ill are grouped together when their phyints have
* the same name.
*/
typedef struct phyint {
struct ill_s *phyint_illv4; /* the IPv4 ill for this physical interface */
struct ill_s *phyint_illv6; /* the IPv6 ill for this physical interface */
char *phyint_groupname; /* SIOCSLIFGROUPNAME */
/*
 * Once Clearview IPMP is added the following two fields can be removed.
 * NOTE(review): the two fields that comment refers to are not visible
 * in this revision -- truncated.
 */
} phyint_t;
#define CACHE_ALIGN_SIZE 64
struct _phyint_list_s_ {
	/* NOTE(review): member list missing in this revision -- truncated. */
};
typedef union phyint_list_u {
struct _phyint_list_s_ phyint_list_s;
/*
* ILL groups. We group ills,
*
* - if the ills have the same group name. (New way)
*
* ill_group locking notes:
*
* illgrp_lock protects ill_grp_ill_schednext.
*
* ill_g_lock protects ill_grp_next, illgrp_ill, illgrp_ill_count.
* Holding ill_g_lock freezes the memberships of ills in IPMP groups.
* It also freezes the global list of ills and all ipifs in all ills.
*
* To remove an ipif from the linked list of ipifs of that ill ipif_free_tail
* holds both ill_g_lock, and ill_lock. Similarly to remove an ill from the
* global list of ills, ill_delete_glist holds ill_g_lock as writer.
* This simplifies things for ipif_select_source, illgrp_scheduler etc.
* that need to walk the members of an illgrp. They just hold ill_g_lock
* as reader to do the walk.
*
*/
typedef struct ill_group {
int illgrp_ill_count; /* number of ills in this IPMP group */
/*
 * NOTE(review): illgrp_next, illgrp_ill and illgrp_ill_schednext (see
 * the locking notes above) are missing here -- truncated.
 */
} ill_group_t;
/*
* Fragmentation hash bucket
*/
typedef struct ipfb_s {
	/*
	 * NOTE(review): member list missing (the per-bucket lock and the
	 * ipf_t chain head, presumably) -- truncated.
	 */
} ipfb_t;
/*
* IRE bucket structure. Usually there is an array of such structures,
* each pointing to a linked list of ires. irb_refcnt counts the number
* of walkers of a given hash bucket. Usually the reference count is
* bumped up if the walker wants no IRES to be DELETED while walking the
* list. Bumping up does not PREVENT ADDITION. This allows walking a given
* hash bucket without stumbling up on a free pointer.
*
* irb_t structures in ip_ftable are dynamically allocated and freed.
* In order to identify the irb_t structures that can be safely kmem_free'd
* we need to ensure that
* - the irb_refcnt is quiescent, indicating no other walkers,
* - no other threads or ire's are holding references to the irb,
* i.e., irb_nire == 0,
* - there are no active ire's in the bucket, i.e., irb_ire_cnt == 0
*/
typedef struct irb {
/* Should be first in this struct */
#define IRB_MARK_CONDEMNED 0x0001
#define IRB_MARK_FTABLE 0x0002
int irb_nire; /* Num of ftable ire's that ref irb */
/*
 * NOTE(review): the member the "Should be first" comment refers to,
 * plus irb_refcnt and irb_ire_cnt (both described in the comment
 * above), are missing here -- truncated.
 */
} irb_t;
/* The following are return values of ip_xmit_v4() */
typedef enum {
SEND_PASSED = 0, /* sent packet out on wire */
SEND_FAILED, /* sending of packet failed */
LOOKUP_IN_PROGRESS, /* ire cache found, ARP resolution in progress */
LLHDR_RESLV_FAILED /* macaddr resl of onlink dst or nexthop failed */
#define IP_V4_G_HEAD 0
#define IP_V6_G_HEAD 1
#define MAX_G_HEADS 2
/*
* unpadded ill_if structure
*/
struct _ill_if_s_ {
union ill_if_u *illif_next; /* next entry in the circular interface list */
union ill_if_u *illif_prev; /* previous entry in the circular interface list */
int illif_name_len; /* name length */
};
/* cache aligned ill_if structure */
typedef union ill_if_u {
/*
 * NOTE(review): the union presumably also carries padding to
 * CACHE_ALIGN_SIZE (not visible here) -- confirm against the full header.
 */
struct _ill_if_s_ ill_if_s;
} ill_if_t;
typedef struct ill_walk_context_s {
int ctx_current_list; /* current list being searched */
int ctx_last_list; /* last list to search */
/*
* ill_g_heads structure, one for IPV4 and one for IPV6
*/
struct _ill_g_head_s_ {
};
/* Cache-aligned wrapper for one ill_g_heads list head (IPv4 or IPv6). */
typedef union ill_g_head_u {
struct _ill_g_head_s_ ill_g_head_s;
} ill_g_head_t;
#define IP_V4_ILL_G_LIST(ipst) \
#define IP_V6_ILL_G_LIST(ipst) \
#define IP_VX_ILL_G_LIST(i, ipst) \
/*
* Capabilities, possible flags for ill_capabilities.
*/
/*
* Per-ill Multidata Transmit capabilities.
*/
typedef struct ill_mdt_capab_s ill_mdt_capab_t;
/*
* Per-ill IPsec capabilities.
*/
typedef struct ill_ipsec_capab_s ill_ipsec_capab_t;
/*
 * Per-ill Hardware Checksumming capabilities.
*/
typedef struct ill_hcksum_capab_s ill_hcksum_capab_t;
/*
* Per-ill Zero-copy capabilities.
*/
typedef struct ill_zerocopy_capab_s ill_zerocopy_capab_t;
/*
*/
typedef struct ill_dls_capab_s ill_dls_capab_t;
/*
* Per-ill polling resource map.
*/
typedef struct ill_rx_ring ill_rx_ring_t;
/*
* Per-ill Large Segment Offload capabilities.
*/
typedef struct ill_lso_capab_s ill_lso_capab_t;
/* The following are ill_state_flags */
/* Is this an ILL whose source address is used by other ILL's ? */
#define IS_USESRC_ILL(ill) \
(((ill)->ill_usesrc_ifindex == 0) && \
#define IS_USESRC_CLI_ILL(ill) \
(((ill)->ill_usesrc_ifindex != 0) && \
/* Is this a virtual network interface (vni) ILL? */
/* Is this a loopback ILL? */
#define IS_LOOPBACK(ill) \
/*
* IP Lower level Structure.
* Instance data structure in ip_open when there is a device below us.
*/
typedef struct ill_s {
int ill_error; /* Error value sent up by device. */
char *ill_name; /* Our name. */
char *ill_ndd_name; /* Name + ":ip?_forwarding" for NDD. */
/*
* Physical Point of Attachment num. If DLPI style 1 provider
* then this is derived from the devname.
*/
/* supports broadcast. */
int ill_mcast_type; /* type of router which is querier */
/* on this interface */
int ill_mcast_qi; /* IGMPv3/MLDv2 query interval var */
/*
* All non-NULL cells between 'ill_first_mp_to_free' and
* 'ill_last_mp_to_free' are freed in ill_delete.
*/
#define ill_first_mp_to_free ill_bcast_mp
#define ill_last_mp_to_free ill_phys_addr_mp
/* Following bit fields protected by ipsq_t */
ill_needs_attach : 1,
ill_reserved : 1,
ill_isv6 : 1,
ill_dlpi_style_set : 1,
ill_ifname_pending : 1,
ill_join_allmulti : 1,
ill_logical_down : 1,
ill_dl_up : 1,
ill_up_ipifs : 1,
ill_pad_to_bit_31 : 19;
/* Following bit fields protected by ill_lock */
ill_ilm_cleanup_reqd : 1,
ill_arp_closing : 1,
ill_pad_bit_31 : 25;
/*
* Used in SIOCSIFMUXID and SIOCGIFMUXID for 'ifconfig unplumb'.
*/
int ill_arp_muxid; /* muxid returned from plink for arp */
int ill_ip_muxid; /* muxid returned from plink for ip */
/*
* NIC event information attached, to be used by nic event hooks.
*/
/*
* Used for IP frag reassembly throttling on a per ILL basis.
*
* Note: frag_count is approximate, its added to and subtracted from
* collide, also ill_frag_purge() recalculates its value by
* summing all the ipfb_count's without locking out updates
* to the ipfb's.
*/
int ill_type; /* From <net/if_types.h> */
/*
* Capabilities related fields.
*/
/*
* New fields for IPv6
*/
/*
* Following two mblks are allocated common to all
* the ipifs when the first interface is coming up.
* It is sent up to arp when the last ipif is coming
* down.
*/
/*
* Used for implementing IFF_NOARP. As IFF_NOARP is used
* to turn off for all the logicals, it is here instead
* of the ipif.
*/
/* Peer ill of an IPMP move operation */
struct ill_s *ill_move_peer;
struct ill_s *ill_group_next;
/*
* Reverse tunnel related count. This count
* determines how many mobile nodes are using this
* ill to send packet to reverse tunnel via foreign
* agent. A non-zero count specifies presence of
* mobile node(s) using reverse tunnel through this
* interface.
*/
/*
* This count is bumped up when a route is added with
* RTA_SRCIFP bit flag using routing socket.
*/
/*
* Pointer to the special interface based routing table.
* This routing table is created dynamically when RTA_SRCIFP
* is set by the routing socket.
*/
/*
* The ill_nd_lla* fields handle the link layer address option
* from neighbor discovery. This is used for external IPv6
* address resolution.
*/
/*
* We now have 3 phys_addr_req's sent down. This field keeps track
* of which one is pending.
*/
/*
* Used to save errors that occur during plumbing
*/
void *ill_fastpath_list; /* both ire and nce hang off this */
/*
* Contains the upper read queue pointer of the module immediately
* beneath IP. This field allows IP to validate sub-capability
* acknowledgments coming up from downstream.
*/
#ifdef ILL_DEBUG
#endif
} ill_t;
extern void ill_delete_glist(ill_t *);
/*
* The following table lists the protection levels of the various members
* of the ill_t. Same notation as that used for ipif_t above is used.
*
* Write Read
*
* ill_ifptr ill_g_lock + s Write once
* ill_rq ipsq Write once
* ill_wq ipsq Write once
*
* ill_error ipsq None
* ill_ipif ill_g_lock + ipsq ill_g_lock OR ipsq
* ill_ipif_up_count ill_lock + ipsq ill_lock OR ipsq
* ill_max_frag ipsq Write once
*
* ill_name ill_g_lock + ipsq Write once
* ill_name_length ill_g_lock + ipsq Write once
* ill_ndd_name ipsq Write once
* ill_net_type ipsq Write once
* ill_ppa ill_g_lock + ipsq Write once
* ill_sap ipsq + down ill Write once
* ill_sap_length ipsq + down ill Write once
* ill_phys_addr_length ipsq + down ill Write once
*
* ill_bcast_addr_length ipsq ipsq
* ill_mactype ipsq ipsq
* ill_frag_ptr ipsq ipsq
*
* ill_frag_timer_id ill_lock ill_lock
* ill_frag_hash_tbl ipsq up ill
* ill_ilm ipsq + ill_lock ill_lock
* ill_mcast_type ill_lock ill_lock
* ill_mcast_v1_time ill_lock ill_lock
* ill_mcast_v2_time ill_lock ill_lock
* ill_mcast_v1_tset ill_lock ill_lock
* ill_mcast_v2_tset ill_lock ill_lock
* ill_mcast_rv ill_lock ill_lock
* ill_mcast_qi ill_lock ill_lock
* ill_pending_mp ill_lock ill_lock
*
* ill_bcast_mp ipsq ipsq
* ill_resolver_mp ipsq only when ill is up
* ill_down_mp ipsq ipsq
* ill_dlpi_deferred ill_lock ill_lock
* ill_dlpi_pending ill_lock ill_lock
* ill_phys_addr_mp ipsq + down ill only when ill is up
* ill_phys_addr ipsq + down ill only when ill is up
*
* ill_state_flags ill_lock ill_lock
* exclusive bit flags ipsq_t ipsq_t
* shared bit flags ill_lock ill_lock
*
* ill_arp_muxid ipsq Not atomic
* ill_ip_muxid ipsq Not atomic
*
* ill_ipf_gen Not atomic
* ill_frag_count Approx. not protected
* ill_type ipsq + down ill only when ill is up
* ill_dlpi_multicast_state ill_lock ill_lock
* ill_dlpi_fastpath_state ill_lock ill_lock
* ill_max_hops ipsq Not atomic
*
* ill_max_mtu
*
* ill_reachable_time ipsq + ill_lock ill_lock
* ill_reachable_retrans_time ipsq + ill_lock ill_lock
* ill_max_buf ipsq + ill_lock ill_lock
*
* Next 2 fields need ill_lock because of the get ioctls. They should not
* report partially updated results without executing in the ipsq.
* ill_token ipsq + ill_lock ill_lock
* ill_token_length ipsq + ill_lock ill_lock
* ill_xmit_count ipsq + down ill write once
* ill_ip6_mib ipsq + down ill only when ill is up
* ill_icmp6_mib ipsq + down ill only when ill is up
* ill_arp_down_mp ipsq ipsq
* ill_arp_del_mapping_mp ipsq ipsq
* ill_arp_on_mp ipsq ipsq
* ill_move_peer ipsq ipsq
*
* ill_phyint ipsq, ill_g_lock, ill_lock Any of them
* ill_flags ill_lock ill_lock
* ill_group ipsq, ill_g_lock, ill_lock Any of them
* ill_group_next ipsq, ill_g_lock, ill_lock Any of them
* ill_mrtun_refcnt ill_lock ill_lock
* ill_srcif_refcnt ill_lock ill_lock
* ill_srcif_table ill_lock ill_lock
* ill_nd_lla_mp ipsq + down ill only when ill is up
* ill_nd_lla ipsq + down ill only when ill is up
* ill_nd_lla_len ipsq + down ill only when ill is up
* ill_phys_addr_pend ipsq + down ill only when ill is up
* ill_ifname_pending_err ipsq ipsq
* ill_avl_byppa ipsq, ill_g_lock write once
*
* ill_fastpath_list ill_lock ill_lock
* ill_refcnt ill_lock ill_lock
* ill_ire_cnt ill_lock ill_lock
* ill_cv ill_lock ill_lock
* ill_ilm_walker_cnt ill_lock ill_lock
* ill_nce_cnt ill_lock ill_lock
* ill_trace ill_lock ill_lock
* ill_usesrc_grp_next ill_g_usesrc_lock ill_g_usesrc_lock
*/
/*
* For ioctl restart mechanism see ip_reprocess_ioctl()
*/
struct ip_ioctl_cmd_s;
struct ip_ioctl_cmd_s *, void *);
typedef struct ip_ioctl_cmd_s {
int ipi_cmd;
/*
* ipi_cmd_type:
*
* IF_CMD 1 old style ifreq cmd
* LIF_CMD 2 new style lifreq cmd
* MISC_CMD 3 Misc. (non [l]ifreq, tun) cmds
* TUN_CMD 4 tunnel related
*/
#define IPI_DONTCARE 0 /* For ioctl encoded values that don't matter */
/* Flag values in ipi_flags */
extern ip_ioctl_cmd_t ip_ndx_ioctl_table[];
extern ip_ioctl_cmd_t ip_misc_ioctl_table[];
extern int ip_ndx_ioctl_count;
extern int ip_misc_ioctl_count;
#define ILL_CLEAR_MOVE(ill) { \
\
}
typedef struct ipmx_s {
ipmx_notused : 31;
} ipmx_t;
/*
* State for detecting if a driver supports certain features.
* Support for DL_ENABMULTI_REQ uses ill_dlpi_multicast_state.
* Support for DLPI M_DATA fastpath uses ill_dlpi_fastpath_state.
* Support for DL_CAPABILITY_REQ uses ill_dlpi_capab_state.
*/
#define IDS_UNKNOWN 0 /* No DLPI request sent */
/* Named Dispatch Parameter Management Structure */
typedef struct ipparam_s {
char *ip_param_name; /* tunable's name as shown/set via ndd(1M) */
} ipparam_t;
/* Extended NDP Management Structure */
typedef struct ipndp_s {
char *ip_ndp_name; /* name of this extended NDP parameter */
} ipndp_t;
/*
* The kernel stores security attributes of all gateways in a database made
* up of one or more tsol_gcdb_t elements. Each tsol_gcdb_t contains the
* security-related credentials of the gateway. More than one gateways may
* share entries in the database.
*
* The tsol_gc_t structure represents the gateway to credential association,
* and refers to an entry in the database. One or more tsol_gc_t entities are
* grouped together to form one or more tsol_gcgrp_t, each representing the
* list of security attributes specific to the gateway. A gateway may be
* associated with at most one credentials group.
*/
struct tsol_gcgrp_s;
/*
* Gateway security credential record.
*/
typedef struct tsol_gcdb_s {
} tsol_gcdb_t;
/*
* Gateway to credential association.
*/
typedef struct tsol_gc_s {
} tsol_gc_t;
/*
* Gateway credentials group address.
*/
typedef struct tsol_gcgrp_addr_s {
int ga_af; /* address family */
/*
* Gateway credentials group.
*/
typedef struct tsol_gcgrp_s {
} tsol_gcgrp_t;
extern kmutex_t gcgrp_lock;
#define GC_REFRELE(p) { \
if (--((p)->gc_refcnt) == 0) \
gc_inactive(p); \
else \
}
/*
 * Take a reference on gateway-credentials group 'p' under gcgrp_lock.
 * The ASSERT catches reference-count wraparound to zero.
 */
#define GCGRP_REFHOLD(p) { \
mutex_enter(&gcgrp_lock); \
++((p)->gcgrp_refcnt); \
ASSERT((p)->gcgrp_refcnt != 0); \
mutex_exit(&gcgrp_lock); \
}
/*
 * Drop a reference on gateway-credentials group 'p' under gcgrp_lock;
 * the last release tears the group down via gcgrp_inactive().
 * NOTE(review): gcgrp_inactive() is invoked with gcgrp_lock still held
 * and mutex_exit follows it -- confirm gcgrp_inactive() does not free
 * the lock's containing object itself.
 */
#define GCGRP_REFRELE(p) { \
mutex_enter(&gcgrp_lock); \
ASSERT((p)->gcgrp_refcnt > 0); \
if (--((p)->gcgrp_refcnt) == 0) \
gcgrp_inactive(p); \
mutex_exit(&gcgrp_lock); \
}
/*
* IRE gateway security attributes structure, pointed to by tsol_ire_gw_secattr
*/
struct tsol_tnrhc;
typedef struct tsol_ire_gw_secattr_s {
/*
* count of the IREs and IRBs (ire bucket).
*
* 1) We bump up the reference count of an IRE to make sure that
* it does not get deleted and freed while we are using it.
* Typically all the lookup functions hold the bucket lock,
* and look for the IRE. If it finds an IRE, it bumps up the
* reference count before dropping the lock. Sometimes we *may* want
* to bump up the reference count after we *looked* up i.e without
* holding the bucket lock. So, the IRE_REFHOLD macro does not assert
* on the bucket lock being held. Any thread trying to delete from
* the hash bucket can still do so but cannot free the IRE if
* ire_refcnt is not 0.
*
* 2) We bump up the reference count on the bucket where the IRE resides
* (IRB), when we want to prevent the IREs getting deleted from a given
* hash bucket. This makes life easier for ire_walk type functions which
* wants to walk the IRE list, call a function, but needs to drop
* the bucket lock to prevent recursive rw_enters. While the
* lock is dropped, the list could be changed by other threads or
* the same thread could end up deleting the ire or the ire pointed by
* ire_next. IRE_REFHOLDing the ire or ire_next is not sufficient as
* a delete will still remove the ire from the bucket while we have
* dropped the lock and hence the ire_next would be NULL. Thus, we
* need a mechanism to prevent deletions from a given bucket.
*
* To prevent deletions, we bump up the reference count on the
* bucket. If the bucket is held, ire_delete just marks IRE_MARK_CONDEMNED
* both on the ire's ire_marks and the bucket's irb_marks. When the
* reference count on the bucket drops to zero, all the CONDEMNED ires
* are deleted. We don't have to bump up the reference count on the
* bucket if we are walking the bucket and never have to drop the bucket
* lock. Note that IRB_REFHOLD does not prevent addition of new ires
* in the list. It is okay because addition of new ires will not cause
* ire_next to point to freed memory. We do IRB_REFHOLD only when
* all of the 3 conditions are true :
*
* 1) The code needs to walk the IRE bucket from start to end.
* 2) It may have to drop the bucket lock sometimes while doing (1)
* 3) It does not want any ires to be deleted meanwhile.
*/
/*
* Bump up the reference count on the IRE. We cannot assert that the
* bucket lock is being held as it is legal to bump up the reference
* count after the first lookup has returned the IRE without
* holding the lock. Currently ip_wput does this for caching IRE_CACHEs.
*/
#ifndef IRE_DEBUG
#define IRE_UNTRACE_REF(ire)
#define IRE_TRACE_REF(ire)
#else
#define IRE_REFHOLD_NOTR(ire) { \
}
#endif
#define IRE_REFHOLD(ire) { \
IRE_TRACE_REF(ire); \
}
#define IRE_REFHOLD_LOCKED(ire) { \
IRE_TRACE_REF(ire); \
(ire)->ire_refcnt++; \
}
/*
* Decrement the reference count on the IRE.
 * On architectures, e.g. sun4u, where atomic_add_32_nv is just
* a cas, we need to maintain the right memory barrier semantics
* as that of mutex_exit i.e all the loads and stores should complete
* before the cas is executed. membar_exit() does that here.
*
* NOTE : This macro is used only in places where we want performance.
* To avoid bloating the code, we use the function "ire_refrele"
* which essentially calls the macro.
*/
#ifndef IRE_DEBUG
#define IRE_REFRELE(ire) { \
membar_exit(); \
ire_inactive(ire); \
}
#else
#define IRE_REFRELE(ire) { \
ire_untrace_ref(ire); \
membar_exit(); \
ire_inactive(ire); \
}
#define IRE_REFRELE_NOTR(ire) { \
membar_exit(); \
ire_inactive(ire); \
}
#endif
/*
* Bump up the reference count on the hash bucket - IRB to
* prevent ires from being deleted in this bucket.
*/
#define IRB_REFHOLD(irb) { \
(irb)->irb_refcnt++; \
}
#define IRB_REFHOLD_LOCKED(irb) { \
(irb)->irb_refcnt++; \
}
void irb_refrele_ftable(irb_t *);
/*
* Note: when IRB_MARK_FTABLE (i.e., IRE_CACHETABLE entry), the irb_t
* is statically allocated, so that when the irb_refcnt goes to 0,
* we simply clean up the ire list and continue.
*/
#define IRB_REFRELE(irb) { \
irb_refrele_ftable((irb)); \
} else { \
if (--(irb)->irb_refcnt == 0 && \
\
ire_cleanup(ire_list); \
} else { \
} \
} \
}
extern struct kmem_cache *rt_entry_cache;
/*
* Lock the fast path mp for access, since the fp_mp can be deleted
 * due to a DL_NOTE_FASTPATH_FLUSH in the case of IRE_BROADCAST and IRE_MIPRTUN
*/
#define LOCK_IRE_FP_MP(ire) { \
}
#define UNLOCK_IRE_FP_MP(ire) { \
}
typedef struct ire4 {
} ire4_t;
typedef struct ire6 {
} ire6_t;
typedef union ire_addr {
} ire_addr_u_t;
/* Internet Routing Entry */
typedef struct ire_s {
union {
} imf_u;
/*
* Neighbor Cache Entry for IPv6; arp info for IPv4
*/
/*
* Protects ire_uinfo, ire_max_frag, and ire_frag_flag.
*/
/*
* For regular routes in forwarding table and cache table the
* table and interface based forwarding table use these fields.
* Routes added with RTA_SRCIFP and RTA_SRC respectively have
* non-zero values for the following fields.
*/
/* source ip-addr of incoming packet */
/*
* ire's that are embedded inside mblk_t and sent to the external
* resolver use the ire_stq_ifindex to track the ifindex of the
* ire_stq, so that the ill (if it exists) can be correctly recovered
* for cleanup in the esbfree routine when arp failure occurs
*/
#ifdef IRE_DEBUG
#endif
} ire_t;
/* IPv4 compatibility macros */
/* Convenient typedefs for sockaddrs */
typedef struct sockaddr_in sin_t;
typedef struct sockaddr_in6 sin6_t;
/* Address structure used for internal bind with IP */
typedef struct ipa_conn_s {
} ipa_conn_t;
typedef struct ipa6_conn_s {
} ipa6_conn_t;
/*
* Using ipa_conn_x_t or ipa6_conn_x_t allows us to modify the behavior of IP's
* bind handler.
*/
typedef struct ipa_conn_extended_s {
} ipa_conn_x_t;
typedef struct ipa6_conn_extended_s {
/* flag values for ipa_conn_x_t and ipa6_conn_x_t. */
/* Name/value table entry (e.g. ire_nv_tbl, used by ip_nv_lookup()). */
typedef struct nv_s {
char *nv_name; /* printable name associated with the value */
} nv_t;
/* IP Forwarding Ticket */
typedef struct ipftk_s {
} ipftk_t;
typedef struct ipt_s {
} ipt_t;
#define ILL_FRAG_HASH(s, i) \
/*
* The MAX number of allowed fragmented packets per hash bucket
* calculation is based on the most common mtu size of 1500. This limit
* will work well for other mtu sizes as well.
*/
#define COMMON_IP_MTU 1500
#define MAX_FRAG_MIN 10
#define MAX_FRAG_PKTS(ipst) \
/*
* Maximum dups allowed per packet.
*/
extern uint_t ip_max_frag_dups;
/*
* Per-packet information for received packets and transmitted.
* Used by the transport protocols when converting between the packet
* and ancillary data and socket options.
*
 * Note: This private data structure and related IPPF_* constant
 * definitions are exposed to enable compilation of some debugging tools.
 * This is a temporary hack; long term, alternate interfaces should be
 * defined to support the needs of such tools, and these private
 * definitions should move to private headers.
*/
struct ip6_pkt_s {
struct ip6_mtuinfo *ipp_pathmtu; /* path-MTU record; valid when IPPF_PATHMTU is set in ipp_fields */
};
/*
* This struct is used by ULP_opt_set() functions to return value of IPv4
* ancillary options. Currently this is only used by udp and icmp and only
* IP_PKTINFO option is supported.
*/
typedef struct ip4_pkt_s {
} ip4_pkt_t;
/*
* Used by ULP's to pass options info to ip_output
* currently only IP_PKTINFO is supported.
*/
typedef struct ip_opt_info_s {
/*
* value for ip_opt_flags
*/
#define IP_VERIFY_SRC 0x1
/*
* This structure is used to convey information from IP and the ULP.
* Currently used for the IP_RECVSLLA, IP_RECVIF and IP_RECVPKTINFO options.
* The type of information field is set to IN_PKTINFO (i.e inbound pkt info)
*/
typedef struct ip_pktinfo {
} ip_pktinfo_t;
/*
* flags to tell UDP what IP is sending; in_pkt_flags
*/
/*
* Inbound interface index + matched address.
* Used only by IPV4.
*/
#define IPF_RECVADDR 0x04
/* ipp_fields values */
#define IPPF_HOPLIMIT 0x0020
#define IPPF_HOPOPTS 0x0040
#define IPPF_RTHDR 0x0080
#define IPPF_RTDSTOPTS 0x0100
#define IPPF_DSTOPTS 0x0200
#define IPPF_NEXTHOP 0x0400
#define IPPF_PATHMTU 0x0800
#define IPPF_TCLASS 0x1000
#define IPPF_DONTFRAG 0x2000
#define IPPF_USE_MIN_MTU 0x04000
#define IPPF_MULTICAST_HOPS 0x08000
#define IPPF_UNICAST_HOPS 0x10000
#define IPPF_FRAGHDR 0x20000
#define IPPF_HAS_IP6I \
#define TCP_PORTS_OFFSET 0
#define UDP_PORTS_OFFSET 0
/*
* ill / ipif lookup functions increment the refcnt on the ill / ipif only
* after calling these macros. This ensures that the refcnt on the ipif or
* ill will eventually drop down to zero.
*/
#define ILL_CAN_LOOKUP(ill) \
#define ILL_CAN_WAIT(ill, q) \
#define ILL_CAN_LOOKUP_WALKER(ill) \
#define IPIF_CAN_LOOKUP(ipif) \
/*
* If the parameter 'q' is NULL, the caller is not interested in wait and
* restart of the operation if the ILL or IPIF cannot be looked up when it is
* marked as 'CHANGING'. Typically a thread that tries to send out data will
* end up passing NULLs as the last 4 parameters to ill_lookup_on_ifindex and
* in this case 'q' is NULL
*/
#define IPIF_CAN_WAIT(ipif, q) \
#define IPIF_CAN_LOOKUP_WALKER(ipif) \
/*
* These macros are used by critical set ioctls and failover ioctls to
* mark the ipif appropriately before starting the operation and to clear the
* marks after completing the operation.
*/
#define IPIF_UNMARK_MOVING(ipif) \
#define ILL_UNMARK_CHANGING(ill) \
/* Macros used to assert that this thread is a writer */
#define IAM_WRITER_ILL(ill) \
#define IAM_WRITER_IPIF(ipif) \
/*
* Grab ill locks in the proper order. The order is highest addressed
* ill is locked first.
*/
{ \
} else { \
} \
}
{ \
}
/* Get the other protocol instance ill */
#define MATCH_V4_ONLY 0x1
#define MATCH_V6_ONLY 0x2
#define MATCH_ILL_ONLY 0x4
/* ioctl command info: Ioctl properties extracted and stored in here */
typedef struct cmd_info_s
{
} cmd_info_t;
/*
* List of AH and ESP IPsec acceleration capable ills
*/
typedef struct ipsec_capab_ill_s {
struct ipsec_capab_ill_s *next;
extern struct kmem_cache *ire_cache;
extern ipaddr_t ip_g_all_ones;
extern vmem_t *ip_minor_arena;
/*
* ip_g_forward controls IP forwarding. It takes two values:
* 0: IP_FORWARD_NEVER Don't forward packets ever.
* 1: IP_FORWARD_ALWAYS Forward packets for elsewhere.
*
* RFC1122 says there must be a configuration switch to control forwarding,
* but that the default MUST be to not forward packets ever. Implicit
* control based on configuration of multiple interfaces MUST NOT be
* implemented (Section 3.1). SunOS 4.1 did provide the "automatic" capability
* and, in fact, it was the default. That capability is now provided in the
*/
/* IPv6 configuration knobs */
extern int dohwcksum; /* use h/w cksum if supported by the h/w */
#ifdef ZC_TEST
extern int noswcksum;
#endif
extern char ipif_loopback_name[];
extern nv_t *ire_nv_tbl;
extern struct module_info ip_mod_info;
#define HOOKS4_INTERESTED_PHYSICAL_IN(ipst) \
#define HOOKS6_INTERESTED_PHYSICAL_IN(ipst) \
#define HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) \
#define HOOKS6_INTERESTED_PHYSICAL_OUT(ipst) \
#define HOOKS4_INTERESTED_FORWARDING(ipst) \
#define HOOKS6_INTERESTED_FORWARDING(ipst) \
#define HOOKS4_INTERESTED_LOOPBACK_IN(ipst) \
#define HOOKS6_INTERESTED_LOOPBACK_IN(ipst) \
#define HOOKS4_INTERESTED_LOOPBACK_OUT(ipst) \
#define HOOKS6_INTERESTED_LOOPBACK_OUT(ipst) \
/*
 * Hook macros used inside of ip
*/
#define IPHA_VHL ipha_version_and_hdr_length
\
if ((_hook).he_interested) { \
\
\
else \
\
else \
ipst->ips_netstack) != 0) { \
ip2dbg(("%s hook dropped mblk chain %p hdr %p\n",\
} \
} else { \
} \
}
\
if ((_hook).he_interested) { \
\
\
else \
\
else \
ipst->ips_netstack) != 0) { \
ip2dbg(("%s hook dropped mblk chain %p hdr %p\n",\
} \
} else { \
} \
}
/*
* Network byte order macros
*/
#ifdef _BIG_ENDIAN
#define N_IN_CLASSD_NET IN_CLASSD_NET
#else /* _BIG_ENDIAN */
#endif /* _BIG_ENDIAN */
#define IP_LOOPBACK_ADDR(addr) \
#ifdef DEBUG
/* IPsec HW acceleration debugging support */
#define IPSECHW_CALL(f, r, x) if (ipsechw_debug & (f)) { (void) r x; }
extern uint32_t ipsechw_debug;
#else
#define IPSECHW_DEBUG(f, x) {}
#define IPSECHW_CALL(f, r, x) {}
#endif
extern int ip_debug;
#ifdef IP_DEBUG
#else
#define ip0dbg(a) /* */
#define ip1dbg(a) /* */
#define ip2dbg(a) /* */
#define ip3dbg(a) /* */
#endif /* IP_DEBUG */
/* Default MAC-layer address string length for mac_colon_addr */
#define MAC_STR_LEN 128
struct ipsec_out_s;
struct mac_header_info_s;
extern const char *dlpi_prim_str(int);
extern const char *dlpi_err_str(int);
extern void ill_frag_timer(void *);
extern void ill_frag_timer_start(ill_t *);
extern void ill_nic_info_dispatch(ill_t *);
extern char *ip_dot_addr(ipaddr_t, char *);
ip_stack_t *);
ip_stack_t *);
ip_stack_t *);
extern void ip_ire_g_fini(void);
extern void ip_ire_g_init(void);
extern void ip_ire_fini(ip_stack_t *);
extern void ip_ire_init(ip_stack_t *);
size_t);
int, int, mblk_t *);
struct mac_header_info_s *);
extern int ip_snmpmod_close(queue_t *);
extern void ip_mib2_add_ip_stats(mib2_ipIfStatsEntry_t *,
extern void ip_mib2_add_icmp6_stats(mib2_ipv6IfIcmpEntry_t *,
extern void ip_trash_ire_reclaim(void *);
extern void ip_trash_timer_expire(void *);
extern void ip_output_options(void *, mblk_t *, void *, int,
ip_opt_info_t *);
zoneid_t);
int, zoneid_t);
extern char *ip_nv_lookup(nv_t *, int);
zoneid_t, ip_stack_t *);
extern struct qinit rinit_ipv6;
extern struct qinit winit_ipv6;
extern struct qinit rinit_tcp6;
extern struct qinit rinit_acceptor_tcp;
extern struct qinit winit_acceptor_tcp;
ire_t *);
extern int ipsec_out_extra_length(mblk_t *);
extern int ipsec_in_extra_length(mblk_t *);
extern void ipsec_out_to_in(mblk_t *);
extern void ire_cleanup(ire_t *);
extern void ire_inactive(ire_t *);
#ifdef IRE_DEBUG
#endif
#ifdef ILL_DEBUG
extern void ill_trace_cleanup(ill_t *);
extern void ipif_trace_cleanup(ipif_t *);
#endif
ip_stack_t *);
extern void ip_quiesce_conn(conn_t *);
uint_t);
uint_t);
extern void conn_ioctl_cleanup(conn_t *);
struct multidata_s;
struct pdesc_s;
ill_lso_capab_t *);
const mblk_t *);
uint_t);
extern void tnet_init(void);
extern void tnet_fini(void);
/* Hooks for CGTP (multirt routes) filtering module */
#define CGTP_FILTER_REV_1 1
#define CGTP_FILTER_REV_2 2
#define CGTP_FILTER_REV CGTP_FILTER_REV_2
/* cfo_filter, cfo_filter_fp, cfo_filter_v6 hooks return values */
#define CGTP_IP_PKT_NOT_CGTP 0
#define CGTP_IP_PKT_PREMIUM 1
#define CGTP_IP_PKT_DUPLICATE 2
typedef struct cgtp_filter_ops {
int cfo_filter_rev;
int (*cfo_change_state)(int);
in6_addr_t *);
#define CGTP_MCAST_SUCCESS 1
/*
* The separate CGTP module needs these as globals. It uses the first
* to unregister (since there is no ip_cgtp_filter_unregister() function)
* and it uses the second one to verify that the filter has been
* turned off (a ip_cgtp_filter_active() function would be good for that.)
*/
extern cgtp_filter_ops_t *ip_cgtp_filter_ops;
extern boolean_t ip_cgtp_filter;
extern int ip_cgtp_filter_supported(void);
extern int ip_cgtp_filter_register(cgtp_filter_ops_t *);
/* Flags for ire_multirt_lookup() */
#define MULTIRT_USESTAMP 0x0001
#define MULTIRT_SETSTAMP 0x0002
#define MULTIRT_CACHEGW 0x0004
/* Debug stuff for multirt route resolution. */
/* Our "don't send, rather drop" flag. */
#define MULTIRT_DEBUG_FLAG 0x8000
#define MULTIRT_TRACE(x) ip2dbg(x)
#define MULTIRT_DEBUG_TAG(mblk) \
do { \
MULTIRT_TRACE(("%s[%d]: tagging mblk %p, tag was %d\n", \
} while (0)
#define MULTIRT_DEBUG_UNTAG(mblk) \
do { \
MULTIRT_TRACE(("%s[%d]: untagging mblk %p, tag was %d\n", \
} while (0)
#define MULTIRT_DEBUG_TAGGED(mblk) \
#else
#endif
/*
* Per-ILL Multidata Transmit capabilities.
*/
struct ill_mdt_capab_s {
};
struct ill_hcksum_capab_s {
};
struct ill_zerocopy_capab_s {
};
struct ill_lso_capab_s {
};
/* Possible ill_states */
#define ILL_RING_FREE 0 /* Available to be assigned to Ring */
/*
* we need to duplicate the definitions here because we cannot
*/
typedef void (*ip_dld_tx_t)(void *, mblk_t *);
typedef void (*ip_dls_chg_soft_ring_t)(void *, int);
typedef void (*ip_dls_bind_t)(void *, processorid_t);
typedef void (*ip_dls_unbind_t)(void *);
/* Per-ill receive-ring state used by IP's polling/soft-ring machinery. */
struct ill_rx_ring {
void *rr_handle; /* Handle for Rx ring */
};
struct ill_dls_capab_s {
void *ill_tx_handle; /* Driver Tx handle */
/* change soft ring fanout */
};
/*
* This message is sent by an upper-layer protocol to tell IP that it knows all
* about labels and will construct them itself. IP takes the slow path and
* recomputes the label on every packet when this isn't true.
*/
typedef struct out_labeled_s {
/*
* IP squeues exports
*/
extern int ip_squeue_profile;
extern int ip_squeue_bind;
extern boolean_t ip_squeue_fanout;
extern boolean_t ip_squeue_soft_ring;
extern uint_t ip_threads_per_cpu;
extern uint_t ip_squeues_per_cpu;
extern uint_t ip_soft_rings_cnt;
/*
 * A set of serialization queues (squeues), typically one set per CPU.
 * NOTE(review): field meanings inferred from names -- confirm in ip_squeue.c.
 */
typedef struct squeue_set_s {
int sqs_size; /* current number of squeues in the set */
int sqs_max_size; /* capacity of the set */
} squeue_set_t;
#define IP_SQUEUE_GET(hint) \
extern void ip_squeue_init(void (*)(squeue_t *));
extern void ip_squeue_clean_all(ill_t *);
mblk_t *, struct mac_header_info_s *);
extern void ip_resume_tcp_bind(void *, mblk_t *, void *);
struct ip6_mtuinfo *, netstack_t *);
/*
* Squeue tags. Tags only need to be unique when the callback function is the
* same to distinguish between different calls, but we use unique tags for
* convenience anyway.
*/
#define SQTAG_IP_INPUT 1
#define SQTAG_TCP_INPUT_ICMP_ERR 2
#define SQTAG_TCP6_INPUT_ICMP_ERR 3
#define SQTAG_IP_TCP_INPUT 4
#define SQTAG_IP6_TCP_INPUT 5
#define SQTAG_IP_TCP_CLOSE 6
#define SQTAG_TCP_OUTPUT 7
#define SQTAG_TCP_TIMER 8
#define SQTAG_TCP_TIMEWAIT 9
#define SQTAG_TCP_ACCEPT_FINISH 10
#define SQTAG_TCP_ACCEPT_FINISH_Q0 11
#define SQTAG_TCP_ACCEPT_PENDING 12
#define SQTAG_TCP_LISTEN_DISCON 13
#define SQTAG_TCP_CONN_REQ_1 14
#define SQTAG_TCP_EAGER_BLOWOFF 15
#define SQTAG_TCP_EAGER_CLEANUP 16
#define SQTAG_TCP_EAGER_CLEANUP_Q0 17
#define SQTAG_TCP_CONN_IND 18
#define SQTAG_TCP_RSRV 19
#define SQTAG_TCP_ABORT_BUCKET 20
#define SQTAG_TCP_REINPUT 21
#define SQTAG_TCP_REINPUT_EAGER 22
#define SQTAG_TCP_INPUT_MCTL 23
#define SQTAG_TCP_RPUTOTHER 24
#define SQTAG_IP_PROTO_AGAIN 25
#define SQTAG_IP_FANOUT_TCP 26
#define SQTAG_IPSQ_CLEAN_RING 27
#define SQTAG_TCP_WPUT_OTHER 28
#define SQTAG_TCP_CONN_REQ_UNBOUND 29
#define SQTAG_TCP_SEND_PENDING 30
#define SQTAG_BIND_RETRY 31
#define SQTAG_UDP_FANOUT 32
#define SQTAG_UDP_INPUT 33
#define SQTAG_UDP_WPUT 34
#define SQTAG_UDP_OUTPUT 35
#define SQTAG_TCP_KSSL_INPUT 36
#define SQTAG_TCP_DROP_Q0 37
#define SQTAG_TCP_CONN_REQ_2 38
#define NOT_OVER_IP(ip_wq) \
IP_MOD_NAME) != 0 || \
#endif /* _KERNEL */
#ifdef __cplusplus
}
#endif
#endif /* _INET_IP_H */