/* spd.c revision d2f8a3dfec697dbc0b43dfc6265ae2fa615da951 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* IPsec Security Policy Database.
*
* This module maintains the SPD and provides routines used by ip and ip6
* to apply IPsec policy to inbound and outbound datagrams.
*/
#include <sys/sysmacros.h>
#include <net/pfpolicy.h>
#include <inet/ipsec_info.h>
#include <inet/ipsec_impl.h>
#include <inet/ipsecesp.h>
#include <inet/ipclassifier.h>
static void ipsec_update_present_flags(ipsec_stack_t *);
netstack_t *);
static void ipsec_out_free(void *);
static void ipsec_in_free(void *);
ipsec_selector_t *, netstack_t *);
ipsec_selector_t *, netstack_t *);
static void ipsec_in_release_refs(ipsec_in_t *);
static void ipsec_out_release_refs(ipsec_out_t *);
static void ipsec_action_free_table(ipsec_action_t *);
static void ipsec_action_reclaim(void *);
static void ipsec_action_reclaim_stack(netstack_t *);
static void ipsid_init(netstack_t *);
static void ipsid_fini(netstack_t *);
/* sel_flags values for ipsec_init_inbound_sel(). */
#define SEL_NONE 0x0000
#define SEL_PORT_POLICY 0x0001
#define SEL_IS_ICMP 0x0002
#define SEL_TUNNEL_MODE 0x0004
/* Return values for ipsec_init_inbound_sel(). */
kstat_named_t **);
static void ipsec_unregister_prov_update(void);
static void ipsec_kstat_destroy(ipsec_stack_t *);
static int ipsec_free_tables(ipsec_stack_t *);
static int tunnel_compare(const void *, const void *);
static void ipsec_freemsg_chain(mblk_t *);
struct kstat_named *, ipdropper_t *);
static void ipsec_kstat_destroy(ipsec_stack_t *);
static int ipsec_free_tables(ipsec_stack_t *);
static int tunnel_compare(const void *, const void *);
static void ipsec_freemsg_chain(mblk_t *);
struct kstat_named *, ipdropper_t *);
/*
* Selector hash table is statically sized at module load time.
* We default to 251 buckets, which is the largest prime number under 255.
*/
#define IPSEC_SPDHASH_DEFAULT 251
/* SPD hash-size tunable per tunnel. */
#define TUN_SPDHASH_DEFAULT 5
#define IPSEC_SEL_NOHASH ((uint32_t)(~0))
/*
* Handle global across all stack instances
*/
static kmem_cache_t *ipsec_action_cache;
static kmem_cache_t *ipsec_sel_cache;
static kmem_cache_t *ipsec_pol_cache;
static kmem_cache_t *ipsec_info_cache;
/* Frag cache prototypes */
static void ipsec_fragcache_clean(ipsec_fragcache_t *);
static ipsec_fragcache_entry_t *fragcache_delentry(int,
void ipsec_fragcache_uninit(ipsec_fragcache_t *);
ipsec_stack_t *);
int ipsec_hdr_pullup_needed = 0;
int ipsec_weird_null_inbound_policy = 0;
/*
* Inbound traffic should have matching identities for both SAs.
*/
/*
* IPv4 Fragments
*/
#define IS_V4_FRAGMENT(ipha_fragment_offset_and_flags) \
/*
* IPv6 Fragments
*/
/*
* Policy failure messages.
*/
static char *ipsec_policy_failure_msgs[] = {
/* IPSEC_POLICY_NOT_NEEDED */
"%s: Dropping the datagram because the incoming packet "
"is %s, but the recipient expects clear; Source %s, "
"Destination %s.\n",
/* IPSEC_POLICY_MISMATCH */
"%s: Policy Failure for the incoming packet (%s); Source %s, "
"Destination %s.\n",
/* IPSEC_POLICY_AUTH_NOT_NEEDED */
"%s: Authentication present while not expected in the "
"incoming %s packet; Source %s, Destination %s.\n",
/* IPSEC_POLICY_ENCR_NOT_NEEDED */
"%s: Encryption present while not expected in the "
"incoming %s packet; Source %s, Destination %s.\n",
/* IPSEC_POLICY_SE_NOT_NEEDED */
"%s: Self-Encapsulation present while not expected in the "
"incoming %s packet; Source %s, Destination %s.\n",
};
/*
* General overviews:
*
* Locking:
*
* All of the system policy structures are protected by a single
* rwlock. These structures are threaded in a
* fairly complex fashion and are not expected to change on a
* regular basis, so this should not cause scaling/contention
* problems. As a result, policy checks should (hopefully) be MT-hot.
*
* Allocation policy:
*
* We use custom kmem cache types for the various
* bits & pieces of the policy data structures. All allocations
* use KM_NOSLEEP instead of KM_SLEEP for policy allocation. The
* policy table is of potentially unbounded size, so we don't
* want to provide a way to hog all system memory with policy
* entries..
*/
/* Convenient functions for freeing or dropping a b_next linked mblk chain */
/* Free all messages in an mblk chain */
static void
{
}
}
/* ip_drop all messages in an mblk chain */
static void
{
}
}
/*
* AVL tree comparison function.
* The in-kernel AVL assumes unique keys for all objects.
* Since sometimes policy will duplicate rules, we may insert
* multiple rules with the same rule id, so we need a tie-breaker.
*/
/*
 * AVL comparison callback ordering policy rules by id, with two
 * tie-breakers so that duplicate ids can coexist in the tree.
 *
 * NOTE(review): this extract appears to be missing source lines — the
 * bare return statements below have lost the conditions that guard
 * them. Verify the full logic against the upstream revision before
 * relying on this text.
 */
static int
ipsec_policy_cmpbyid(const void *a, const void *b)
{
ipa = (const ipsec_policy_t *)a;
ipb = (const ipsec_policy_t *)b;
/* Primary key: presumably the rule id — guard conditions missing here. */
return (-1);
return (1);
/*
* Tie-breaker #1: All installed policy rules have a non-NULL
* ipsl_sel (selector set), so an entry with a NULL ipsp_sel is not
* actually in-tree but rather a template node being used in
* an avl_find query; see ipsec_policy_delete(). This gives us
* a placeholder in the ordering just before the first entry with
* a key >= the one we're looking for, so we can walk forward from
* that point to get the remaining entries with the same id.
*/
return (-1);
return (1);
/*
* At most one of the arguments to the comparison should have a
* NULL selector pointer; if not, the tree is broken.
*/
/*
* Tie-breaker #2: use the virtual address of the policy node
* to arbitrarily break ties. Since we use the new tree node in
* the avl_find() in ipsec_insert_always, the new node will be
* inserted into the tree in the right place in the sequence.
*/
return (-1);
return (1);
/* Fully equal: same node. */
return (0);
}
/*
* Free what ipsec_alloc_table allocated.
*/
void
{
int dir;
int i;
continue;
for (i = 0; i < ipr->ipr_nchains; i++) {
}
sizeof (ipsec_policy_hash_t));
}
}
void
{
int dir;
int chain;
}
}
/*
* Free the IPsec stack instance.
*/
/* ARGSUSED */
static void
{
void *cookie;
int i;
/*
* It's possible we can just ASSERT() the tree is empty. After all,
* we aren't called until IP is ready to unload (and presumably all
* tunnels have been unplumbed). But we'll play it safe for now, the
* loop will just exit immediately if it's empty.
*/
while ((node = (ipsec_tun_pol_t *)
}
/*
* Globals start with ref == 1 to prevent IPPH_REFRELE() from
* attempting to free them, hence they should have 1 now.
*/
for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
}
for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
}
for (i = 0; i < nalgs; i++) {
}
}
ipsid_fini(ns);
(void) ipsec_free_tables(ipss);
}
/*
 * Module-unload-time teardown; counterpart of ipsec_policy_g_init().
 *
 * NOTE(review): the body is empty in this extract; upstream presumably
 * destroys the global kmem caches declared above (ipsec_action_cache,
 * ipsec_sel_cache, ipsec_pol_cache, ipsec_info_cache) — confirm
 * against the full source.
 */
void
ipsec_policy_g_destroy(void)
{
}
/*
* Free what ipsec_alloc_tables allocated.
* Called when table allocation fails to free the table.
*/
static int
{
int i;
for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
}
sizeof (*ipss->ipsec_sel_hash));
ipss->ipsec_spd_hashsize = 0;
}
return (ENOMEM);
}
/*
* Attempt to allocate the tables in a single policy head.
* Return nonzero on failure after cleaning up any work in progress.
*/
int
{
int dir;
sizeof (ipsec_policy_hash_t), kmflag);
return (global_cleanup ?
ENOMEM);
}
return (0);
}
/*
* Attempt to allocate the various tables. Return nonzero on failure
* after cleaning up any work in progress.
*/
static int
{
int error;
if (error != 0)
return (error);
if (error != 0)
return (error);
return (ipsec_free_tables(ipss));
return (0);
}
/*
* After table allocation, initialize a policy head.
*/
void
{
}
}
}
static boolean_t
{
return (B_FALSE);
return (B_TRUE);
}
static void
{
}
/*
* Initialize the IPsec stack instance.
*/
/* ARGSUSED */
static void *
{
int i;
/*
* FIXME: netstack_ipsec is used by some of the routines we call
* below, but it isn't set until this routine returns.
* Either we introduce optional xxx_stack_alloc() functions
* that will be called by the netstack framework before xxx_stack_init,
* (latter has some include file order issues for sadb.h, but makes
* sense if we merge some of the ipsec related stack_t's together.
*/
/*
* Make two attempts to allocate policy hash tables; try it at
* then fall back to the default size.
*/
"Unable to allocate %d entry IPsec policy hash table",
}
/* Just set a default for tunnels. */
ipsid_init(ns);
/*
* Globals need ref == 1 to prevent IPPH_REFRELE() from attempting
* to free them.
*/
sizeof (ipsec_tun_pol_t), 0);
for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++)
for (i = 0; i < ipss->ipsec_spd_hashsize; i++)
for (i = 0; i < IPSEC_NALGTYPES; i++) {
ipss->ipsec_nalgs[i] = 0;
}
/* Set function to dummy until tun is loaded */
/* IP's IPsec code calls the packet dropper */
(void) ipsec_kstat_init(ipss);
return (ipss);
}
/* Global across all stack instances */
/*
 * Module-load-time initialization, shared by all netstack instances.
 *
 * NOTE(review): the body is empty apart from the comment in this
 * extract; upstream presumably registers netstack create/destroy
 * callbacks here (per the comment below) — confirm against the full
 * source.
 */
void
ipsec_policy_g_init(void)
{
/*
* We want to be informed each time a stack is created or
* destroyed in the kernel, so we can maintain the
* set of ipsec_stack_t's.
*/
}
/*
* Sort algorithm lists.
*
* I may need to split this based on
* authentication/encryption, and I may wish to have an administrator
* configure this list. Hold on to some NDD variables...
*
* XXX For now, sort on minimum key size (GAG!). While minimum key size is
* not the ideal metric, it's the only quantifiable measure available.
* We need a better metric for sorting algorithms by preference.
*/
static void
{
uint_t i;
for (i = 0; i < count - 1; i++) {
/*
* If you want to give precedence to newly added algs,
* add the = in the > comparison.
*/
/* Swap sortlist[i] and holder. */
} /* Else just continue. */
}
/* Store holder in last slot. */
}
/*
* Remove an algorithm from a sorted algorithm list.
* This should be considerably easier, even with complex sorting.
*/
static void
{
int i;
for (i = 0; i <= newcount; i++) {
if (copyback) {
}
}
}
/*
* Add the specified algorithm to the algorithm tables.
* Must be called while holding the algorithm table writer lock.
*/
void
{
}
/*
* Remove the specified algorithm from the algorithm tables.
* Must be called while holding the algorithm table writer lock.
*/
void
{
}
/*
* Hooks for spdsock to get a grip on system policy.
*/
{
IPPH_REFHOLD(h);
return (h);
}
{
IPPH_REFHOLD(h);
return (h);
}
/*
* Lock inactive policy, then active policy, then exchange policy root
* pointers.
*/
void
netstack_t *ns)
{
}
}
}
}
}
/*
*/
void
{
}
/*
* Clone one policy rule..
*/
static ipsec_policy_t *
{
return (NULL);
/*
* Adjust refcounts of cloned state.
*/
return (dst);
}
void
{
void *node;
}
static int
{
return (ENOMEM);
}
return (0);
}
/*
* Make one policy head look exactly like another.
*
* As with ipsec_swap_policy, we lock the destination policy head first, then
* the source policy head. Note that we only need to read-lock the source
* policy head as we are not changing it.
*/
int
netstack_t *ns)
{
goto abort_copy;
}
if (ipsec_copy_chain(dph,
goto abort_copy;
}
}
return (0);
return (ENOMEM);
}
/*
* Clone currently active policy to the inactive policy list.
*/
int
{
}
/*
* Generic "do we have IPvN policy" answer.
*/
{
int i, hval;
if (v6) {
hval = IPSEC_AF_V6;
} else {
hval = IPSEC_AF_V4;
}
return (B_TRUE);
for (i = 0; i < ipr->ipr_nchains; i++) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Extract the string from ipsec_policy_failure_msgs[type] and
* log it.
*
*/
void
{
char sbuf[INET6_ADDRSTRLEN];
char dbuf[INET6_ADDRSTRLEN];
char *s;
char *d;
} else {
}
/* Always bump the policy failure counter. */
}
/*
* Rate-limiting front-end to strlog() for AH and ESP. Uses the ndd variables
* knob to turn to throttle the rate of messages.
*/
void
char *fmt, ...)
{
sl |= SL_CONSOLE;
/*
* Throttle logging to stop syslog from being swamped. If variable
* 'ipsec_policy_log_interval' is zero, don't log any messages at
* all, otherwise log only one message every 'ipsec_policy_log_interval'
* msec. Convert interval (in msec) to hrtime (in nsec).
*/
if (ipst->ips_ipsec_policy_log_interval) {
if (ipss->ipsec_policy_failure_last +
}
}
}
void
{
}
/*
* algorithm.
*/
static void
{
/*
* If passed-in minbits is zero, we assume the caller trusts
* us with setting the minimum key size. We pick the
* algorithms DEFAULT key size for the minimum in this case.
*/
if (*minbits == 0) {
} else {
algp->alg_minbits);
}
if (*maxbits == 0)
else
algp->alg_maxbits);
} else {
*minbits = 0;
*maxbits = 0;
}
}
/*
* Check an action's requested algorithms against the algorithms currently
* loaded in the system.
*/
{
if (ipp->ipp_use_ah &&
return (B_FALSE);
}
if (ipp->ipp_use_espa &&
NULL) {
return (B_FALSE);
}
if (ipp->ipp_use_esp &&
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
/* TODO: sanity check lifetimes */
return (B_TRUE);
}
/*
* Set up a single action during wildcard expansion..
*/
static void
{
}
/*
* combinatoric expansion time: expand a wildcarded action into an
* array of wildcarded actions; we return the exploded action list,
* and return a count in *nact (output only).
*/
static ipsec_act_t *
{
*nact = 1;
return (outact);
}
/*
* compute the combinatoric explosion..
*
* we assume a request for encr if esp_req is PREF_REQUIRED
* we assume a request for ah auth if ah_req is PREF_REQUIRED.
* we assume a request for esp auth if !ah and esp_req is PREF_REQUIRED
*/
action_count = 1;
/*
* set up for explosion.. for each dimension, expand output
* size by the explosion factor.
*
* Don't include the "any" algorithms, if defined, as no
* kernel policies should be set for these algorithms.
*/
if (wild) { \
nalgs--; \
action_count *= nalgs; \
min = 0; \
}
/*
* ok, allocate the whole mess..
*/
return (NULL);
/*
* Now compute all combinations. Note that non-wildcarded
* dimensions just get a single value from auth_min, while
* wildcarded dimensions indirect through the sortlist.
*
* We do encryption outermost since, at this time, there's
* greater difference in security and performance between
* encryption algorithms vs. authentication algorithms.
*/
ai = 0;
continue;
continue;
eauth_idx++) {
continue;
ai++;
}
}
}
*nact = action_count;
return (outact);
}
/*
* Extract the parts of an ipsec_prot_t from an old-style ipsec_req_t.
*/
static void
{
/*
* ipp_use_* are bitfields. Look at "!!" in the following as a
* "boolean canonicalization" operator.
*/
/*
* SADB_AALG_ANY is a placeholder to distinguish "any" from
* "none" above. If auth is required, as determined above,
* SADB_AALG_ANY becomes 0, which is the representation
* of "any" and "none" in PF_KEY v2.
*/
req->ipsr_auth_alg : 0;
req->ipsr_esp_auth_alg : 0;
}
/*
* Extract a new-style action from a request.
*/
void
netstack_t *ns)
{
} else {
}
}
/*
* Convert a new-style "prot" back to an ipsec_req_t (more backwards compat).
* We assume caller has already zero'ed *req for us.
*/
static int
{
if (ipp->ipp_use_unique) {
}
if (ipp->ipp_use_se)
if (ipp->ipp_use_ah)
if (ipp->ipp_use_esp)
return (sizeof (*req));
}
/*
* Convert a new-style action back to an ipsec_req_t (more backwards compat).
* We assume caller has already zero'ed *req for us.
*/
static int
{
case IPSEC_ACT_BYPASS:
return (sizeof (*req));
case IPSEC_ACT_APPLY:
}
return (sizeof (*req));
}
/*
* Convert a new-style action back to an ipsec_req_t (more backwards compat).
* We assume caller has already zero'ed *req for us.
*/
int
{
ipsec_policy_t *p;
/*
* FULL-PERSOCK: consult hash table, too?
*/
p != NULL;
}
return (sizeof (*req));
}
/*
* Based on per-socket or latched policy, convert to an appropriate
* IP_SEC_OPT ipsec_req_t for the socket option; return size so we can
* be tail-called from ip.
*/
int
{
int rv = sizeof (ipsec_req_t);
/*
* Find appropriate policy. First choice is latched action;
* failing that, see latched policy; failing that,
* look at configured policy.
*/
goto done;
}
req);
goto done;
}
}
done:
return (rv);
}
void
{
}
/*
* When outbound policy is not cached, look it up the hard way and attach
* an ipsec_out_t to the packet..
*/
static mblk_t *
netstack_t *ns)
{
ipsec_policy_t *p;
if (p == NULL)
return (NULL);
}
/*
* We have an ipsec_out already, but don't have cached policy; fill it in
* with the right actions.
*/
static mblk_t *
{
ipsec_policy_t *p;
io->ipsec_out_policy = p;
}
return (ipsec_mp);
}
/*
* Consumes a reference to ipsp.
*/
static mblk_t *
{
ipsec_in_t *ii;
netstack_t *ns;
if (!mctl_present)
return (first_mp);
/*
* We should do an actual policy check here. Revisit this
* when we revisit the IPsec API. (And pass a conn_t in when we
* get there.)
*/
return (first_mp);
}
/*
* Check that packet's inbound ports & proto match the selectors
* expected by the SAs it traversed on the way in.
*/
static boolean_t
{
return (B_TRUE);
/*
* The pkt_unique check will also check for tunnel mode on the SA
* vs. the tunneled_packet boolean. "Be liberal in what you receive"
* should not apply in this case. ;)
*/
if (ah_mask != 0 &&
*reason = "AH inner header mismatch";
return (B_FALSE);
}
if (esp_mask != 0 &&
*reason = "ESP inner header mismatch";
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
{
if (ii->ipsec_in_loopback) {
/*
* Besides accepting pointer-equivalent actions, we also
* accept any ICMP errors we generated for ourselves,
* regardless of policy. If we do not wish to make this
* assumption in the future, check here, and where
* ipsec_out_icmp_loopback.)
*/
return (B_TRUE);
/* Deep compare necessary here?? */
*reason = "loopback policy mismatch";
return (B_FALSE);
}
case IPSEC_ACT_DISCARD:
case IPSEC_ACT_REJECT:
/* Should "fail hard" */
*reason = "blocked by policy";
return (B_FALSE);
case IPSEC_ACT_BYPASS:
case IPSEC_ACT_CLEAR:
*reason = "expected clear, got protected";
return (B_FALSE);
case IPSEC_ACT_APPLY:
/*
* As of now we do the simple checks of whether
* the datagram has gone through the required IPSEC
* protocol constraints or not. We might have more
* in the future like sensitive levels, key bits, etc.
* If it fails the constraints, check whether we would
* have accepted this if it had come in clear.
*/
if (ipp->ipp_use_ah) {
ip6h);
*reason = "unprotected not accepted";
break;
}
if (ah_assoc->ipsa_auth_alg !=
ipp->ipp_auth_alg) {
*reason = "unacceptable ah alg";
break;
}
/*
* Don't allow this. Check IPSEC NOTE above
* ip_fanout_proto().
*/
*reason = "unexpected AH";
break;
}
if (ipp->ipp_use_esp) {
ip6h);
*reason = "unprotected not accepted";
break;
}
if (esp_assoc->ipsa_encr_alg !=
ipp->ipp_encr_alg) {
*reason = "unacceptable esp alg";
break;
}
/*
* If the client does not need authentication,
* we don't verify the alogrithm.
*/
if (ipp->ipp_use_espa) {
if (esp_assoc->ipsa_auth_alg !=
ipp->ipp_esp_auth_alg) {
*reason = "unacceptable esp auth alg";
break;
}
}
/*
* Don't allow this. Check IPSEC NOTE above
* ip_fanout_proto().
*/
*reason = "unexpected ESP";
break;
}
if (ipp->ipp_use_se) {
if (!decaps) {
ip6h);
if (!ret) {
/* XXX mutant? */
*reason = "self encap not found";
break;
}
}
} else if (decaps) {
/*
* XXX If the packet comes in tunneled and the
* recipient does not expect it to be tunneled, it
* is okay. But we drop to be consistent with the
* other cases.
*/
*reason = "unexpected self encap";
break;
}
/*
* This can happen if we do a double policy-check on
* a packet
* XXX XXX should fix this case!
*/
}
break; /* from switch */
}
return (ret);
}
static boolean_t
{
}
/*
* Takes a latched conn and an inbound packet and returns a unique_id suitable
* for SA comparisons. Most of the time we will copy from the conn_t, but
* there are cases when the conn_t is latched but it has wildcard selectors,
* and then we need to fallback to scooping them out of the packet.
*
* Assume we'll never have 0 with a conn_t present, so use 0 as a failure. We
* latched conn_ts.
*
* Ideal candidate for an "inline" keyword, as we're JUST convoluted enough
* to not be a nice macro.
*/
static uint64_t
{
/* Slow path - we gotta grab from the packet. */
SEL_NONE) != SELRET_SUCCESS) {
/* Failure -> have caller free packet with ENOMEM. */
return (0);
}
sel.ips_protocol, 0));
}
#ifdef DEBUG_NOT_UNTIL_6478464
}
#endif
}
/*
* Called to check policy on a latched connection, both from this file
* and from tcp.c
*/
{
if (!ii->ipsec_in_loopback) {
/*
* Over loopback, there aren't real security associations,
* so there are neither identities nor "unique" values
* for us to check the packet against.
*/
*reason = "AH identity mismatch";
return (B_FALSE);
}
*reason = "ESP identity mismatch";
return (B_FALSE);
}
/*
* Can fudge pkt_unique from connp because we're latched.
* In DEBUG kernels (see conn_to_unique()'s implementation),
* verify this even if it REALLY slows things down.
*/
return (B_FALSE);
}
}
}
/*
* Check to see whether this secured datagram meets the policy
* constraints specified in ipsp.
*
* Called from ipsec_check_global_policy, and ipsec_check_inbound_policy.
*
* Consumes a reference to ipsp.
*/
static mblk_t *
{
ipsec_in_t *ii;
const char *reason = "no policy actions found";
if (ii->ipsec_in_loopback)
/*
* this can happen if we do a double policy-check on a packet
* Would be nice to be able to delete this test..
*/
}
reason = "inbound AH and ESP identities differ";
goto drop;
}
goto drop;
/*
* Ok, now loop through the possible actions and see if any
* of them work for us.
*/
return (first_mp);
}
}
drop:
"ipsec inbound policy mismatch: %s, packet dropped\n",
reason);
return (NULL);
}
/*
* sleazy prefix-length-based compare.
* another inlining candidate..
*/
{
/*
* and there was much evil..
* XXX should inline-expand the bcmp here and do this 32 bits
* or 64 bits at a time..
*/
((bitsleft == 0) ||
}
static ipsec_policy_t *
{
ipsec_policy_t *p;
continue;
if ((valid & IPSL_PROTOCOL) &&
continue;
if ((valid & IPSL_REMOTE_ADDR) &&
continue;
if ((valid & IPSL_LOCAL_ADDR) &&
continue;
if ((valid & IPSL_REMOTE_PORT) &&
continue;
if ((valid & IPSL_LOCAL_PORT) &&
continue;
if (!is_icmp_inv_acq) {
if ((valid & IPSL_ICMP_TYPE) &&
continue;
}
if ((valid & IPSL_ICMP_CODE) &&
sel->ips_icmp_code)) {
continue;
}
} else {
/*
* special case for icmp inverse acquire
*/
continue;
}
/* we matched all the packet-port-field selectors! */
best = p;
}
return (best);
}
/*
* Try to find and return the best policy entry under a given policy
* root for a given set of selectors; the first parameter "best" is
* the current best policy so far. If "best" is non-null, we have a
* reference to it. We return a reference to a policy; if that policy
* is not the original "best", we need to release that reference
* before returning.
*/
{
#ifdef DEBUG
if (is_icmp_inv_acq) {
" expecting icmp, got %d",
sel->ips_protocol);
}
} else {
" expecting icmpv6, got %d",
sel->ips_protocol);
}
}
}
#endif
if (root->ipr_nchains > 0) {
}
/*
* Adjust reference counts if we found anything new.
*/
}
}
return (curbest);
}
/*
* Find the best system policy (either global or per-interface) which
* applies to the given selector; look in all the relevant policy roots
* to figure out which policy wins.
*
* Returns a reference to a policy; caller must release this
* reference when done.
*/
{
ipsec_policy_t *p;
}
return (p);
}
/*
* Check with global policy and see whether this inbound
* packet meets the policy constraints.
*
* Locate appropriate policy from global policy, supplemented by the
*
* Dispatch to ipsec_check_ipsecin_policy if we have policy and an
* encrypted packet to see if they match.
*
* Otherwise, see if the policy allows cleartext; if not, drop it on the
* floor.
*/
mblk_t *
{
ipsec_policy_t *p;
sel.ips_is_icmp_inv_acq = 0;
else
/*
* No global policy and no per-socket policy;
* just pass it back (but we shouldn't get here in that case)
*/
return (first_mp);
}
}
/*
* If we have cached policy, use it.
* Otherwise consult system policy.
*/
if (p != NULL) {
IPPOL_REFHOLD(p);
}
/*
* Fudge sel for UNIQUE_ID setting below.
*/
} else {
/* Initialize the ports in the selector */
SEL_NONE) == SELRET_NOMEM) {
/*
* Technically not a policy mismatch, but it is
* an internal failure.
*/
goto fail;
}
/*
* Find the policy which best applies.
*
* If we find global policy, we should look at both
* local policy and global policy and see which is
* stronger and match accordingly.
*
* If we don't find a global policy, check with
* local policy alone.
*/
ns);
}
if (p == NULL) {
/*
* We have no policy; default to succeeding.
* XXX paranoid system design doesn't do this.
*/
return (first_mp);
} else {
ns);
goto fail;
}
}
pkt_unique, ns));
}
if (p->ipsp_act->ipa_allow_clear) {
IPPOL_REFRELE(p, ns);
return (first_mp);
}
IPPOL_REFRELE(p, ns);
/*
* If we reach here, we will drop the packet because it failed the
* global policy check because the packet was cleartext, and it
* should not have been.
*/
fail:
return (NULL);
}
/*
* We check whether an inbound datagram is a valid one
* to accept in clear. If it is secure, it is the job
* of IPSEC to log information appropriately if it
* suspects that it may not be the real one.
*
* It is called only while fanning out to the ULP
* where ULP accepts only secure data and the incoming
* is clear. Usually we never accept clear datagrams in
* such cases. ICMP is the only exception.
*
* NOTE : We don't call this function if the client (ULP)
* is willing to accept things in clear.
*/
{
&nexthdrp)) {
return (B_FALSE);
}
if (*nexthdrp != IPPROTO_ICMPV6)
return (B_FALSE);
/* Match IPv6 ICMP policy as closely as IPv4 as possible. */
switch (icmp6->icmp6_type) {
case ICMP6_PARAM_PROB:
case ICMP6_ECHO_REQUEST:
/* Just like IPv4. */
return (B_FALSE);
case MLD_LISTENER_QUERY:
case MLD_LISTENER_REPORT:
case MLD_LISTENER_REDUCTION:
/*
* XXX Separate NDD in IPv4; what about here?
* Plus, mcast is important to ND.
*/
case ICMP6_DST_UNREACH:
case ICMP6_PACKET_TOO_BIG:
case ICMP6_ECHO_REPLY:
/* These are trusted in IPv4. */
case ND_ROUTER_SOLICIT:
case ND_ROUTER_ADVERT:
case ND_NEIGHBOR_SOLICIT:
case ND_NEIGHBOR_ADVERT:
case ND_REDIRECT:
/* Trust ND messages for now. */
case ICMP6_TIME_EXCEEDED:
default:
return (B_TRUE);
}
} else {
/*
* If it is not ICMP, fail this request.
*/
#ifdef FRAGCACHE_DEBUG
#endif
return (B_FALSE);
}
/*
* It is an insecure icmp message. Check to see whether we are
* willing to accept this one.
*/
switch (icmph->icmph_type) {
case ICMP_ECHO_REPLY:
case ICMP_TIME_STAMP_REPLY:
case ICMP_INFO_REPLY:
/*
* We should not encourage clear replies if this
* client expects secure. If somebody is replying
* in clear, a malicious user watching both the
* request and reply, can do chosen-plain-text attacks.
* With global policy we might be just expecting secure
* but sending out clear. We don't know what the right
* thing is. We can't do much here as we can't control
* the sender here. Till we are sure of what to do,
* accept them.
*/
return (B_TRUE);
case ICMP_ECHO_REQUEST:
case ICMP_TIME_STAMP_REQUEST:
case ICMP_INFO_REQUEST:
case ICMP_ROUTER_SOLICITATION:
case ICMP_ADDRESS_MASK_REPLY:
/*
* Don't accept this as somebody could be sending
* us plain text to get encrypted data. If we reply,
* it will lead to chosen plain text attack.
*/
return (B_FALSE);
case ICMP_DEST_UNREACHABLE:
switch (icmph->icmph_code) {
/*
* Be in sync with icmp_inbound, where we have
* already set ire_max_frag.
*/
#ifdef FRAGCACHE_DEBUG
#endif
return (B_TRUE);
case ICMP_HOST_UNREACHABLE:
case ICMP_NET_UNREACHABLE:
/*
* By accepting, we could reset a connection.
* How do we solve the problem of some
* intermediate router sending in-secure ICMP
* messages ?
*/
return (B_TRUE);
case ICMP_PORT_UNREACHABLE:
default :
return (B_FALSE);
}
case ICMP_SOURCE_QUENCH:
/*
* If this is an attack, TCP will slow start
* because of this. Is it very harmful ?
*/
return (B_TRUE);
case ICMP_PARAM_PROBLEM:
return (B_FALSE);
case ICMP_TIME_EXCEEDED:
return (B_TRUE);
case ICMP_REDIRECT:
return (B_FALSE);
default :
return (B_FALSE);
}
}
}
void
{
if (ipl->ipl_ids_latched) {
/* I lost, someone else got here before me */
return;
}
}
void
{
if (!ipl->ipl_ids_latched) {
if (!ii->ipsec_in_loopback) {
else
}
}
}
/*
* Check whether the policy constraints are met either for an
* inbound datagram; called from IP in numerous places.
*
* Note that this is not a chokepoint for inbound policy checks;
* see also ipsec_check_ipsecin_latch() and ipsec_check_global_policy()
*/
mblk_t *
{
ipsec_in_t *ii;
netstack_t *ns;
/*
* This is the case where the incoming datagram is
* cleartext and we need to see whether this client
* would like to receive such untrustworthy things from
* the wire.
*/
/*
* Policy is cached in the conn.
*/
if (ret) {
return (first_mp);
} else {
"ipsec_check_inbound_policy", ipha,
NULL,
return (NULL);
}
} else {
return (first_mp);
}
} else {
/*
* As this is a non-hardbound connection we need
* to look at both per-socket policy and global
* policy. As this is cleartext, mark the mp as
* M_DATA in case if it is an ICMP error being
* reported before calling ipsec_check_global_policy
* so that it does not mistake it for IPSEC_IN.
*/
return (first_mp);
}
}
/*
* If it is inbound check whether the attached message
* is secure or not. We have a special case for ICMP,
* where we have a IPSEC_IN message and the attached
* message is not secure. See icmp_inbound_error_fanout
* for details.
*/
if (!ii->ipsec_in_secure)
goto clear;
/*
* mp->b_cont could be either a M_CTL message
* for icmp errors being sent up or a M_DATA message.
*/
/*
* We don't have policies cached in the conn
* for this stream. So, look at the global
* policy. It will check against conn or global
* depending on whichever is stronger.
*/
}
/* Policy is cached & latched; fast(er) path */
const char *reason;
return (first_mp);
}
"ipsec inbound policy mismatch: %s, packet dropped\n",
reason);
return (NULL);
return (first_mp);
}
/*
* NOTE: ipsecIn{Failed,Succeeded} bumped by
* ipsec_check_ipsecin_policy().
*/
return (first_mp);
}
/*
* Returns:
*
* SELRET_NOMEM --> msgpullup() needed to gather things failed.
* SELRET_BADPKT --> If we're being called after tunnel-mode fragment
* gathering, the initial fragment is too short for
* useful data. Only returned if SEL_TUNNEL_FIRSTFRAG is
* set.
* SELRET_SUCCESS --> "sel" now has initialized IPsec selector data.
* SELRET_TUNFRAG --> This is a fragment in a tunnel-mode packet. Caller
* should put this packet in a fragment-gathering queue.
* Only returned if SEL_TUNNEL_MODE and SEL_PORT_POLICY
* is set.
*/
static selret_t
{
int outer_hdr_len = 0; /* For ICMP tunnel-mode cases... */
if (is_icmp)
switch (nexthdr) {
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
case IPPROTO_FRAGMENT:
/*
* Use ip_hdr_length_nexthdr_v6(). And have a spare
* mblk that's contiguous to feed it
*/
return (SELRET_NOMEM);
/* Malformed packet - caller frees. */
return (SELRET_BADPKT);
}
/* We can just extract based on hdr_len now. */
break;
default:
break;
}
/* IPv6 Fragment */
return (SELRET_TUNFRAG);
}
} else {
if (is_icmp)
if (port_policy_present &&
!is_icmp) {
/* IPv4 Fragment */
return (SELRET_TUNFRAG);
}
}
(!port_policy_present && tunnel_mode)) {
return (SELRET_SUCCESS);
}
/* If we didn't pullup a copy already, do so now. */
/*
* apart from IP or options? If so, perhaps we should revisit
* the spare_mp strategy.
*/
return (SELRET_NOMEM);
}
} else {
}
if (nexthdr == check_proto) {
} else {
}
return (SELRET_SUCCESS);
}
static boolean_t
{
/*
* XXX cut&paste shared with ipsec_init_inbound_sel
*/
switch (nexthdr) {
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
case IPPROTO_FRAGMENT:
/*
* Use ip_hdr_length_nexthdr_v6(). And have a spare
* mblk that's contiguous to feed it
*/
/* Always works, even if NULL. */
return (B_FALSE);
} else {
/* We can just extract based on hdr_len now. */
}
break;
default:
break;
}
} else {
}
return (B_TRUE);
}
/* If we didn't pullup a copy already, do so now. */
/*
* apart from IP or options? If so, perhaps we should revisit
* the spare_mp strategy.
*
* XXX should this be msgpullup(mp, hdr_len+4) ???
*/
return (B_FALSE);
}
} else {
}
if (nexthdr == check_proto) {
} else {
}
return (B_TRUE);
}
/*
* Create an ipsec_action_t based on the way an inbound packet was protected.
* Used to reflect traffic back to a sender.
*
* We don't bother interning the action into the hash table.
*/
{
return (NULL);
/*
* Get the algorithms that were used for this packet.
*/
}
}
}
return (ap);
}
/*
* Compute the worst-case amount of extra space required by an action.
* Note that, because of the ESP considerations listed below, this is
* actually not the same as the best-case reduction in the MTU; in the
* future, we should pass additional information to this function to
* allow the actual MTU impact to be computed.
*
* AH: Revisit this if we implement algorithms with
* a verifier size of more than 12 bytes.
*
* ESP: A more exact but more messy computation would take into
* account the interaction between the cipher block size and the
* effective MTU, yielding the inner payload size which reflects a
* packet with *minimum* ESP padding..
*/
{
if (ipp->ipp_use_ah)
if (ipp->ipp_use_esp) {
}
if (ipp->ipp_use_se)
}
return (overhead);
}
/*
* This hash function is used only when creating policies and thus is not
* performance-critical for packet flows.
*
* Future work: canonicalize the structures hashed with this (i.e.,
* zeroize padding) so the hash works correctly.
*/
/* ARGSUSED */
static uint32_t
{
return (0);
}
/*
* Hash function macros for each address type.
*
* The IPV6 hash function assumes that the low order 32-bits of the
* address (typically containing the low order 24 bits of the mac
* address) are reasonably well-distributed. Revisit this if we run
* into trouble from lots of collisions on ::1 addresses and the like
* (seems unlikely).
*/
#define IPSEC_IPV4_HASH(a, n) ((a) % (n))
/*
* These two hash functions should produce coordinated values
* but have slightly different roles.
*/
static uint32_t
{
if (!(valid & IPSL_REMOTE_ADDR))
return (IPSEC_SEL_NOHASH);
}
}
}
}
return (IPSEC_SEL_NOHASH);
}
static uint32_t
{
root->ipr_nchains));
}
}
/*
* Intern actions into the action hash table.
*/
{
int i;
/*
* TODO: should canonicalize a[] (i.e., zeroize any padding)
* so we can use a non-trivial policy_hash function.
*/
for (i = n-1; i >= 0; i--) {
continue;
continue;
break;
}
continue;
}
/*
* need to allocate a new one..
*/
return (NULL);
}
overhead = ipsec_act_ovhd(&a[i]);
if ((a[i].ipa_type == IPSEC_ACT_BYPASS) ||
(a[i].ipa_type == IPSEC_ACT_CLEAR))
if (a[i].ipa_type == IPSEC_ACT_APPLY) {
}
if (prev)
}
return (ap);
}
/*
* Called when refcount goes to 0, indicating that all references to this
* node are gone.
*
* This does not unchain the action from the hash table.
*/
void
{
for (;;) {
/* Inlined IPACT_REFRELE -- avoid recursion */
break;
membar_exit();
break;
/* End inlined IPACT_REFRELE */
}
}
/*
* Called when the action hash table goes away.
*
* The actions can be queued on an mblk with ipsec_in or
* ipsec_out, hence the actions might still be around.
* But we decrement ipa_refs here since we no longer have
* a reference to the action from the hash table.
*/
static void
{
/* FIXME: remove? */
(void) printf("ipsec_action_free_table(%p) ref %d\n",
}
}
/*
* Need to walk all stack instances since the reclaim function
* is global for all instances
*/
/* ARGSUSED */
static void
ipsec_action_reclaim(void *arg)
{
netstack_t *ns;
}
}
/*
* Periodically sweep action hash table for actions with refcount==1, and
* nuke them. We cannot do this "on demand" (i.e., from IPACT_REFRELE)
* because we can't close the race between another thread finding the action
* in the hash table without holding the bucket lock during IPACT_REFRELE.
* Instead, we run this function sporadically to clean up after ourselves;
* we also set it as the "reclaim" function for the action kmem_cache.
*
* Note that it may take several passes of ipsec_action_gc() to free all
* "stale" actions.
*/
static void
{
int i;
for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
/* skip the lock if nobody home */
continue;
continue;
ipss->ipsec_action_hash, i);
}
}
}
/*
* Intern a selector set into the selector set hash table.
* This is simpler than the actions case..
*/
static ipsec_sel_t *
{
/*
* Exactly one AF bit should be set in selkey.
*/
/* Set pol_hval to uninitialized until we put it in a polhead. */
break;
}
return (sp);
}
return (NULL);
}
/* Set to uninitialized and have insertion into polhead fix things. */
else
return (sp);
}
static void
{
if (hval == IPSEC_SEL_NOHASH)
hval = 0;
/* Caller unlocks */
return;
}
}
/*
* Free a policy rule which we know is no longer being referenced.
*/
void
{
}
/*
* Construction of new policy rules; construct a policy, and add it to
* the appropriate tables.
*/
{
}
return (NULL);
}
(*index_ptr)++;
return (ipp);
}
static void
{
if (hashpol) {
return;
}
}
netstack_t *ns)
{
int af;
return (B_FALSE);
} else {
}
continue;
}
return (B_TRUE);
}
return (B_FALSE);
}
int
netstack_t *ns)
{
/*
 * We could be cleverer here about the walk,
 * but well, (k+1)*log(N) will do for now (k == number of matches,
 * N == number of table entries).
 */
for (;;) {
break;
break;
}
}
if (found) {
}
}
/*
* Given a constructed ipsec_policy_t policy rule, see if it can be entered
* into the correct policy ruleset. As a side-effect, it sets the hash
* entries on "ipp"'s ipsp_pol_hval.
*
* Returns B_TRUE if it can be entered, B_FALSE if it can't be (because a
* duplicate policy exists with exactly the same selectors), or an icmp
* rule exists with a different encryption/authentication action.
*/
{
int af = -1;
af = IPSEC_AF_V6;
} else {
af = IPSEC_AF_V4;
}
/*
* Double-check that we don't have any duplicate selectors here.
* Because selectors are interned below, we need only compare pointers
* for equality.
*/
} else {
pr->ipr_nchains) :
pr->ipr_nchains);
}
return (B_FALSE);
}
/*
* If it's ICMP and not a drop or pass rule, run through the ICMP
* rules and make sure the action is either new or the same as any
* other actions. We don't have to check the full chain because
* discard and bypass will override all other actions
*/
if (valid & IPSL_PROTOCOL &&
IPSEC_ACT_APPLY)) {
}
}
}
return (B_TRUE);
}
/*
* compare the action chains of two policies for equality
* B_TRUE -> effective equality
*/
static boolean_t
{
/* We have a valid rule. Let's compare the actions */
/* same action. We are good */
return (B_TRUE);
}
/* we have to walk the chain */
/* otherwise, Are we close enough? */
/* Nope, we aren't */
return (B_FALSE);
}
if (act1->ipa_want_ah) {
return (B_FALSE);
}
return (B_FALSE);
}
}
if (act1->ipa_want_esp) {
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
}
return (B_FALSE);
}
return (B_FALSE);
}
}
}
}
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Given a constructed ipsec_policy_t policy rule, enter it into
* the correct policy ruleset.
*
* ipsec_check_policy() is assumed to have succeeded first (to check for
* duplicates).
*/
void
netstack_t *ns)
{
int af = -1;
af = IPSEC_AF_V6;
} else {
af = IPSEC_AF_V4;
}
if (hval == IPSEC_SEL_NOHASH) {
} else {
}
}
static void
netstack_t *ns)
{
}
}
}
}
}
void
{
int dir;
}
void
{
int dir;
int chain;
}
}
static void
{
int af;
ipr->ipr_nchains = 0;
}
}
ipsec_polhead_create(void)
{
return (php);
return (php);
}
/*
* Clone the policy head into a new polhead; release one reference to the
* old one and return the only reference to the new one.
* If the old one had a refcount of 1, just return it.
*/
{
return (ipsec_polhead_create());
return (php);
nphp = ipsec_polhead_create();
return (NULL);
return (NULL);
}
return (nphp);
}
/*
* When sending a response to a ICMP request or generating a RST
* in the TCP case, the outbound packets need to go at the same level
* of protection as the incoming ones i.e we associate our outbound
* policy with how the packet came in. We call this after we have
* accepted the incoming packet which may or may not have been in
* clear and hence we are sending the reply back with the policy
* matching the incoming datagram's policy.
*
* NOTE : This technology serves two purposes :
*
* 1) If we have multiple outbound policies, we send out a reply
* matching with how it came in rather than matching the outbound
* policy.
*
* 2) For asymmetric policies, we want to make sure that incoming
* and outgoing have the same level of protection. Asymmetric
* policies exist only with global policy where we may not have
* both outbound and inbound at the same time.
*
* NOTE2: This function is called by cleartext cases, so it needs to be
* in IP proper.
*/
{
ipsec_in_t *ii;
netstack_t *ns;
/* transfer reference.. */
} else if (!ii->ipsec_in_loopback)
/*
* The caller is going to send the datagram out which might
* go on the wire or delivered locally through ip_wput_local.
*
* 1) If it goes out on the wire, new associations will be
* obtained.
* 2) If it is delivered locally, ip_wput_local will convert
* this IPSEC_OUT to a IPSEC_IN looking at the requests.
*/
ns->netstack_ipsec))
return (B_FALSE);
/*
* Don't use global policy for this, as we want
* to use the same protection that was applied to the inbound packet.
*/
return (B_TRUE);
}
mblk_t *
{
return (NULL);
}
/*
* Bump refcounts.
*/
/*
* Copy everything, but preserve the free routine provided by
* ipsec_in_alloc().
*/
return (nmp);
}
mblk_t *
{
return (NULL);
}
/*
* Bump refcounts.
*/
/*
* Copy everything, but preserve the free routine provided by
* ipsec_alloc_ipsec_out().
*/
return (nmp);
}
static void
{
/* Note: IPSA_REFRELE is multi-line macro */
}
if (io->ipsec_out_latch) {
}
}
/*
 * Free routine for IPSEC_OUT mblks, attached at allocation time via
 * ipsec_out_frtn (see the desballoc-style setup near ipsec_alloc_ipsec_out()).
 *
 * NOTE(review): the body appears empty in this excerpt; presumably the
 * full source casts arg to ipsec_out_t * and calls
 * ipsec_out_release_refs() to drop SA/latch references -- confirm
 * against the complete file before relying on this.
 */
static void
ipsec_out_free(void *arg)
{
}
static void
{
/* Note: IPSA_REFRELE is multi-line macro */
}
}
/*
 * Free routine for IPSEC_IN mblks, attached at allocation time via
 * ipsec_in_frtn (see ipsec_in_alloc()).
 *
 * NOTE(review): the body appears empty in this excerpt; presumably the
 * full source casts arg to ipsec_in_t * and calls
 * ipsec_in_release_refs() to drop SA references -- confirm against the
 * complete file before relying on this.
 */
static void
ipsec_in_free(void *arg)
{
}
/*
* This is called only for outbound datagrams if the datagram needs to
* go out secure. A NULL mp can be passed to get an ipsec_out. This
* facility is used by ip_unbind.
*
* NOTE : o As the data part could be modified by ipsec_out_process etc.
* we can't make it fast by calling a dup.
*/
mblk_t *
{
return (NULL);
/*
* Set the zoneid to ALL_ZONES which is used as an invalid value. Code
* using ipsec_out_zoneid should assert that the zoneid has been set to
* a sane value.
*/
&io->ipsec_out_frtn);
return (NULL);
}
return (ipsec_mp);
}
/*
* Attach an IPSEC_OUT; use pol for policy if it is non-null.
* Otherwise initialize using conn.
*
* If pol is non-null, we consume a reference to it.
*/
mblk_t *
{
"ipsec_attach_ipsec_out: Allocation failure\n");
return (NULL);
}
/*
*/
}
/*
* Initialize the IPSEC_OUT (ipsec_mp) using pol if it is non-null.
* Otherwise initialize using conn.
*
* If pol is non-null, we consume a reference to it.
*/
mblk_t *
{
ipsec_policy_t *p;
/*
* Set the zoneid when we have the connp.
* Otherwise, we're called from ip_wput_attach_policy() who will take
* care of setting the zoneid.
*/
} else {
}
} else {
}
p = NULL;
/*
* Take latched policies over global policy. Check here again for
* this, in case we had conn_latch set while the packet was flying
* around in IP.
*/
if (p != NULL) {
IPPOL_REFHOLD(p);
}
p = pol;
/*
* conn does not have the port information. Get
* it from the packet.
*/
ns->netstack_ipsec)) {
/* Callee did ip_drop_packet() on *mp. */
return (NULL);
}
}
io->ipsec_out_policy = p;
if (p == NULL) {
}
} else {
/* Handle explicit drop action. */
}
}
return (ipsec_mp);
}
/*
* Allocate an IPSEC_IN mblk. This will be prepended to an inbound datagram
* and keep track of what-if-any IPsec processing will be applied to the
* datagram.
*/
mblk_t *
{
return (NULL);
&ii->ipsec_in_frtn);
ip1dbg(("ipsec_in_alloc: IPSEC_IN allocation failure.\n"));
return (NULL);
}
return (ipsec_in);
}
/*
* This is called from ip_wput_local when a packet which needs
* security is looped back, to convert the IPSEC_OUT to a IPSEC_IN
* before fanout, where the policy check happens. In most of the
* cases, IPSEC processing has *never* been done. There is one case
* (ip_wput_ire_fragmentit -> ip_wput_frag -> icmp_frag_needed) where
* the packet is destined for localhost, IPSEC processing has already
* been done.
*
* Future: This could happen after SA selection has occurred for
* outbound.. which will tell us who the src and dst identities are..
* ipsec_out_t to the ipsec_in_t.
*/
void
{
ipsec_in_t *ii;
netstack_t *ns;
}
}
/*
* In most of the cases, we can't look at the ipsec_out_XXX_sa
* because this never went through IPSEC processing. So, look at
* the requests and infer whether it would have gone through
* IPSEC processing or not. Initialize the "done" fields with
* the requests. The possible values for "done" fields are :
*
* 1) zero, indicates that a particular preference was never
* requested.
* 2) non-zero, indicates that it could be IPSEC_PREF_REQUIRED/
* IPSEC_PREF_NEVER. If IPSEC_REQ_DONE is set, it means that
* IPSEC processing has been completed.
*/
}
/*
* Consults global policy to see whether this datagram should
* go out secure. If so it attaches a ipsec_mp in front and
* returns.
*/
mblk_t *
{
else
/*
* Fast Path to see if there is any policy.
*/
if (!policy_present) {
if (!io->ipsec_out_secure) {
/*
* If there is no global policy and ip_wput
* or ip_wput_multicast has attached this mp
* for multicast case, free the ipsec_mp and
* return the original mp.
*/
}
}
return (ipsec_mp);
}
ill_index = 0;
/*
* This is a connection where we have some per-socket
* policy or ip_wput has attached an ipsec_mp for
* the multicast datagram.
*/
if (!io->ipsec_out_secure) {
/*
* This ipsec_mp was allocated in ip_wput or
* ip_wput_multicast so that we will know the
* value of ill_index, conn_dontroute,
* conn_multicast_loop in the multicast case if
* we inherit global policy here.
*/
}
}
} else {
if (!unspec_src)
} else {
}
if (is_fragment) {
/*
* It's a packet fragment for a packet that
* we have already processed (since IPsec processing
* is done before fragmentation), so we don't
* have to do policy checks again. Fragments can
* come back to us for processing if they have
* been queued up due to flow control.
*/
}
return (ipsec_mp);
}
/* IPv6 common-case. */
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
case IPPROTO_ICMPV6:
break;
default:
return (NULL);
}
break;
}
}
} else {
}
/* Callee dropped the packet. */
return (NULL);
}
/*
* We seem to have some local policy (we already have
* an ipsec_out). Look at global policy and see
* whether we have to inherit or not.
*/
return (ipsec_mp);
}
/*
* We pass in a pointer to a pointer because mp can become
* NULL due to allocation failures or explicit drops. Callers
* of this function should assume a NULL mp means the packet
* was dropped.
*/
return (mp);
/*
* Copy the right port information.
*/
/*
* Set ill_index, conn_dontroute and conn_multicast_loop
* for multicast datagrams.
*/
return (ipsec_mp);
}
/*
* When appropriate, this function caches inbound and outbound policy
* for this connection.
*
* XXX need to work out more details about per-interface policy and
* caching here!
*
* XXX may want to split inbound and outbound caching for ill..
*/
int
{
/*
* There is no policy latching for ICMP sockets because we can't
* decide on which policy to use until we see the packet and get
*/
}
return (0);
}
ipsec_policy_t *p;
return (ENOMEM);
}
sel.ips_is_icmp_inv_acq = 0;
if (isv4) {
} else {
}
ns);
ns);
/* Clear the latched actions too, in case we're recaching. */
}
/*
* We may or may not have policy for this endpoint. We still set
* conn_policy_cached so that inbound datagrams don't have to look
* at global policy as policy is considered latched for these
* endpoints. We should not set conn_policy_cached until the conn
* reflects the actual policy. If we *set* this before inheriting
* the policy there is a window where the check
* CONN_INBOUND_POLICY_PRESENT will neither check with the policy
* on the conn (because we have not yet copied the policy on to
* conn and hence not set conn_in_enforce_policy) nor with the
* global policy (because conn_policy_cached is already set).
*/
if (connp->conn_in_enforce_policy)
return (0);
}
void
{
}
{
return (ipl);
return (ipl);
}
/*
* Hash function for ID hash table.
*/
static uint32_t
{
unsigned char c;
while ((c = *idstring++) != 0) {
hval ^= c;
}
}
/*
* Look up identity string in hash table. Return identity object
* corresponding to the name -- either preexisting, or newly allocated.
*
* Return NULL if we need to allocate a new one and can't get memory.
*/
ipsid_t *
{
char *nstr;
continue;
continue;
return (retval);
}
if (!retval) {
return (NULL);
}
if (!nstr) {
return (NULL);
}
return (retval);
}
/*
* Garbage collect the identity hash table.
*/
void
{
int i, len;
for (i = 0; i < IPSID_HASHSIZE; i++) {
if (id->ipsid_refcnt == 0) {
}
}
}
}
/*
* Return true if two identities are the same.
*/
{
return (B_TRUE);
#ifdef DEBUG
return (B_FALSE);
/*
* test that we're interning id's correctly..
*/
#endif
return (B_FALSE);
}
/*
* Initialize identity table; called during module initialization.
*/
static void
{
int i;
for (i = 0; i < IPSID_HASHSIZE; i++) {
}
}
/*
* Free identity table (preparatory to module unload)
*/
static void
{
int i;
for (i = 0; i < IPSID_HASHSIZE; i++) {
}
}
/*
* Update the minimum and maximum supported key sizes for the
* specified algorithm. Must be called while holding the algorithms lock.
*/
void
netstack_t *ns)
{
int crypto_rc, i;
/*
* Compute the min, max, and default key sizes (in number of
* increments to the default key size in bits) as defined
* by the algorithm mappings. This range of key sizes is used
* for policy related operations. The effective key sizes
* supported by the framework could be more limited than
* those defined for an algorithm.
*/
if (alg->alg_increment != 0) {
/* key sizes are defined by range & increment */
} else if (alg->alg_nkey_sizes == 0) {
/* no specified key size for algorithm */
} else {
/* key sizes are defined by enumeration */
alg->alg_maxbits = 0;
for (i = 0; i < alg->alg_nkey_sizes; i++) {
}
alg->alg_default = 0;
}
return;
/*
* Mechanisms do not apply to the NULL encryption
* algorithm, so simply return for this case.
*/
return;
/*
* Find the min and max key sizes supported by the cryptographic
* framework providers.
*/
/* get the key sizes supported by the framework */
return;
}
/* min and max key sizes supported by framework */
int unit_bits;
/*
* Ignore entries that do not support the operations
* needed for the algorithm type.
*/
if (alg_type == IPSEC_ALG_AUTH) {
} else {
}
continue;
if (cur_crypto_min < crypto_min)
/*
* CRYPTO_EFFECTIVELY_INFINITE is a special value of
* the crypto framework which means "no upper limit".
*/
if (mech_infos[i].mi_max_key_size ==
} else if (cur_crypto_max > crypto_max) {
}
}
if (!is_valid) {
/* no key sizes supported by framework */
return;
}
/*
* Determine min and max key sizes from alg_key_sizes[].
* defined for the algorithm entry. Adjust key sizes based on
* those supported by the framework.
*/
if (alg->alg_increment != 0) {
/* supported key sizes are defined by range & increment */
/*
* If the sizes supported by the framework are outside
* the range of sizes defined by the algorithm mappings,
* the algorithm cannot be used. Check for this
* condition here.
*/
return;
}
} else if (alg->alg_nkey_sizes == 0) {
/* no specified key size for algorithm */
} else {
/* supported key sizes are defined by enumeration */
alg->alg_ef_maxbits = 0;
/*
* Ignore the current key size if it is not in the
* range of sizes supported by the framework.
*/
continue;
}
if (!is_valid) {
return;
}
alg->alg_ef_default = 0;
}
}
/*
* Free the memory used by the specified algorithm.
*/
void
{
return;
}
}
}
/*
* Check the validity of the specified key size for an algorithm.
* Returns B_TRUE if key size is valid, B_FALSE otherwise.
*/
{
return (B_FALSE);
/*
* If the key sizes are defined by enumeration, the new
* key size must be equal to one of the supported values.
*/
int i;
for (i = 0; i < alg->alg_nkey_sizes; i++)
break;
if (i == alg->alg_nkey_sizes)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Callback function invoked by the crypto framework when a provider
* registers or unregisters. This callback updates the algorithms
* tables when a crypto algorithm is no longer available or becomes
* associated with existing SAs, if needed.
*
* Need to walk all stack instances since the callback is global
* for all instances
*/
void
{
netstack_t *ns;
}
}
static void
netstack_t *ns)
{
/* ignore events for which we didn't register */
if (event != CRYPTO_EVENT_MECHS_CHANGED) {
ip1dbg(("ipsec_prov_update_callback: unexpected event 0x%x "
" received from crypto framework\n", event));
return;
}
return;
/*
* Walk the list of currently defined IPsec algorithm. Update
* the algorithm valid flag and trigger an update of the
* SAs that depend on that algorithm.
*/
algidx++) {
/*
* Skip the algorithms which do not map to the
* crypto framework provider being added or removed.
*/
CRYPTO_MAX_MECH_NAME) != 0)
continue;
/*
* Determine if the mechanism is valid. If it
* is not, mark the algorithm as being invalid. If
* it is, mark the algorithm as being valid.
*/
break;
if (mech_idx == mech_count &&
} else if (mech_idx < mech_count &&
}
/*
* Update the supported key sizes, regardless
* of whether a crypto provider was added or
* removed.
*/
if (!alg_changed &&
/*
* Update the affected SAs if a software provider is
* being added or removed.
*/
if (prov_change->ec_provider_type ==
prov_change->ec_change ==
}
}
if (alg_changed) {
/*
* An algorithm has changed, i.e. it became valid or
* invalid, or its support key sizes have changed.
* Notify ipsecah and ipsecesp of this change so
* that they can send a SADB_REGISTER to their consumers.
*/
}
}
/*
* Registers with the crypto framework to be notified of crypto
* providers changes. Used to update the algorithm tables and
* to free or create context templates if needed. Invoked after IPsec
* is loaded successfully.
*
* This is called separately for each IP instance, so we ensure we only
* register once.
*/
void
{
if (prov_update_handle != NULL)
return;
}
/*
* Unregisters from the framework to be notified of crypto providers
* changes. Called from ipsec_policy_g_destroy().
*/
static void
{
if (prov_update_handle != NULL)
}
/*
* Tunnel-mode support routines.
*/
/*
* Returns an mblk chain suitable for putnext() if policies match and IPsec
* SAs are available. If there's no per-tunnel policy, or a match comes back
* with no match, then still return the packet and have global policy take
* a crack at it in IP.
*
* Remember -> we can be forwarding packets. Keep that in mind w.r.t.
* inner-packet contents.
*/
mblk_t *
netstack_t *ns)
{
/* We take care of inners in a bit. */
/* No policy on this tunnel - let global policy have at it. */
return (mp);
if (inner_ipv4 != NULL) {
} else {
/* Use ip_get_dst_v6() just for the fragment bit. */
&is_fragment);
/*
* Reset, because we don't care about routing-header dests
* in the forwarding/tunnel path.
*/
}
if (is_fragment) {
int hdr_len;
/*
* We have a fragment we need to track!
*/
return (NULL);
/*
* If we get here, we have a full
* fragment chain
*/
hdr_len = ((outer_hdr_len != 0) ?
IPH_HDR_LENGTH(oiph) : 0);
} else {
}
&ip6_hdr_length, &v6_proto_p);
}
/* Was v6 outer */
}
inner_ipv4 = iph;
} else {
}
hdr_len);
(void) ip_hdr_length_nexthdr_v6(spare_mp,
&v6_proto_p);
v6_proto = *v6_proto_p;
#ifdef FRAGCACHE_DEBUG
#endif
}
/* Ports are extracted below */
}
/* Get ports... */
/*
* callee did ip_drop_packet_chain() on
* spare_mp
*/
return (NULL);
}
} else {
/* callee did ip_drop_packet_chain() on mp. */
return (NULL);
}
}
#ifdef FRAGCACHE_DEBUG
if (inner_ipv4 != NULL)
"(v4) sel.ips_protocol = %d, "
"sel.ips_local_port = %d, "
"sel.ips_remote_port = %d\n",
if (inner_ipv6 != NULL)
"(v6) sel.ips_protocol = %d, "
"sel.ips_local_port = %d, "
"sel.ips_remote_port = %d\n",
#endif
/* Success so far - done with spare_mp */
}
/*
* No matching policy on this tunnel, drop the packet.
*
* NOTE: Tunnel-mode tunnels are different from the
* IP global transport mode policy head. For a tunnel-mode
* tunnel, we drop the packet in lieu of passing it
* along accepted the way a global-policy miss would.
*
* NOTE2: "negotiate transport" tunnels should match ALL
* inbound packets, but we do not uncomment the ASSERT()
*/
/* ASSERT(itp->itp_flags & ITPF_P_TUNNEL); */
#ifdef FRAGCACHE_DEBUG
"per-port policy\n");
#endif
return (NULL);
}
#ifdef FRAGCACHE_DEBUG
#endif
/* Construct an IPSEC_OUT message. */
return (NULL);
}
/*
* NOTE: free() function of ipsec_out mblk will release polhead and
* pol references.
*/
/* Set up transport mode for tunnelled packets. */
return (ipsec_mp);
}
/* Fill in tunnel-mode goodies here. */
/* XXX Do I need to fill in all of the goodies here? */
if (inner_ipv4) {
io->ipsec_out_insrc[0] =
io->ipsec_out_indst[0] =
} else {
io->ipsec_out_insrc[0] =
io->ipsec_out_indst[0] =
}
/* NOTE: These are used for transport mode too. */
/*
* The mp pointer still valid
* Add ipsec_out to each fragment.
* The fragment head already has one
*/
return (NULL);
}
}
return (ipsec_mp_head);
}
/*
* NOTE: The following releases pol's reference and
* calls ip_drop_packet() for me on NULL returns.
*/
mblk_t *
{
/* Assume ipsec_mp is a chain of b_next-linked IPSEC_IN M_CTLs. */
/*
* Need IPPOL_REFHOLD(pol) for extras because
* ipsecin_policy does the refrele.
*/
/* First one */
} else {
}
} else {
/*
* ipsec_check_ipsecin_policy() freed ipsec_mp
* already. Need to get rid of any extra pol
* references, and any remaining bits as well.
*/
return (NULL);
}
}
/*
* One last release because either the loop bumped it up, or we never
* called ipsec_check_ipsecin_policy().
*/
/* data_chain is ready for return to tun module. */
return (data_chain);
}
/*
* Returns B_TRUE if the inbound packet passed an IPsec policy check. Returns
* B_FALSE if it failed or if it is a fragment needing its friends before a
* policy check can be performed.
*
* Expects a non-NULL *data_mp, an optional ipsec_mp, and a non-NULL polhead.
* data_mp may be reassigned with a b_next chain of packets if fragments
* needed to be collected for a proper policy check.
*
* Always frees ipsec_mp, but only frees data_mp if returns B_FALSE. This
* function calls ip_drop_packet() on data_mp if need be.
*
* NOTE: outer_hdr_len is signed. If it's a negative value, the caller
* is inspecting an ICMP packet.
*/
{
sel.ips_is_icmp_inv_acq = 0;
if (outer_ipv4 != NULL) {
} else {
}
if (outer_hdr_len < 0) {
outer_hdr_len = (-outer_hdr_len);
} else {
}
/*
* We need to perform full Tunnel-Mode enforcement,
* and we need to have inner-header data for such enforcement.
*
* See ipsec_init_inbound_sel() for the 0x80000000 on inbound
* and on return.
*/
inner_ipv6, flags);
switch (rc) {
case SELRET_NOMEM:
return (B_FALSE);
case SELRET_TUNFRAG:
/*
* At this point, if we're cleartext, we don't want
* to go there.
*/
return (B_FALSE);
}
/*
* Data is cached, fragment chain is not
* complete. I consume ipsec_mp and data_mp
*/
return (B_FALSE);
}
/*
* If we get here, we have a full fragment chain.
* Reacquire headers and selectors from first fragment.
*/
if (inner_ipv4 != NULL) {
} else {
}
/* Use SEL_NONE so we always get ports! */
switch (rc) {
case SELRET_SUCCESS:
/*
* Get to same place as first caller's
* SELRET_SUCCESS case.
*/
break;
case SELRET_NOMEM:
return (B_FALSE);
case SELRET_BADPKT:
return (B_FALSE);
case SELRET_TUNFRAG:
/* FALLTHRU */
default:
" returns bizarro 0x%x", rc);
/* Guaranteed panic! */
return (B_FALSE);
}
/* FALLTHRU */
case SELRET_SUCCESS:
/*
* Common case:
* No per-port policy or a non-fragment. Keep going.
*/
break;
case SELRET_BADPKT:
/*
* We may receive ICMP (with IPv6 inner) packets that
* trigger this return value. Send 'em in for
* enforcement checking.
*/
"sending 'bad packet' in for enforcement");
break;
default:
"ipsec_init_inbound_sel() returns bizarro 0x%x",
rc);
return (B_FALSE);
}
if (is_icmp) {
/*
*/
}
/* find_policy_head() */
if (!retval) {
/*
* XXX should never get here with
* tunnel reassembled fragments?
*/
NULL,
}
return (retval);
}
/*
* NOTE: The following releases pol's reference and
* calls ip_drop_packet() for me on NULL returns.
*
* "sel" is still good here, so let's use it!
*/
}
/*
* Else fallthru and check the global policy on the outer
* header(s) if this tunnel is an old-style transport-mode
* one. Drop the packet explicitly (no policy entry) for
* a new-style tunnel-mode tunnel.
*/
NULL,
return (B_FALSE);
}
}
/*
* NOTE: If we reach here, we will not have packet chains from
* fragcache_add(), because the only way I get chains is on a
* tunnel-mode tunnel, which either returns with a pass, or gets
* hit by the ip_drop_packet_chain() call right above here.
*/
/* If no per-tunnel security, check global policy now. */
/*
* This is an ICMP message with an ipsec_mp
* attached. We should accept it.
*/
return (B_TRUE);
}
return (B_FALSE);
}
/*
* The following assertion is valid because only the tun module alters
* the mblk chain - stripping the outer header by advancing mp->b_rptr.
*/
if (is_icmp) {
/*
* For ICMP packets, "outer_ipvN" is set to the outer header
* that is *INSIDE* the ICMP payload. For global policy
* order to construct selectors appropriately. See "ripha"
* constructions in ip.c. To avoid a bug like 6478464 (see
* in the packet, and reverse if after the call to
* ipsec_check_global_policy().
*/
if (outer_ipv4 != NULL) {
} else {
}
}
/* NOTE: Frees message if it returns NULL. */
return (B_FALSE);
}
if (is_icmp) {
/* Set things back to normal. */
if (outer_ipv4 != NULL) {
} else {
/* No need for ASSERT()s now. */
}
}
/*
* At this point, we pretend it's a cleartext accepted
* packet.
*/
return (B_TRUE);
}
/*
* AVL comparison routine for our list of tunnel polheads.
*/
static int
{
int rc;
}
/*
* Free a tunnel policy node.
*/
void
{
}
void
{
}
/*
* Public interface to look up a tunnel security policy by name. Used by
* spdsock mostly. Returns "node" with a bumped refcnt.
*/
{
}
return (node);
}
/*
* Public interface to walk all tunnel security policies. Useful for spdsock
* DUMP operations. iterator() will not consume a reference.
*/
void
{
}
}
/*
* Initialize policy head. This can only fail if there's a memory problem.
*/
static boolean_t
{
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Create a tunnel policy node with "name". Set errno with
* ENOMEM if there's a memory problem, and EEXIST if there's an existing
* node.
*/
{
return (NULL);
}
return (NULL);
}
return (NULL);
}
goto nomem;
goto nomem;
}
goto nomem;
goto nomem;
}
return (newbie);
return (NULL);
}
/*
* We can't call the tun_t lookup function until tun is
* loaded, so create a dummy function to avoid symbol
* lookup errors on boot.
*/
/* ARGSUSED */
{
return (NULL); /* Always return NULL. */
}
/*
* Frag cache code, based on SunScreen 3.2 source
*/
#define IPSEC_FRAG_TTL_MAX 5
/*
* Note that the following parameters create 256 hash buckets
* with 1024 free entries to be distributed. Things are cleaned
* periodically and are attempted to be cleaned when there is no
* free space, but this system errs on the side of dropping packets
* over creating memory exhaustion. We may decide to make hash
* factor a tunable if this proves to be a bad decision.
*/
#define IPSEC_FRAG_HASH_FACTOR 4
(((id) / \
/* Maximum fragments per packet. 48 bytes payload x 1366 packets > 64KB */
#define IPSEC_MAX_FRAGS 1366
IPH_OFFSET) << 3)
/*
* Initialize an ipsec fragcache instance.
* Returns B_FALSE if memory allocation fails.
*/
{
int i;
kmem_zalloc(sizeof (ipsec_fragcache_entry_t *) *
return (B_FALSE);
ftemp = (ipsec_fragcache_entry_t *)
kmem_zalloc(sizeof (ipsec_fragcache_entry_t) *
return (B_FALSE);
}
for (i = 0; i < IPSEC_FRAG_HASH_SIZE; i++) {
ftemp++;
}
frag->itpf_expire_hint = 0;
return (B_TRUE);
}
void
{
int i;
/* Delete any existing fragcache entry chains */
for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
/* Returned fep is next in chain or NULL */
}
}
/*
* Chase the pointers back to the beginning
* of the memory allocation and then
* get rid of the allocated freelist
*/
/*
* XXX - If we ever dynamically grow the freelist
* then we'll have to free entries individually
* or determine how many entries or chunks we have
* grown since the initial allocation.
*/
sizeof (ipsec_fragcache_entry_t) *
/* Free the fragcache structure */
sizeof (ipsec_fragcache_entry_t *) *
}
}
/*
* Add a fragment to the fragment cache. Consumes mp if NULL is returned.
* Returns mp if a whole fragment has been assembled, NULL otherwise
*/
mblk_t *
{
int i;
int offset;
int last;
} else {
return (NULL);
}
&v6_proto_p)) {
/*
* Find upper layer protocol.
* If it fails we have a malformed packet
*/
return (NULL);
} else {
v6_proto = *v6_proto_p;
}
/*
* We think this is a fragment, but didn't find
* a fragment header. Something is wrong.
*/
return (NULL);
}
}
/* Anything to cleanup? */
/*
* This cleanup call could be put in a timer loop
* but it may actually be just as reasonable a decision to
* leave it here. The disadvantage is this only gets called when
* frags are added. The advantage is that it is not
* susceptible to race conditions like a time-based cleanup
* may be.
*/
itpf_time = gethrestime_sec();
/* Lookup to see if there is an existing entry */
if (is_v4)
else
if (is_v4) {
break;
} else {
break;
}
}
if (is_v4) {
#ifdef FRAGCACHE_DEBUG
iph->ipha_ident);
#endif
} else {
sizeof (ip6_t) - ip6_hdr_length;
#ifdef FRAGCACHE_DEBUG
"last = %d, id = %d, fraghdr = %p, spare_mp = %p\n",
#endif
}
/* check for bogus fragments and delete the entry */
return (NULL);
}
/* Not found, allocate a new entry */
/* see if there is some space */
return (NULL);
}
}
if (is_v4) {
sizeof (struct in_addr));
sizeof (struct in_addr));
} else {
sizeof (struct in6_addr));
sizeof (struct in6_addr));
}
itpf_time = gethrestime_sec();
fep->itpfe_last = 0;
fep->itpfe_depth = 0;
}
/* Insert it in the frag list */
/* List is in order by starting offset of fragments */
int nfirstbyte, nlastbyte;
int hdr_len;
/*
* Determine outer header type and length and set
* pointers appropriately
*/
hdr_len = ((outer_hdr_len != 0) ?
IPH_HDR_LENGTH(oiph) : 0);
} else {
return (NULL);
}
}
/*
* Determine inner header type and length and set
* pointers appropriately
*/
if (is_v4) {
/* Was v6 outer */
}
} else {
return (NULL);
}
&nip6_hdr_length, &nv6_proto_p)) {
return (NULL);
}
sizeof (ip6_t) - nip6_hdr_length;
}
/* Check for overlapping fragments */
/*
* Overlap Check:
* ~~~~--------- # Check if the newly
* ~ ndata_mp| # received fragment
* ~~~~--------- # overlaps with the
* ---------~~~~~~ # current fragment.
* | mp ~
* ---------~~~~~~
*/
if (is_v4) {
} else {
}
firstbyte)) {
/* Overlapping data does not match */
return (NULL);
}
/* Part of defense for jolt2.c fragmentation attack */
/*
* Check for identical or subset fragments:
* ---------- ~~~~--------~~~~~
* | nmp | or ~ nmp ~
* ---------- ~~~~--------~~~~~
* ---------- ------
* | mp | | mp |
* ---------- ------
*/
return (NULL);
}
}
/* Correct location for this fragment? */
if (firstbyte <= nfirstbyte) {
/*
* Check if the tail end of the new fragment overlaps
* with the head of the current fragment.
* --------~~~~~~~
* | nmp ~
* --------~~~~~~~
* ~~~~~--------
* ~ mp |
* ~~~~~--------
*/
if (lastbyte > nfirstbyte) {
/* Fragments overlap */
if (is_v4) {
} else {
}
- nfirstbyte)) {
/* Overlap mismatch */
return (NULL);
}
}
/*
* Fragment does not illegally overlap and can now
* be inserted into the chain
*/
break;
}
}
} else {
}
if (last)
/* Part of defense for jolt2.c fragmentation attack */
return (NULL);
}
/* Check for complete packet */
if (!fep->itpfe_last) {
#ifdef FRAGCACHE_DEBUG
#endif
return (NULL);
}
#ifdef FRAGCACHE_DEBUG
#endif
offset = 0;
int hdr_len;
hdr_len = ((outer_hdr_len != 0) ?
IPH_HDR_LENGTH(oiph) : 0);
} else {
return (NULL);
}
&ip6_hdr_length, &v6_proto_p);
}
if (is_v4) {
/* Was v6 outer */
}
} else {
return (NULL);
}
&ip6_hdr_length, &v6_proto_p)) {
return (NULL);
}
v6_proto = *v6_proto_p;
sizeof (ip6_t) - ip6_hdr_length;
}
/*
* If this fragment is greater than current offset,
* we have a missing fragment so return NULL
*/
#ifdef FRAGCACHE_DEBUG
/*
* Note, this can happen when the last frag
* gets sent through because it is smaller
* than the MTU. It is not necessarily an
* error condition.
*/
"missing fragment: firstbyte = %d, offset = %d, "
#endif
return (NULL);
}
/*
* If we are at the last fragment, we have the complete
* packet, so rechain things and return it to caller
* for processing
*/
/* It is an invalid "ping-o-death" packet */
/* Discard it */
return (NULL);
}
#ifdef FRAGCACHE_DEBUG
#endif
/*
* For inbound case, mp has ipsec_in b_next'd chain
* For outbound case, it is just data mp chain
*/
return (mp);
}
/*
* Update new ending offset if this
* fragment extends the packet
*/
}
/* Didn't find last fragment, so return NULL */
return (NULL);
}
static void
{
int i;
int earlyexp;
int earlyi = 0;
itpf_time = gethrestime_sec();
for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
while (fep) {
/* found */
} else {
earlyi = i;
}
}
}
}
/* if (!found) */
}
static ipsec_fragcache_entry_t *
{
/* Free up any fragment list still in cache entry */
/* unlink from head of hash chain */
/* link into free list */
return (nextp);
}
/* maybe should use double linked list to make update faster */
/* must be past front of chain */
while (targp) {
/* unlink from hash chain */
/* link into free list */
return (nextp);
}
}
/* NOTREACHED */
return (NULL);
}