/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/sysmacros.h>
#include <net/pfpolicy.h>
#include <inet/ipsec_info.h>
#include <inet/ipsec_impl.h>
#include <inet/ipsecesp.h>
#include <inet/ipclassifier.h>
/*
* This source file contains Security Association Database (SADB) common
* routines. They are linked in with the AH module. Since AH has no chance
* of falling under export control, it was safe to link it in there.
*/
netstack_t *);
netstack_t *);
static void lifetime_fuzz(ipsa_t *);
static void init_ipsa_pair(ipsap_t *);
static void destroy_ipsa_pair(ipsap_t *);
/*
* ipsacq_maxpackets is defined here to make it tunable
*/
extern uint64_t ipsacq_maxpackets;
} \
}
else \
} \
}
/* wrap the macro so we can pass it as a function pointer */
void
{
}
/*
* We presume that sizeof (long) == sizeof (time_t) and that time_t is
* a signed type.
*/
/*
* PF_KEY gives us lifetimes in uint64_t seconds. We presume that
* time_t is defined to be a signed type with the same range as
* "long". On ILP32 systems, we thus run the risk of wrapping around
* at end of time, as well as "overwrapping" the clock back around
* into a seemingly valid but incorrect future date earlier than the
* desired expiration.
*
* In order to avoid odd behavior (either negative lifetimes or loss
* of high order bits) when someone asks for bizarrely long SA
* lifetimes, we do a saturating add for expire times.
*
* We presume that ILP32 systems will be past end of support life when
* the 32-bit time_t overflows (a dangerous assumption, mind you..).
*
* On LP64, 2^64 seconds are about 5.8e11 years, at which point we
* will hopefully have figured out clever ways to avoid the use of
* fixed-sized integers in computation.
*/
static time_t
{
/*
* Clip delta to the maximum possible time_t value to
* prevent "overwrapping" back into a shorter-than-desired
* future time.
*/
/*
* This sum may still overflow.
*/
/*
* .. so if the result is less than the base, we overflowed.
*/
return (sum);
}
/*
* Callers of this function have already created a working security
* association, and have found the appropriate table & hash chain. All this
* function does is check duplicates, and insert the SA. The caller needs to
* hold the hash bucket lock and increment the refcnt before insertion.
*
* Return 0 if success, EEXIST if collision.
*/
int
{
/*
* Find insertion point (pointed to with **ptpn). Insert at the head
* of the list unless there's an unspecified source address, then
* insert it after the last SA with a specified source address.
*
* BTW, you'll have to walk the whole chain, matching on {DST, SPI}
* checking for collisions.
*/
return (EEXIST);
}
}
}
}
return (0);
}
/*
* Free a security association. Its reference count is 0, which means
* I must free it. The SA must be unlocked and must not be linked into
* any fanout list.
*/
static void
{
}
}
}
/* bzero() these fields for paranoia's sake. */
}
}
}
}
}
}
/*
* Unlink a security association from a hash bucket. Assume the hash bucket
* lock is held, but the association's lock is not.
*
* Note that we do not bump the bucket's generation number here because
* we might not be making a visible change to the set of visible SA's.
* All callers MUST bump the bucket's generation number before they unlock
 * the bucket if they use sadb_unlinkassoc to permanently remove an SA which
* was present in the bucket at the time it was locked.
*/
void
{
/* These fields are protected by the link lock. */
}
/* This may destroy the SA. */
}
void
{
if (cl_inet_deletespi &&
}
}
/*
* Create a larval security association with the specified SPI. All other
* fields are zeroed.
*/
static ipsa_t *
netstack_t *ns)
{
/*
* Allocate...
*/
/* Can't make new larval SA. */
return (NULL);
}
/* Assigned requested SPI, assume caller does SPI allocation magic. */
/*
* Copy addresses...
*/
/*
* Set common initialization values, including refcnt.
*/
/*
* There aren't a lot of other common initialization values, as
* they are copied in from the PF_KEY message.
*/
return (newbie);
}
/*
* Call me to initialize a security association fanout.
*/
static int
{
int i;
return (ENOMEM);
for (i = 0; i < size; i++) {
}
return (0);
}
/*
* Call me to initialize an acquire fanout
*/
static int
{
int i;
return (ENOMEM);
for (i = 0; i < size; i++) {
}
return (0);
}
/*
* Attempt to initialize an SADB instance. On failure, return ENOMEM;
* caller must clean up partial allocations.
*/
static int
{
return (ENOMEM);
return (ENOMEM);
return (ENOMEM);
return (0);
}
/*
* Call me to initialize an SADB instance; fall back to default size on failure.
*/
static void
netstack_t *ns)
{
if (size < IPSEC_DEFAULT_HASH_SIZE)
"Unable to allocate %u entry IPv%u %s SADB hash table",
}
}
/*
* Initialize an SADB-pair.
*/
void
{
if (type == SADB_SATYPE_AH) {
} else {
}
}
/*
* Deliver a single SADB_DUMP message representing a single SA. This is
* called many times by sadb_dump().
*
* If the return value of this is ENOBUFS (not the same as ENOMEM), then
* the caller should take that as a hint that dupb() on the "original answer"
* failed, and that perhaps the caller should try again with a copyb()ed
* "original answer".
*/
static int
{
return (ENOBUFS);
return (ENOMEM);
}
/* Just do a putnext, and let keysock deal with flow control. */
return (0);
}
/*
* Common function to allocate and prepare a keysock_out_t M_CTL message.
*/
mblk_t *
{
}
return (mp);
}
/*
* Perform an SADB_DUMP, spewing out every SA in an array of SA fanouts
* to keysock.
*/
static int
{
int i, error = 0;
/*
* For each IPSA hash bucket do:
* - Hold the mutex
* - Walk each entry, doing an sadb_dump_deliver() on it.
*/
if (original_answer == NULL)
return (ENOMEM);
current = gethrestime_sec();
for (i = 0; i < num_entries; i++) {
continue;
if ((active_time != 0) &&
continue;
/* Ran out of dupb's. Try a copyb. */
if (new_original_answer == NULL) {
} else {
}
}
if (error != 0)
break; /* out of for loop. */
}
if (error != 0)
break; /* out of for loop. */
}
return (error);
}
/*
* Dump an entire SADB; outbound first, then inbound.
*/
int
{
int error;
}
/* Dump outbound */
if (error)
return (error);
/* Dump inbound */
}
/*
* Generic sadb table walker.
*
* Call "walkfn" for each SA in each bucket in "table"; pass the
* bucket, the entry and "cookie" to the callback function.
* Take care to ensure that walkfn can delete the SA without screwing
* up our traverse.
*
* The bucket is locked for the duration of the callback, both so that the
* callback can just call sadb_unlinkassoc() when it wants to delete something,
* and so that no new entries are added while we're walking the list.
*/
static void
void *cookie)
{
int i;
for (i = 0; i < numentries; i++) {
}
}
}
/*
* Call me to free up a security association fanout. Use the forever
* variable to indicate freeing up the SAs (forever == B_FALSE, e.g.
* an SADB_FLUSH message), or destroying everything (forever == B_TRUE,
* when a module is unloaded).
*/
static void
{
int i;
return;
for (i = 0; i < numentries; i++) {
if (inbound && cl_inet_deletespi &&
NULL);
}
}
if (forever)
}
if (forever) {
}
}
/*
* Entry points to sadb_destroyer().
*/
static void
{
/*
* Flush out each bucket, one at a time. Were it not for keysock's
* enforcement, there would be a subtlety where I could add on the
* heels of a flush. With keysock's enforcement, however, this
* makes ESP's job easy.
*/
/* For each acquire, destroy it; leave the bucket mutex alone. */
}
static void
{
/* For each acquire, destroy it, including the bucket mutex. */
}
void
{
}
void
{
}
}
/*
* Check hard vs. soft lifetimes. If there's a reality mismatch (e.g.
* soft lifetimes > hard lifetimes) return an appropriate diagnostic for
* EINVAL.
*/
int
{
return (0);
if (hard->sadb_lifetime_allocations != 0 &&
soft->sadb_lifetime_allocations != 0 &&
return (SADB_X_DIAGNOSTIC_ALLOC_HSERR);
if (hard->sadb_lifetime_bytes != 0 &&
soft->sadb_lifetime_bytes != 0 &&
return (SADB_X_DIAGNOSTIC_BYTES_HSERR);
if (hard->sadb_lifetime_addtime != 0 &&
soft->sadb_lifetime_addtime != 0 &&
return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);
if (hard->sadb_lifetime_usetime != 0 &&
soft->sadb_lifetime_usetime != 0 &&
return (SADB_X_DIAGNOSTIC_USETIME_HSERR);
if (hard->sadb_lifetime_addtime != 0 &&
idle->sadb_lifetime_addtime != 0 &&
return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);
if (soft->sadb_lifetime_addtime != 0 &&
idle->sadb_lifetime_addtime != 0 &&
return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);
if (hard->sadb_lifetime_usetime != 0 &&
idle->sadb_lifetime_usetime != 0 &&
return (SADB_X_DIAGNOSTIC_USETIME_HSERR);
if (soft->sadb_lifetime_usetime != 0 &&
idle->sadb_lifetime_usetime != 0 &&
return (SADB_X_DIAGNOSTIC_USETIME_HSERR);
}
return (0);
}
/*
* Sanity check sensitivity labels.
*
* For now, just reject labels on unlabeled systems.
*/
int
{
if (!is_system_labeled()) {
return (SADB_X_DIAGNOSTIC_BAD_LABEL);
return (SADB_X_DIAGNOSTIC_BAD_LABEL);
}
return (0);
}
/*
* Clone a security association for the purposes of inserting a single SA
* into inbound and outbound tables respectively. This function should only
* be called from sadb_common_add().
*/
static ipsa_t *
{
return (NULL);
/* Copy over what we can. */
/* bzero and initialize locks, in case *_init() allocates... */
/*
* While somewhat dain-bramaged, the most graceful way to
* recover from errors is to keep plowing through the
* allocations, and getting what I can. It's easier to call
* sadb_freeassoc() on the stillborn clone when all the
* pointers aren't pointing to the parent's data.
*/
} else {
}
(char *)&newbie->ipsa_mac_len;
}
}
} else {
}
}
}
}
if (error) {
return (NULL);
}
return (newbie);
}
/*
* Initialize a SADB address extension at the address specified by addrext.
* Return a pointer to the end of the new address extension.
*/
static uint8_t *
{
int addrext_len;
int sin_len;
return (NULL);
return (NULL);
addrext->sadb_address_reserved = 0;
switch (af) {
case AF_INET:
return (NULL);
break;
case AF_INET6:
return (NULL);
break;
}
return (cur);
}
/*
* Construct a key management cookie extension.
*/
static uint8_t *
{
return (NULL);
return (NULL);
kmcext->sadb_x_kmc_reserved = 0;
return (cur);
}
/*
* Given an original message header with sufficient space following it, and an
* SA, construct a full PF_KEY message with all of the relevant extensions.
* This is mostly used for SADB_GET, and SADB_DUMP.
*/
static mblk_t *
{
/*
* The following are pointers into the PF_KEY message this PF_KEY
* message creates.
*/
sadb_ident_t *ident;
/* These indicate the presence of the above extension fields. */
/* First off, figure out the allocation length for this message. */
/*
* Constant stuff. This includes base, SA, address (src, dst),
* and lifetime (current).
*/
sizeof (sadb_lifetime_t);
switch (fam) {
case AF_INET:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
case AF_INET6:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
default:
return (NULL);
}
/*
* Allocate TWO address extensions, for source and destination.
* (Thus, the * 2.)
*/
alloclen += sizeof (sadb_x_pair_t);
} else {
}
/* How 'bout other lifetimes? */
alloclen += sizeof (sadb_lifetime_t);
}
alloclen += sizeof (sadb_lifetime_t);
}
alloclen += sizeof (sadb_lifetime_t);
} else {
}
/* Inner addresses. */
if (ipsa->ipsa_innerfam != 0) {
switch (pfam) {
case AF_INET6:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
case AF_INET:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
default:
"IPsec SADB: Proxy length failure.\n");
break;
}
}
/* For the following fields, assume that length != 0 ==> stuff */
if (ipsa->ipsa_authkeylen != 0) {
sizeof (uint64_t));
}
if (ipsa->ipsa_encrkeylen != 0) {
} else {
}
}
osensinteg = B_TRUE;
}
/*
* Must use strlen() here for lengths. Identities use NULL
* pointers to indicate their nonexistence.
*/
sizeof (uint64_t));
}
sizeof (uint64_t));
}
alloclen += sizeof (sadb_x_kmc_t);
if (ipsa->ipsa_replay != 0) {
alloclen += sizeof (sadb_x_replay_ctr_t);
}
/* Make sure the allocation length is a multiple of 8 bytes. */
/* XXX Possibly make it esballoc, with a bzero-ing free_ftn. */
return (NULL);
/* We do not support the concept. */
lt->sadb_lifetime_allocations = 0;
if (hard) {
lt++;
}
if (soft) {
lt++;
}
if (idle) {
lt++;
}
/* NOTE: Don't fill in ports here if we are a tunnel-mode SA. */
goto bail;
}
goto bail;
}
IPPROTO_UDP, 0);
goto bail;
}
}
IPPROTO_UDP, 0);
goto bail;
}
}
/* If we are a tunnel-mode SA, fill in the inner-selectors. */
if (isrc) {
goto bail;
}
}
if (idst) {
goto bail;
}
}
goto bail;
}
}
if (auth) {
key->sadb_key_reserved = 0;
}
if (encr) {
}
}
if (srcid) {
ident = (sadb_ident_t *)walker;
ident->sadb_ident_exttype = SADB_EXT_IDENTITY_SRC;
ident->sadb_ident_id = 0;
ident->sadb_ident_reserved = 0;
(void) strcpy((char *)(ident + 1),
}
if (dstid) {
ident = (sadb_ident_t *)walker;
ident->sadb_ident_exttype = SADB_EXT_IDENTITY_DST;
ident->sadb_ident_id = 0;
ident->sadb_ident_reserved = 0;
(void) strcpy((char *)(ident + 1),
}
if (sensinteg) {
}
if (osensinteg) {
if (ipsa->ipsa_mac_exempt)
}
if (paired) {
}
if (ipsa->ipsa_replay != 0) {
repl_ctr->sadb_x_rc_replay64 = 0;
}
bail:
/* Pardon any delays... */
return (mp);
}
/*
* Strip out key headers or unmarked headers (SADB_EXT_KEY_*, SADB_EXT_UNKNOWN)
* and adjust base message accordingly.
*
* Assume message is pulled up in one piece of contiguous memory.
*
* Say if we start off with:
*
* +------+----+-------------+-----------+---------------+---------------+
* | base | SA | source addr | dest addr | rsrvd. or key | soft lifetime |
* +------+----+-------------+-----------+---------------+---------------+
*
* we will end up with
*
* +------+----+-------------+-----------+---------------+
* | base | SA | source addr | dest addr | soft lifetime |
* +------+----+-------------+-----------+---------------+
*/
static void
{
int copylen;
/*
* Aha! I found a header to be erased.
*/
/*
* If I had a previous header to be erased,
* copy over it. I can get away with just
* copying backwards because the target will
* always be 8 bytes behind the source.
*/
copylen);
} else {
}
} else {
}
}
if (copylen != 0)
}
/* Adjust samsg. */
/* Assume all of the rest is cleared by caller in sadb_pfkey_echo(). */
}
/*
* AH needs to send an error to PF_KEY. Assume mp points to an M_CTL
* followed by an M_DATA with a PF_KEY message in it. The serial of
* the sending keysock instance is included.
*/
void
{
/*
* Enough functions call this to merit a NULL queue check.
*/
return;
}
/*
* Only send the base message up in the event of an error.
* Don't worry about bzero()-ing, because it was probably bogus
* anyway.
*/
if (diagnostic != SADB_X_DIAGNOSTIC_PRESET)
}
/*
* Send a successful return packet back to keysock via the queue in pfkey_q.
*
* Often, an SA is associated with the reply message, it's passed in if needed,
* and NULL if not. BTW, that ipsa will have its refcnt appropriately held,
* and the caller will release said refcnt.
*/
void
{
switch (samsg->sadb_msg_type) {
case SADB_ADD:
case SADB_UPDATE:
case SADB_X_UPDATEPAIR:
case SADB_X_DELPAIR_STATE:
case SADB_FLUSH:
case SADB_DUMP:
/*
* I have all of the message already. I just need to strip
* out the keying material and echo the message back.
*
* NOTE: for SADB_DUMP, the function sadb_dump() did the
* work. When DUMP reaches here, it should only be a base
* message.
*/
/* Assume PF_KEY message is contiguous. */
}
break;
case SADB_GET:
/*
* Do a lot of work here, because of the ipsa I just found.
* First construct the new PF_KEY message, then abandon
* the old one.
*/
return;
}
break;
case SADB_DELETE:
case SADB_X_DELPAIR:
goto justecho;
/*
* Because listening KMds may require more info, treat
* DELETE like a special case of GET.
*/
return;
}
break;
default:
return;
}
/* ksi is now null and void. */
/* We're ready to send... */
}
/*
* Set up a global pfkey_q instance for AH, ESP, or some other consumer.
*/
void
{
/*
* First, check atomically that I'm the first and only keysock
* instance.
*
* Use OTHERQ(q), because qreply(q, mp) == putnext(OTHERQ(q), mp),
* and I want this module to say putnext(*_pfkey_q, mp) for PF_KEY
* messages.
*/
return;
}
/*
* If we made it past the atomic_cas_ptr, then we have "exclusive"
* access to the timeout handle. Fire it off after the default ager
* interval.
*/
}
/*
* Normalize IPv4-mapped IPv6 addresses (and prefixes) as appropriate.
*
* Check addresses themselves for wildcard or multicast.
*/
int
netstack_t *ns)
{
/* Assign both sockaddrs, the compiler will do the right thing. */
/*
* Convert to an AF_INET sockaddr. This means the
* return messages will have the extra space, but have
* AF_INET sockaddrs instead of AF_INET6.
*
* Yes, RFC 2367 isn't clear on what to do here w.r.t.
* mapped addresses, but since AF_INET6 ::ffff:<v4> is
 * equal to AF_INET <v4>, it shouldn't be a huge
* problem.
*/
normalized = B_TRUE;
}
switch (ext->sadb_ext_type) {
case SADB_EXT_ADDRESS_SRC:
break;
case SADB_EXT_ADDRESS_DST:
break;
break;
break;
break;
break;
/* There is no default, see above ASSERT. */
}
bail:
serial);
} else {
/*
* Scribble in sadb_msg that we got passed in.
* Overload "mp" to be an sadb_msg pointer.
*/
}
return (KS_IN_ADDR_UNKNOWN);
}
/*
* We need only check for prefix issues.
*/
/* Set diagnostic now, in case we need it later. */
if (normalized)
/*
* Verify and mask out inner-addresses based on prefix length.
*/
goto bail;
} else {
/*
* ip_plen_to_mask_v6() returns NULL if the value in
* question is out of range.
*/
goto bail;
}
/* We don't care in these cases. */
return (KS_IN_ADDR_DONTCARE);
}
/* Check the easy ones now. */
return (KS_IN_ADDR_MBCAST);
return (KS_IN_ADDR_UNSPEC);
/*
* At this point, we're a unicast IPv6 address.
*
* by what zone we're in when we go to zone-aware IPsec.
*/
IRE_LOCAL) {
/* Hey hey, it's local. */
return (KS_IN_ADDR_ME);
}
} else {
return (KS_IN_ADDR_UNSPEC);
return (KS_IN_ADDR_MBCAST);
/*
* At this point we're a unicast or broadcast IPv4 address.
*
* Check if the address is IRE_BROADCAST or IRE_LOCAL.
*
* by what zone we're in when we go to zone-aware IPsec.
*/
switch (type) {
case IRE_LOCAL:
return (KS_IN_ADDR_ME);
case IRE_BROADCAST:
return (KS_IN_ADDR_MBCAST);
}
}
return (KS_IN_ADDR_NOTME);
}
/*
* Address normalizations and reality checks for inbound PF_KEY messages.
*
* For the case of src == unspecified AF_INET6, and dst == AF_INET, convert
* the source to AF_INET. Do the same for the inner sources.
*/
{
int rc;
if (rc == KS_IN_ADDR_UNKNOWN)
return (B_FALSE);
if (rc == KS_IN_ADDR_MBCAST) {
return (B_FALSE);
}
}
if (rc == KS_IN_ADDR_UNKNOWN)
return (B_FALSE);
if (rc == KS_IN_ADDR_UNSPEC) {
return (B_FALSE);
}
}
/*
* NAT-Traversal addrs are simple enough to not require all of
* the checks in sadb_addrcheck(). Just normalize or reject if not
* AF_INET.
*/
/*
* Local NAT-T addresses never use an IRE_LOCAL, so it should
* always be NOTME, or UNSPEC (to handle both tunnel mode
* AND local-port flexibility).
*/
ksi->ks_in_serial);
return (B_FALSE);
}
src = (struct sockaddr_in *)
ksi->ks_in_serial);
return (B_FALSE);
}
}
/*
* Remote NAT-T addresses never use an IRE_LOCAL, so it should
* always be NOTME, or UNSPEC if it's a tunnel-mode SA.
*/
if (rc != KS_IN_ADDR_NOTME &&
rc == KS_IN_ADDR_UNSPEC)) {
ksi->ks_in_serial);
return (B_FALSE);
}
src = (struct sockaddr_in *)
ksi->ks_in_serial);
return (B_FALSE);
}
}
ksi->ks_in_serial);
return (B_FALSE);
}
== KS_IN_ADDR_UNKNOWN ||
return (B_FALSE);
isrc = (struct sockaddr_in *)
1);
idst = (struct sockaddr_in6 *)
1);
ksi->ks_in_serial);
return (B_FALSE);
}
ksi->ks_in_serial);
return (B_FALSE);
} else {
}
return (B_TRUE);
/* Can't set inner and outer ports in one SA. */
ksi->ks_in_serial);
return (B_FALSE);
}
return (B_TRUE);
if (srcext->sadb_address_proto == 0) {
} else if (dstext->sadb_address_proto == 0) {
} else {
/* Inequal protocols, neither were 0. Report error. */
ksi->ks_in_serial);
return (B_FALSE);
}
}
/*
* With the exception of an unspec IPv6 source and an IPv4
 * destination, address families MUST be matched.
*/
return (B_FALSE);
}
/*
* Convert "src" to AF_INET INADDR_ANY. We rely on sin_port being
* in the same place for sockaddr_in and sockaddr_in6.
*/
return (B_TRUE);
}
/*
* Set the results in "addrtype", given an IRE as requested by
* sadb_addrcheck().
*/
int
{
return (KS_IN_ADDR_MBCAST);
return (KS_IN_ADDR_ME);
return (KS_IN_ADDR_NOTME);
}
/*
* Match primitives..
* !!! TODO: short term: inner selectors
* ipv6 scope id (ifindex)
* longer term: zone id. sensitivity label. uid.
*/
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
#define M(a, b) (((a) == 0) || ((b) == 0) || ((a) == (b)))
#undef M
}
/*
* Common function which extracts several PF_KEY extensions for ease of
* SADB matching.
*
* XXX TODO: weed out ipsa_query_t fields not used during matching
* or afterwards?
*/
int
{
int i;
for (i = 0; i < IPSA_NMATCH; i++)
return (EINVAL);
}
return (EINVAL);
}
return (EINVAL);
}
*mfpp++ = sadb_match_spi;
}
else {
}
else {
}
else
*mfpp++ = sadb_match_dst_v6;
} else {
match &= ~IPSA_Q_DST;
}
return (EINVAL);
}
*mfpp++ = sadb_match_src_v6;
} else {
match &= ~IPSA_Q_SRC;
}
} else {
*mfpp++ = sadb_match_dst_v4;
} else {
match &= ~IPSA_Q_DST;
}
return (EINVAL);
}
*mfpp++ = sadb_match_src_v4;
} else {
match &= ~IPSA_Q_SRC;
}
}
*mfpp++ = sadb_match_dstid;
}
*mfpp++ = sadb_match_srcid;
}
*mfpp++ = sadb_match_kmc;
}
else
} else {
}
if (match & IPSA_Q_INBOUND) {
} else {
}
if (match & IPSA_Q_OUTBOUND) {
} else {
}
} else {
}
return (0);
}
/*
* Match an initialized query structure with a security association;
* return B_TRUE on a match, B_FALSE on a miss.
* Applies match functions set up by sadb_form_query() until one returns false.
*/
{
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Assumes that we're called with *head locked, no other locks held;
* Conveniently, and not coincidentally, this is both what sadb_walker
* gives us and also what sadb_unlinkassoc expects.
*/
struct sadb_purge_state
{
};
static void
{
return;
}
}
}
/*
* Common code to purge an SA with a matching src or dst address.
* Don't kill larval SA's in such a purge.
*/
int
{
if (error != 0)
return (error);
/*
* This is simple, crude, and effective.
* Unimplemented optimizations (TBD):
* - we can limit how many places we search based on where we
* think the SA is filed.
* - if we get a dst address, we can hash based on dst addr to find
* the correct bucket in the outbound table.
*/
NULL);
return (0);
}
static void
{
return;
}
/*
 * The isaf_t *, which is passed in, is always an outbound bucket,
* and we are preserving the outbound-then-inbound hash-bucket lock
* ordering. The sadb_walker() which triggers this function is called
* only on the outbound fanout, and the corresponding inbound bucket
* lock is safe to acquire here.
*/
if (entry->ipsa_haspeer) {
} else {
}
if (peer_assoc != NULL) {
}
}
static int
{
int error;
if (error != 0)
return (error);
return (0);
}
/*
*/
int
{
if (sadb_msg_type == SADB_X_DELPAIR_STATE)
&sq, diagnostic);
if (error != 0)
return (error);
if (error != 0) {
return (error);
}
if (echo_target == NULL)
/*
* Bucket locks will be required if SA is actually unlinked.
* get_ipsa_pair() returns valid hash bucket pointers even
* if it can't find a pair SA pointer. To prevent a potential
* deadlock, always lock the outbound bucket before the inbound.
*/
if (ipsapp.in_inbound_table) {
} else {
}
}
/*
* sadb_torch_assoc() releases the ipsa_lock
* and calls sadb_unlinkassoc() which does a
* IPSA_REFRELE.
*/
}
if (sadb_msg_type == SADB_X_DELPAIR ||
}
} else {
/*
* Only half of the "pair" has been deleted.
* Update the remaining SA and remove references
* to its pair SA, which is now gone.
*/
}
} else if (sadb_msg_type == SADB_X_DELPAIR) {
}
}
if (error == 0)
return (error);
}
/*
* This function takes a sadb_sa_t and finds the ipsa_t structure
* and the isaf_t (hash bucket) that its stored under. If the security
* association has a peer, the ipsa_t structure and bucket for that security
* association are also searched for. The "pair" of ipsa_t's and isaf_t's
* are returned as a ipsap_t.
*
* The hash buckets are returned for convenience, if the calling function
* needs to use the hash bucket locks, say to remove the SA's, it should
* take care to observe the convention of locking outbound bucket then
* inbound bucket. The flag in_inbound_table provides direction.
*
* Note that a "pair" is defined as one (but not both) of the following:
*
* A security association which has a soft reference to another security
* association via its SPI.
*
* A security association that is not obviously "inbound" or "outbound" so
* it appears in both hash tables, the "peer" being the same security
* association in the other hash table.
*
* This function will return NULL if the ipsa_t can't be found in the
* inbound or outbound hash tables (not found). If only one ipsa_t is
* found, the pair ipsa_t will be NULL. Both isaf_t values are valid
* provided at least one ipsa_t is found.
*/
static int
{
/* Lock down both buckets. */
} else {
}
} else {
/* IPSA_F_OUTBOUND is set *or* no directions flags set. */
} else {
}
}
return (ESRCH);
}
return (0);
}
/*
* haspeer implies no sa_pairing, look for same spi
* in other hashtable.
*/
return (0);
}
if (pair_spi == 0) {
return (0);
}
/* found sa in outbound sadb, peer should be inbound */
if (ipsapp->in_inbound_table) {
/* Found SA in inbound table, pair will be in outbound. */
*(uint32_t *)pair_srcaddr);
} else {
*(uint32_t *)pair_srcaddr);
}
} else {
}
return (0);
}
/*
* Perform NAT-traversal cached checksum offset calculations here.
*/
static void
{
if (natt_rem_ext != NULL) {
/* Ensured by sadb_addrfix(). */
l_src = *src_addr_ptr;
l_rem = *natt_rem_ptr;
/* Instead of IPSA_COPY_ADDR(), just copy first 32 bits. */
/*
* We're 1's complement for checksums, so check for wraparound
* here.
*/
l_src--;
}
if (natt_loc_ext != NULL) {
/* Ensured by sadb_addrfix(). */
/* Instead of IPSA_COPY_ADDR(), just copy first 32 bits. */
/*
* NAT-T port agility means we may have natt_loc_ext, but
* only for a local-port change.
*/
/*
* We're 1's complement for checksums, so check for
* wraparound here.
*/
l_dst--;
}
}
}
/*
* This function is called from consumers that need to insert a fully-grown
* security association into its tables. This function takes into account that
* SAs can be "inbound", "outbound", or "both". The "primary" and "secondary"
* hash bucket parameters are set in order of what the SA will be most of the
* time. (For example, an SA with an unspecified source, and a multicast
* destination will primarily be an outbound SA. OTOH, if that destination
* is unicast for this node, then the SA will primarily be inbound.)
*
* It takes a lot of parameters because even if clone is B_FALSE, this needs
* to check both buckets for purposes of collision.
*
* Return 0 upon success. Return various errnos (ENOMEM, EEXIST) for
* various error conditions. We may need to set samsg->sadb_x_msg_diagnostic
* with additional diagnostic information because there is at least one EINVAL
* case here.
*/
int
{
int salt_offset;
int error = 0;
int rcode;
return (EINVAL);
}
return (EINVAL);
}
return (EINVAL);
}
} else {
}
} else {
}
if (rcode == -1) {
return (EEXIST);
}
}
/*
* Check to see if the new SA will be cloned AND paired. The
* reason a SA will be cloned is the source or destination addresses
* are not specific enough to determine if the SA goes in the outbound
* or the inbound hash table, so its cloned and put in both. If
* the SA is paired, it's soft linked to another SA for the other
* direction. Keeping track and looking up SA's that are direction
* unspecific and linked is too hard.
*/
return (EINVAL);
}
if (!isupdate) {
return (ENOMEM);
}
if (srcext->sadb_address_proto != 0) {
/*
* Mismatched outer-packet protocol
* and inner-packet address family.
*/
error = EPROTOTYPE;
*diagnostic =
goto error;
} else {
/* Fill in with explicit protocol. */
}
}
} else {
if (srcext->sadb_address_proto != 0) {
/*
* Mismatched outer-packet protocol
* and inner-packet address family.
*/
error = EPROTOTYPE;
*diagnostic =
goto error;
} else {
/* Fill in with explicit protocol. */
}
}
}
/* Unique value uses inner-ports for Tunnel Mode... */
} else {
/* ... and outer-ports for Transport Mode. */
}
goto error;
}
goto error;
}
goto error;
}
/*
* If unspecified source address, force replay_wsize to 0.
* This is because an SA that has multiple sources of secure
* traffic cannot enforce a replay counter w/o synchronizing the
* senders.
*/
else
newbie->ipsa_replay_wsize = 0;
}
/*
* XXX CURRENT lifetime checks MAY BE needed for an UPDATE.
* The spec says that one can update current lifetimes, but
* that seems impractical, especially in the larval-to-mature
* update that this function performs.
*/
}
}
}
#ifdef IPSEC_LATENCY_TEST
#else
#endif
/* In case we have to round up to the next byte... */
goto error;
}
/*
* Pre-initialize the kernel crypto framework key
* structure.
*/
[newbie->ipsa_auth_alg];
(char *)&newbie->ipsa_mac_len;
} else {
}
if (error != 0) {
/*
* An error here indicates that alg is the wrong type
* (IE: not authentication) or its not in the alg tables
* created by ipsecalgs(1m), or Kcf does not like the
* parameters passed in with this algorithm, which is
* probably a coding error!
*/
goto error;
}
}
[newbie->ipsa_encr_alg];
}
} else {
}
/*
* The byte stream following the sadb_key_t is made up of:
* key bytes, [salt bytes], [IV initial value]
* All of these have variable length. The IV is typically
* randomly generated by this function and not passed in.
* By supporting the injection of a known IV, the whole
* IPsec subsystem and the underlying crypto subsystem
* can be tested with known test vectors.
*
* The keying material has been checked by ext_check()
* bits, whats left is the encryption key. If this is too
* short, ipsec_create_ctx_tmpl() will fail and the SA
* won't get created.
*
* set ipsa_encrkeylen to length of key only.
*/
/* In case we have to round up to the next byte... */
goto error;
}
/*
* Combined mode algs need a nonce. Copy the salt and
* IV into a buffer. The ipsa_nonce is a pointer into
* this buffer, some bytes at the start of the buffer
* may be unused, depends on the salt length. The IV
* is 64 bit aligned so it can be incremented as a
* uint64_t. Zero out key in samsg_t before freeing.
*/
sizeof (ipsec_nonce_t), KM_NOSLEEP);
goto error;
}
/*
* Initialize nonce and salt pointers to point
* to the nonce buffer. This is just in case we get
* bad data, the pointers will be valid, the data
* won't be.
*
* See sadb.h for layout of nonce.
*/
if (newbie->ipsa_saltlen != 0) {
}
/*
* repeat. Get a random value for the IV, make a
* wraps back to the initial value. If an Initial IV
* is passed in via PF_KEY, save this in the SA.
* Initialising IV for inbound is pointless as its
* taken from the inbound packet.
*/
if (!is_inbound) {
if (ekey->sadb_key_reserved != 0) {
} else {
(void) random_get_pseudo_bytes(
}
}
}
/*
* Pre-initialize the kernel crypto framework key
* structure.
*/
if (error != 0) {
/* See above for error explanation. */
goto error;
}
}
if (async)
/*
* Ptrs to processing functions.
*/
else
/*
* Certificate ID stuff.
*/
/*
* Can assume strlen() will return okay because ext_check() in
* keysock.c prepares the string for us.
*/
goto error;
}
}
/*
* Can assume strlen() will return okay because ext_check() in
* keysock.c prepares the string for us.
*/
goto error;
}
}
/*
* sensitivity label handling code:
* Convert sens + bitmap into cred_t, and associate it
* with the new SA.
*/
}
/*
* Likewise for outer sensitivity.
*/
}
if (error != 0) {
goto error;
}
if (effective_tsl != NULL) {
tsl = effective_tsl;
}
}
/*
* For exclusive stacks we set the zoneid to zero to operate
* as if in the global zone for tsol_compute_label_v4/v6
*/
} else {
}
if (error != 0) {
goto error;
}
}
if ((replayext->sadb_x_rc_replay32 == 0) &&
(replayext->sadb_x_rc_replay64 != 0)) {
error = EOPNOTSUPP;
goto error;
}
}
/* now that the SA has been updated, set its new state */
if (clone) {
} else {
if (!is_inbound) {
}
}
/*
* The less locks I hold when doing an insertion and possible cloning,
* the better!
*/
if (clone) {
if (newbie_clone == NULL) {
goto error;
}
}
/*
* Enter the bucket locks. The order of entry is outbound,
* inbound. We map "primary" and "secondary" into outbound and inbound
* based on the destination address type. If the destination address
* type is for a node that isn't mine (or potentially mine), the
* "primary" bucket is the outbound one.
*/
if (!is_inbound) {
/* primary == outbound */
} else {
/* primary == inbound */
}
/*
* sadb_insertassoc() doesn't increment the reference
* count. We therefore have to increment the
* reference count one more time to reflect the
* pointers of the table that reference this SA.
*/
if (isupdate) {
/*
* Unlink from larval holding cell in the "inbound" fanout.
*/
}
if (error != 0) {
/*
* Since sadb_insertassoc() failed, we must decrement the
* refcount again so the cleanup code will actually free
* the offending SA.
*/
goto error_unlock;
}
if (newbie_clone != NULL) {
if (error != 0) {
/* Collision in secondary table. */
goto error_unlock;
}
} else {
/* Collision in secondary table. */
/* Set the error, since ipsec_getassocbyspi() can't. */
goto error_unlock;
}
}
/* OKAY! So let's do some reality check assertions. */
/*
* We can exit the locks in any order. Only entrance needs to
* follow any protocol.
*/
/* update pair_spi if it exists. */
if (error)
return (error);
if (error != 0)
goto error;
} else {
/* update_pairing() sets diagnostic */
}
}
/* Common error point for this routine. */
if (error != 0) {
/* This SA is broken, let the reaper clean up. */
}
}
if (newbie_clone != NULL) {
}
if (error == 0) {
/*
* Construct favorable PF_KEY return message and send to
* keysock. Update the flags in the original keysock message
* to reflect the actual flags in the new SA.
* (Q: Do I need to pass "newbie"? If I do,
* make sure to REFHOLD, call, then REFRELE.)
*/
}
return (error);
}
/*
* Set the time of first use for a security association. Update any
* expiration times as a result.
*/
void
{
/*
* Caller does check usetime before calling me usually, and
* double-checking is better than a mutex_enter/exit hit.
*/
if (assoc->ipsa_usetime == 0) {
/*
* This is redundant for outbound SA's, as
* ipsec_getassocbyconn() sets the IPSA_F_USED flag already.
* Inbound SAs, however, have no such protection.
*/
/*
* After setting the use time, see if we have a use lifetime
* that would cause the actual SA expiration time to shorten.
*/
}
}
/*
* Send up a PF_KEY expire message for this association.
*/
static void
{
/* Don't bother sending if there's no queue. */
return;
mp = sadb_keysock_out(0);
/* cmn_err(CE_WARN, */
/* "sadb_expire_assoc: Can't allocate KEYSOCK_OUT.\n"); */
return;
}
switch (af) {
case AF_INET:
break;
case AF_INET6:
break;
default:
/* Won't happen unless there's a kernel bug. */
"sadb_expire_assoc: Unknown address length.\n");
return;
}
if (tunnel_mode) {
switch (assoc->ipsa_innerfam) {
case AF_INET:
break;
case AF_INET6:
break;
default:
/* Won't happen unless there's a kernel bug. */
"Unknown inner address length.\n");
return;
}
}
/* cmn_err(CE_WARN, */
/* "sadb_expire_assoc: Can't allocate message.\n"); */
return;
}
samsg->sadb_msg_errno = 0;
samsg->sadb_msg_reserved = 0;
samsg->sadb_msg_seq = 0;
samsg->sadb_msg_pid = 0;
/* We do not support the concept. */
} else {
expire->sadb_lifetime_bytes = 0;
}
if (tunnel_mode) {
}
/* Can just putnext, we're ready to go! */
}
/*
* "Age" the SA with the number of bytes that was used to protect traffic.
* Send an SADB_EXPIRE message if appropriate. Return B_TRUE if there was
* enough "charge" left in the SA to protect the data. Return B_FALSE
* otherwise. (If B_FALSE is returned, the association either was, or became
* DEAD.)
*/
{
if (assoc->ipsa_hardbyteslt != 0 &&
/*
* Send EXPIRE message to PF_KEY. May wish to pawn
* this off on another non-interrupt thread. Also
* unlink this SA immediately.
*/
if (sendmsg)
/*
* Set non-zero expiration time so sadb_age_assoc()
* will work when reaping.
*/
} /* Else someone beat me to it! */
} else if (assoc->ipsa_softbyteslt != 0 &&
/*
* Send EXPIRE message to PF_KEY. May wish to pawn
* this off on another non-interrupt thread.
*/
if (sendmsg)
} /* Else someone beat me to it! */
}
return (rc);
}
/*
* "Torch" an individual SA. Returns NULL, so it can be tail-called from
* sadb_age_assoc().
*/
static ipsa_t *
{
/*
* Force cached SAs to be revalidated..
*/
return (NULL);
}
/*
* Do various SA-is-idle activities depending on delta (the number of idle
*
* Return B_TRUE if I've sent a packet, because I have to drop the
* association's mutex before sending a packet out the wire.
*/
/* ARGSUSED */
static boolean_t
{
delta >= nat_t_interval &&
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Return "assoc" if haspeer is true and I send an expire. This allows
* the consumers' aging functions to tidy up an expired SA's peer.
*/
static ipsa_t *
{
(assoc->ipsa_hardexpiretime != 0))) &&
}
/*
* Check lifetimes. Fortunately, SA setup is done
* such that there are only two times to look at,
* softexpiretime, and hardexpiretime.
*
* Check hard first.
*/
if (assoc->ipsa_hardexpiretime != 0 &&
if (inbound) {
}
/*
* Send SADB_EXPIRE with hard lifetime, delay for unlinking.
*/
/*
* If the SA is paired or peered with another, put
* a copy on a list which can be processed later, the
* at the same time.
*
* If I return assoc, I have to bump up its reference
* count to keep with the ipsa_t reference count
* semantics.
*/
}
} else if (assoc->ipsa_softexpiretime != 0 &&
/*
* Send EXPIRE message to PF_KEY. May wish to pawn
* this off on another non-interrupt thread.
*/
if (assoc->ipsa_haspeer) {
/*
* If the SA has a peer, update the peer's state
* on SOFT_EXPIRE, this is mostly to prevent two
* expire messages from effectively the same SA.
*
* Don't care about paired SA's, then can (and should)
* be able to soft expire at different times.
*
* If I return assoc, I have to bump up its
* reference count to keep with the ipsa_t reference
* count semantics.
*/
}
} else if (assoc->ipsa_idletime != 0 &&
}
/*
* Need to handle Mature case
*/
}
} else {
/* Check idle time activities. */
}
if (!dropped_mutex)
return (retval);
}
/*
* Called by a consumer protocol to do ther dirty work of reaping dead
* Security Associations.
*
* NOTE: sadb_age_assoc() marks expired SA's as DEAD but only removed
* SA's that are already marked DEAD, so expired SA's are only reaped
* the second time sadb_ager() runs.
*/
void
{
int i;
/* Snapshot current time now. */
haspeerlist = NULL;
/*
* Do my dirty work. This includes aging real entries, aging
* larvals, and aging outstanding ACQUIREs.
*
* I hope I don't tie up resources for too long.
*/
/* Age acquires. */
for (i = 0; i < sp->sdb_hashsize; i++) {
}
}
/* Age inbound associations. */
for (i = 0; i < sp->sdb_hashsize; i++) {
/*
* Put SA's which have a peer or SA's which
* are paired on a list for processing after
* all the hash tables have been walked.
*
* sadb_age_assoc() increments the refcnt,
* effectively doing an IPSA_REFHOLD().
*/
/*
* Don't forget to REFRELE().
*/
continue; /* for loop... */
}
}
}
}
haspeerlist = NULL;
/* Age outbound associations. */
for (i = 0; i < sp->sdb_hashsize; i++) {
/*
* sadb_age_assoc() increments the refcnt,
* effectively doing an IPSA_REFHOLD().
*/
/*
* Don't forget to REFRELE().
*/
continue; /* for loop... */
}
}
}
}
/*
* Run a GC pass to clean out dead identities.
*/
}
/*
* Figure out when to reschedule the ager.
*/
{
/*
* See how long this took. If it took too long, increase the
* aging interval.
*/
/* XXX Rate limit this? Or recommend flush? */
"Too many SA's to age out in %d msec.\n",
intmax);
} else {
/* Double by shifting by one bit. */
interval <<= 1;
}
/*
* If I took less than half of the interval, then I should
* ratchet the interval back down. Never automatically
* shift below the default aging interval.
*
* NOTE:This even overrides manual setting of the age
* interval using NDD to lower the setting past the
* default. In other words, if you set the interval
* lower than the default, and your SADB gets too big,
* the interval will only self-lower back to the default.
*/
/* Halve by shifting one bit. */
interval >>= 1;
}
}
/*
* Update the lifetime values of an SA. This is the path an SADB_UPDATE
* message takes when updating a MATURE or DYING SA.
*/
static void
{
/*
* XXX RFC 2367 mentions how an SADB_EXT_LIFETIME_CURRENT can be
* passed in during an update message. We currently don't handle
* these.
*/
if (hard->sadb_lifetime_bytes != 0)
if (hard->sadb_lifetime_usetime != 0)
if (hard->sadb_lifetime_addtime != 0)
if (assoc->ipsa_hardaddlt != 0) {
}
if (assoc->ipsa_harduselt != 0 &&
}
if (hard->sadb_lifetime_allocations != 0)
}
if (soft->sadb_lifetime_bytes != 0) {
if (soft->sadb_lifetime_bytes >
} else {
}
}
if (soft->sadb_lifetime_usetime != 0) {
if (soft->sadb_lifetime_usetime >
assoc->ipsa_harduselt) {
} else {
}
}
if (soft->sadb_lifetime_addtime != 0) {
if (soft->sadb_lifetime_addtime >
} else {
}
}
if (assoc->ipsa_softaddlt != 0) {
}
if (assoc->ipsa_softuselt != 0 &&
}
}
if (soft->sadb_lifetime_allocations != 0)
}
}
if (idle->sadb_lifetime_addtime != 0)
if (idle->sadb_lifetime_usetime != 0)
if (assoc->ipsa_idleaddlt != 0) {
}
if (assoc->ipsa_idleuselt != 0) {
if (assoc->ipsa_idletime != 0) {
} else {
}
}
}
}
static int
{
int rcode = 0;
switch (new_state) {
}
break;
case SADB_X_SASTATE_IDLE:
} else {
}
break;
case SADB_X_SASTATE_ACTIVE:
break;
}
break;
}
assoc->ipsa_mblkcnt = 0;
} else {
}
break;
default:
break;
}
return (rcode);
}
/*
* Check a proposed KMC update for sanity.
*/
static int
{
return (0);
return (ESRCH); /* DEAD == Not there, in this case. */
return (EINVAL);
}
return (EINVAL);
}
return (0);
}
/*
* Actually update the KMC info.
*/
static void
{
if (kmp != 0)
if (kmc != 0)
}
/*
* Common code to update an SA.
*/
int
{
&sq, diagnostic);
if (error != 0)
return (error);
if (error != 0)
return (error);
/*
* REFRELE the target and let the add_sa_func()
* deal with updating a larval SA.
*/
}
}
/*
* At this point we have an UPDATE to a MATURE SA. There should
* not be any keying material present.
*/
goto bail;
}
goto bail;
}
goto bail;
}
}
goto bail;
}
}
}
if (error) {
goto bail;
}
}
if (error) {
goto bail;
}
}
ksi, echo_target);
goto bail;
}
/*
* Reality checks for updates of active associations.
* Sundry first-pass UPDATE-specific reality checks.
* Have to do the checks here, because it's after the add_sa code.
*/
goto bail;
}
goto bail;
}
error = EOPNOTSUPP;
goto bail;
}
goto bail;
}
return (EINVAL);
if (error != 0)
goto bail;
if (error != 0)
goto bail;
/*
* Do not allow replay value change for MATURE or LARVAL SA.
*/
goto bail;
}
}
/*
* If an inbound SA, update the replay counter
* and check off all the other sequence number
*/
*diagnostic =
goto bail;
}
current +
} else {
current +
}
}
}
if (sadb_msg_type == SADB_X_UPDATEPAIR) {
} else {
goto bail;
}
}
if (error == 0)
ksi, echo_target);
bail:
return (error);
}
static int
int *diagnostic)
{
int error = 0;
assoc->sadb_sa_spi) {
return (EINVAL);
}
/*
* Assume for now that the spi value provided in the SADB_UPDATE
* message was valid, update the SA with its pair spi value.
* If the spi turns out to be bogus or the SA no longer exists
* then this will be detected when the reverse update is made
* below.
*/
/*
* After updating the ipsa_otherspi element of the SA, get_ipsa_pair()
* should now return pointers to the SA *AND* its pair, if this is not
* the case, the "otherspi" either did not exist or was deleted. Also
* check that "otherspi" is not already paired. If everything looks
* good, complete the update. IPSA_REFRELE the first pair_pointer
* after this update to ensure its not deleted until we are done.
*/
if (error != 0) {
/*
* This should never happen, calling function still has
* IPSA_REFHELD on the SA we just updated.
*/
return (error); /* XXX EINVAL instead of ESRCH? */
}
} else {
/* Its dead Jim! */
(IPSA_F_OUTBOUND | IPSA_F_INBOUND)) {
/* This SA is in both hashtables. */
} else if (ipsa_flags & IPSA_F_PAIRED) {
/* This SA is already paired with another. */
}
}
if (undo_pair) {
/* The pair SA does not exist. */
} else {
}
return (error);
}
/*
* The following functions deal with ACQUIRE LISTS. An ACQUIRE list is
* a list of outstanding SADB_ACQUIRE messages. If ipsec_getassocbyconn() fails
* for an outbound datagram, that datagram is queued up on an ACQUIRE record,
* and an SADB_ACQUIRE message is sent up. Presumably, a user-space key
* management daemon will process the ACQUIRE, use a SADB_GETSPI to reserve
* an SPI value and a larval SA, then SADB_UPDATE the larval SA, and ADD the
* other direction's SA.
*/
/*
* Check the ACQUIRE lists. If there's an existing ACQUIRE record,
* grab it, lock it, and return it. Otherwise return NULL.
*
* XXX MLS number of arguments getting unwieldy here
*/
static ipsacq_t *
{
}
/*
*
* XXX May need search for duplicates based on other things too!
*/
break; /* everything matched */
}
return (walker);
}
/*
* For this mblk, insert a new acquire record. Assume bucket contains addrs
* of all of the same length. Give up (and drop) if memory
* cannot be allocated for a new one; otherwise, invoke callback to
* send the acquire up..
*
* In cases where we need both AH and ESP, add the SA to the ESP ACQUIRE
* list. The ah_add_sa_finish() routines can look at the packet's attached
* attributes and handle this case specially.
*/
void
{
int hashoffset;
int sens_len;
/* Assign sadb pointers */
if (need_esp) { /* ESP for AH+ESP */
} else {
}
if (is_system_labeled())
/*
* Set up an ACQUIRE record.
*
* Immediately, make sure the ACQUIRE sequence number doesn't slip
* below the lowest point allowed in the kernel. (In other words,
* make sure the high bit on the sequence number is set.)
*/
} else {
}
if (tunnel_mode) {
/*
* Tunnel mode with no policy pointer means this is a
* reflected ICMP (like a ECHO REQUEST) that came in
* with self-encapsulated protection. Until we better
* support this, drop the packet.
*/
return;
}
/* Snag inner addresses. */
} else {
}
/*
* Check buckets to see if there is an existing entry. If so,
* grab it. sadb_checkacquire locks newbie if found.
*/
/*
* Otherwise, allocate a new one.
*/
return;
}
}
}
/*
* XXX MLS does it actually help us to drop the bucket lock here?
* we have inserted a half-built, locked acquire record into the
* bucket. any competing thread will now be able to lock the bucket
* to scan it, but will immediately pile up on the new acquire
* record's lock; I don't think we gain anything here other than to
* disperse blame for lock contention.
*
* we might be able to dispense with acquire record locks entirely..
* just use the bucket locks..
*/
/*
* This assert looks silly for now, but we may need to enter newbie's
* mutex during a search.
*/
/*
* Make the ip_xmit_attr_t into something we can queue.
* If no memory it frees datamp.
*/
/* Queue up packet. Use b_next. */
/* Statistics for allocation failure */
} else {
}
} else if (newbie->ipsacq_numpackets == 0) {
/* First one. */
/*
* Extended ACQUIRE with both AH+ESP will use ESP's timeout
* value.
*/
if (tunnel_mode) {
} else {
}
}
} else {
/* Scan to the end of the list & insert. */
/* Freeing the async message */
} else {
}
}
/*
* Reset addresses. Set them to the most recently added mblk chain,
* so that the address pointers in the acquire record will point
* at an mblk still attached to the acquire list.
*/
/*
* If the acquire record has more than one queued packet, we've
* already sent an ACQUIRE, and don't need to repeat ourself.
*/
/* I have an acquire outstanding already! */
return;
}
if (!keysock_extended_reg(ns))
goto punt_extended;
/*
* Construct an extended ACQUIRE. There are logging
* opportunities here in failure cases.
*/
if (tunnel_mode) {
} else {
}
sel.ips_is_icmp_inv_acq = 0;
} else {
}
extended = sadb_keysock_out(0);
goto punt_extended;
/*
* XXX MLS correct condition here?
* XXX MLS other credential attributes in acquire?
* XXX malloc failure? don't fall back to original?
*/
goto punt_extended;
}
}
goto punt_extended;
}
/*
* Send an ACQUIRE message (and possible an extended ACQUIRE) based on
* this new record. The send-acquire callback assumes that acqrec is
* already locked.
*/
return;
}
/*
* Unlink and free an acquire record.
*/
void
{
}
}
/* Unlink */
}
/*
* Free hanging mp's.
*
* XXX Instead of freemsg(), perhaps use IPSEC_REQ_FAILED.
*/
/* Freeing the async message */
}
/* Free */
}
/*
* Destroy an acquire list fanout.
*/
static void
netstack_t *ns)
{
int i;
return;
for (i = 0; i < numentries; i++) {
if (forever)
}
if (forever) {
}
}
/*
* Create an algorithm descriptor for an extended ACQUIRE. Filter crypto
* framework's view of reality vs. IPsec's. EF's wins, BTW.
*/
static uint8_t *
{
return (NULL);
/*
* Normalize vs. crypto framework's limits. This way, you can specify
* a stronger policy, and when the framework loads a stronger version,
* you can just keep plowing w/o rewhacking your SPD.
*/
return (NULL); /* Algorithm doesn't exist. Fail gracefully. */
}
return (cur);
}
/*
* Convert the given ipsec_action_t into an ecomb starting at *ecomb
* which must fit before *limit
*
* return NULL if we ran out of room or a pointer to the end of the ecomb.
*/
static uint8_t *
netstack_t *ns)
{
return (NULL);
ecomb->sadb_x_ecomb_numalgs = 0;
ecomb->sadb_x_ecomb_reserved = 0;
ecomb->sadb_x_ecomb_reserved2 = 0;
/*
* No limits on allocations, since we really don't support that
* concept currently.
*/
/*
* XXX TBD: Policy or global parameters will eventually be
* able to fill in some of these.
*/
ecomb->sadb_x_ecomb_flags = 0;
ecomb->sadb_x_ecomb_soft_bytes = 0;
ecomb->sadb_x_ecomb_hard_bytes = 0;
if (ipp->ipp_use_ah) {
return (NULL);
}
if (ipp->ipp_use_esp) {
if (ipp->ipp_use_espa) {
return (NULL);
}
return (NULL);
/* Fill in lifetimes if and only if AH didn't already... */
if (!ipp->ipp_use_ah)
}
return (cur);
}
/*
* From a cred_t, construct a sensitivity label extension
*
* We send up a fixed-size sensitivity label bitmap, and are perhaps
* overly chummy with the underlying data structures here.
*/
/* ARGSUSED */
int
{
}
void
int senslen)
{
/* LINTED */
sens->sadb_x_sens_flags = 0;
}
static sadb_sens_t *
{
/* XXX allocation failure? */
return (sens);
}
/*
* With a special designated "not a label" cred_t ?
*/
/* ARGSUSED */
{
if (sens->sadb_sens_integ_level != 0)
return (NULL);
if (sens->sadb_sens_integ_len != 0)
return (NULL);
return (NULL);
return (NULL);
return (tsl);
}
/* End XXX label-library-leakage */
/*
* Construct an extended ACQUIRE message based on a selector and the resulting
* IPsec action.
*
* NOTE: This is used by both inverse ACQUIRE and actual ACQUIRE
* generation. As a consequence, expect this function to evolve
* rapidly.
*/
static mblk_t *
{
/*
* Find the action we want sooner rather than later..
*/
} else {
}
/*
* Just take a swag for the allocation for now. We can always
* alter it later.
*/
return (NULL);
samsg->sadb_msg_errno = 0;
samsg->sadb_msg_reserved = 0;
samsg->sadb_msg_satype = 0;
if (tunnel_mode) {
/*
* Form inner address extensions based NOT on the inner
* selectors (i.e. the packet data), but on the policy's
* selector key (i.e. the policy's selector information).
*
* NOTE: The position of IPv4 and IPv6 addresses is the
* same in ipsec_selkey_t (unless the compiler does very
* strange things with unions, consult your local C language
* lawyer for details).
*/
} else {
}
} else {
pfxlen = 0;
}
ipsl->ipsl_lport : 0;
ipsl->ipsl_proto : 0;
return (NULL);
}
} else {
pfxlen = 0;
}
ipsl->ipsl_rport : 0;
return (NULL);
}
/*
* TODO - if we go to 3408's dream of transport mode IP-in-IP
* _with_ inner-packet address selectors, we'll need to further
* distinguish tunnel mode here. For now, having inner
*
* outer addresses.
*/
proto = 0;
lport = 0;
rport = 0;
}
} else {
}
/*
* NOTE: The position of IPv4 and IPv6 addresses is the same in
* ipsec_selector_t.
*/
return (NULL);
}
return (NULL);
}
return (NULL);
}
}
/*
* This section will change a lot as policy evolves.
* For now, it'll be relatively simple.
*/
/* no space left */
return (NULL);
}
eprop->sadb_x_prop_ereserved = 0;
eprop->sadb_x_prop_numecombs = 0;
/*
* Skip non-IPsec policies
*/
continue;
}
return (NULL);
}
}
if (eprop->sadb_x_prop_numecombs == 0) {
/*
* This will happen if we fail to find a policy
* allowing for IPsec processing.
* Construct an error message.
*/
samsg->sadb_x_msg_diagnostic = 0;
return (mp);
}
return (NULL);
}
}
return (mp);
}
/*
* Generic setup of an RFC 2367 ACQUIRE message. Caller sets satype.
*
* NOTE: This function acquires alg_lock as a side-effect if-and-only-if we
* succeed (i.e. return non-NULL). Caller MUST release it. This is to
* maximize code consolidation while preventing algorithm changes from messing
* with the callers finishing touches on the ACQUIRE itself.
*/
mblk_t *
{
pfkeymp = sadb_keysock_out(0);
return (NULL);
/*
* First, allocate a basic ACQUIRE message
*/
sizeof (sadb_address_t) + sizeof (sadb_prop_t);
/* Make sure there's enough to cover both AF_INET and AF_INET6. */
/* NOTE: The lock is now held through to this function's return. */
if (tunnel_mode) {
/* Tunnel mode! */
/* Enough to cover both AF_INET and AF_INET6. */
}
return (NULL);
}
cur += sizeof (sadb_msg_t);
switch (af) {
case AF_INET:
break;
case AF_INET6:
break;
default:
/* This should never happen unless we have kernel bugs. */
"sadb_setup_acquire: corrupt ACQUIRE record.\n");
ASSERT(0);
return (NULL);
}
samsg->sadb_msg_errno = 0;
samsg->sadb_msg_pid = 0;
samsg->sadb_msg_reserved = 0;
sport_typecode = dport_typecode = 0;
} else {
}
if (tunnel_mode) {
}
/* XXX Insert identity information here. */
/* XXXMLS Insert sensitivity information here. */
else
return (pfkeymp);
}
/*
* Given an SADB_GETSPI message, find an appropriately ranged SA and
* allocate an SA. If there are message improprieties, return (ipsa_t *)-1.
* If there was a memory allocation error, return NULL. (Assume NULL !=
* (ipsa_t *)-1).
*
* master_spi is passed in host order.
*/
ipsa_t *
{
return ((ipsa_t *)-1);
}
return ((ipsa_t *)-1);
}
return ((ipsa_t *)-1);
}
switch (af) {
case AF_INET:
break;
case AF_INET6:
break;
default:
return ((ipsa_t *)-1);
}
/* Return a random value in the range. */
if (cl_inet_getspi) {
} else {
sizeof (add));
}
}
/*
* Since master_spi is passed in host order, we need to htonl() it
* for the purposes of creating a new SA.
*/
ns));
}
/*
*
* Locate an ACQUIRE and nuke it. If I have an samsg that's larger than the
* base header, just ignore it. Otherwise, lock down the whole ACQUIRE list
* and scan for the sequence number in question. I may wish to accept an
* address pair with it, for easier searching.
*
* Caller frees the message, so we don't have to here.
*
* NOTE: The pfkey_q parameter may be used in the future for ACQUIRE
* failures.
*/
/* ARGSUSED */
void
netstack_t *ns)
{
int i;
/*
* I only accept the base header for this!
* Though to be honest, requiring the dst address would help
* immensely.
*
* XXX There are already cases where I can get the dst address.
*/
return;
/*
* Using the samsg->sadb_msg_seq, find the ACQUIRE record, delete it,
* (and in the future send a message to IP with the appropriate error
* number).
*
* Q: Do I want to reject if pid != 0?
*/
break; /* for acqrec... loop. */
}
break; /* for i = 0... loop. */
}
break; /* for acqrec... loop. */
}
break; /* for i = 0... loop. */
}
}
return;
/*
* What do I do with the errno and IP? I may need mp's services a
* little more. See sadb_destroy_acquire() for future directions
* beyond free the mblk chain on the acquire record.
*/
/* Have to exit mutex here, because of breaking out of for loop. */
}
/*
* The following functions work with the replay windows of an SA. They assume
* the ipsa->ipsa_replay_arr is an array of uint64_t, and that the bit vector
* represents the highest sequence number packet received, and back
* (ipsa->ipsa_replay_wsize) packets.
*/
/*
* Is the replay bit set?
*/
static boolean_t
{
}
/*
* Shift the bits of the replay window over.
*/
static void
{
int i;
if (shift == 0)
return;
}
}
}
/*
* Set a bit in the bit vector.
*/
static void
{
}
/*
* Assume caller has NOT done ntohl() already on seq. Check to see
* if replay sequence number "seq" has been seen already.
*/
{
if (ipsa->ipsa_replay_wsize == 0)
return (B_TRUE);
/*
* NOTE: I've already checked for 0 on the wire in sadb_replay_peek().
*/
/* Convert sequence number into host order before holding the mutex. */
/* Initialize inbound SA's ipsa_replay field to last one received. */
if (ipsa->ipsa_replay == 0)
/*
* I have received a new "highest value received". Shift
* the replay window over.
*/
/* In replay window, shift bits over. */
} else {
/* WAY FAR AHEAD, clear bits and start again. */
sizeof (ipsa->ipsa_replay_arr));
}
ipsa_set_replay(ipsa, 0);
goto done;
}
goto done;
}
/* Set this packet as seen. */
done:
return (rc);
}
/*
* "Peek" and see if we should even bother going through the effort of
* running an authentication check on the sequence number passed in.
* this takes into account packets that are below the replay window,
* and collisions with already replayed packets. Return B_TRUE if it
* is okay to proceed, B_FALSE if this packet should be dropped immediately.
* Assume same byte-ordering as sadb_replay_check.
*/
{
if (ipsa->ipsa_replay_wsize == 0)
return (B_TRUE);
/*
* 0 is 0, regardless of byte order... :)
*
* If I get 0 on the wire (and there is a replay window) then the
* sender most likely wrapped. This ipsa may need to be marked or
* something.
*/
if (seq == 0)
return (B_FALSE);
goto done;
/*
* If I've hit 0xffffffff, then quite honestly, I don't need to
* bother with formalities. I'm not accepting any more packets
* on this SA.
*/
/*
* Since we're already holding the lock, update the
* expire time ala. sadb_replay_delete() and return.
*/
goto done;
}
/*
* This seq is in the replay window. I'm not below it,
* because I already checked for that above!
*/
goto done;
}
/* Else return B_TRUE, I'm going to advance the window. */
done:
return (rc);
}
/*
* Delete a single SA.
*
* For now, use the quick-and-dirty trick of making the association's
* hard-expire lifetime (time_t)1, ensuring deletion by the *_ager().
*/
void
{
}
/*
* Special front-end to ipsec_rl_strlog() dealing with SA failure.
* this is designed to take only a format string with "* %x * %s *", so
* that "spi" is printed first, then "addr" is converted using inet_pton().
*
* This is abstracted out to save the stack space for only when inet_pton()
* is called. Make sure "spi" is in network order; it usually is when this
* would get called.
*/
void
{
}
/*
* Fills in a reference to the policy, if any, from the conn, in *ppp
*/
static void
{
} else {
}
}
/*
* The following functions scan through active conn_t structures
* and return a reference to the best-matching policy it can find.
* Caller must release the reference.
*/
static void
{
if (sel->ips_local_port == 0)
return;
ipst)];
break;
}
/* Try port-only match in IPv6. */
}
}
break;
}
return;
}
}
}
static conn_t *
{
if (sel->ips_local_port == 0)
return (NULL);
break;
}
/* Match to all-zeroes. */
}
}
break;
}
return (NULL);
}
}
return (connp);
}
static void
{
/*
* Find TCP state in the following order:
* 1.) Connected conns.
* 2.) Listeners.
*
* Even though #2 will be the common case for inbound traffic, only
* following this order insures correctness.
*/
if (sel->ips_local_port == 0)
return;
/*
* 0 should be fport, 1 should be lport. SRC is the local one here.
* See ipsec_construct_inverse_acquire() for details.
*/
ports))
break;
}
} else {
ports))
break;
}
}
} else {
/* Try the listen hash. */
return;
}
}
static void
{
/*
* Find SCP state in the following order:
* 1.) Connected conns.
* 2.) Listeners.
*
* Even though #2 will be the common case for inbound traffic, only
* following this order insures correctness.
*/
if (sel->ips_local_port == 0)
return;
/*
* 0 should be fport, 1 should be lport. SRC is the local one here.
* See ipsec_construct_inverse_acquire() for details.
*/
/*
* For labeled systems, there's no need to check the
* label here. It's known to be good as we checked
* before allowing the connection to become bound.
*/
} else {
}
return;
}
/*
* Fill in a query for the SPD (in "sel") using two PF_KEY address extensions.
* Returns 0 or errno, and always sets *diagnostic to something appropriate
* to PF_KEY.
*
* NOTE: For right now, this function (and ipsec_selector_t for that matter),
* ignore prefix lengths in the address extension. Since we match on first-
* entered policies, this shouldn't matter. Also, since we normalize prefix-
* set addresses to mask out the lower bits, we should get a suitable search
* key for the SPD anyway. This is the function to change if the assumption
* about suitable search keys is wrong.
*/
static int
{
*diagnostic = 0;
return (EINVAL);
}
} else {
}
} else {
return (EINVAL);
}
} else {
}
}
return (0);
}
/*
* We have encapsulation.
* - Lookup tun_t by address and look for an associated
* tunnel policy
* - If there are inner selectors
* - check ITPF_P_TUNNEL and ITPF_P_ACTIVE
* - Look up tunnel policy based on selectors
* - Else
* - Sanity check the negotiation
* - If appropriate, fall through to global policy
*/
static int
int *diagnostic)
{
int err;
*diagnostic = 0;
/* Check for inner selectors and act appropriately */
/* Inner selectors present */
(ITPF_P_ACTIVE | ITPF_P_TUNNEL)) {
/*
* If inner packet selectors, we must have negotiate
* tunnel and active policy. If the tunnel has
* transport-mode policy set on it, or has no policy,
* fail.
*/
return (ENOENT);
} else {
/*
* Reset "sel" to indicate inner selectors. Pass
* inner PF_KEY address extensions for this to happen.
*/
return (err);
/*
* Now look for a tunnel policy based on those inner
* selectors. (Common code is below.)
*/
}
} else {
/* No inner selectors present */
/*
* Transport mode negotiation with no tunnel policy
* configured - return to indicate a global policy
* check is needed.
*/
return (0);
/* Tunnel mode set with no inner selectors. */
return (ENOENT);
}
/*
* Else, this is a tunnel policy configured with ifconfig(1m)
* or "negotiate transport" with ipsecconf(1m). We have an
* itp with policy set based on any match, so don't bother
* changing fields in "sel".
*/
}
/*
* Don't default to global if we didn't find a matching policy entry.
* Instead, send ENOENT, just like if we hit a transport-mode tunnel.
*/
return (ENOENT);
return (0);
}
/*
* For sctp conn_faddr is the primary address, hence this is of limited
* use for sctp.
*/
static void
{
if (isv4) {
} else {
}
if (isv4) {
break;
} else {
&sel->ips_local_addr_v6)) &&
&sel->ips_remote_addr_v6)))
break;
}
}
return;
}
}
/*
* Construct an inverse ACQUIRE reply based on:
*
* 1.) Current global policy.
* 2.) A conn_t match depending on what all was passed in the extv[].
* 3.) A tunnel's policy head.
* ...
* N.) Other stuff TBD (e.g. identities)
*
* If there is an error, set sadb_msg_errno and sadb_x_msg_diagnostic
* in this function so the caller can extract them where appropriate.
*
* The SRC address is the local one - just like an outbound ACQUIRE message.
*
* XXX MLS: key management supplies a label which we just reflect back up
* again. clearly we need to involve the label in the rest of the checks.
*/
mblk_t *
netstack_t *ns)
{
int err;
int diagnostic;
/* Normalize addresses */
== KS_IN_ADDR_UNKNOWN) {
goto bail;
}
== KS_IN_ADDR_UNKNOWN) {
goto bail;
}
goto bail;
}
/* Check for tunnel mode and act appropriately */
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
/* Get selectors first, based on outer addresses */
if (err != 0)
goto bail;
/* Check for tunnel mode mismatches. */
err = EPROTOTYPE;
goto bail;
}
/*
* Okay, we have the addresses and other selector information.
* Let's first find a conn...
*/
switch (sel.ips_protocol) {
case IPPROTO_TCP:
break;
case IPPROTO_UDP:
break;
case IPPROTO_SCTP:
break;
case IPPROTO_ENCAP:
case IPPROTO_IPV6:
/*
* Assume sel.ips_remote_addr_* has the right address at
* that exact position.
*/
ipst);
/*
* Transport-mode tunnel, make sure we fake out isel
* to contain something based on the outer protocol.
*/
} /* Else isel is initialized by ipsec_tun_pol(). */
&diagnostic);
/*
* NOTE: isel isn't used for now, but in RFC 430x IPsec, it
* may be.
*/
if (err != 0)
goto bail;
break;
default:
break;
}
/*
* If we didn't find a matching conn_t or other policy head, take a
* look in the global policy.
*/
/* There's no global policy. */
diagnostic = 0;
goto bail;
}
}
/*
* message based on that, fix fields where appropriate,
* and return the message.
*/
}
bail:
}
return (retmp);
}
/*
* ipsa_lpkt is a one-element queue, only manipulated by the next two
* functions. They have to hold the ipsa_lock because of potential races
* between key management using SADB_UPDATE, and inbound packets that may
* queue up on the larval SA (hence the 'l' in "lpkt").
*/
/*
* sadb_set_lpkt:
*
* Returns the passed-in packet if the SA is no longer larval.
*
* Returns NULL if the SA is larval, and needs to be swapped into the SA for
* processing after an SADB_UPDATE.
*/
mblk_t *
{
/*
* Consume npkt and place it in the LARVAL SA's inbound
* packet slot.
*/
} else {
}
} else {
/*
* If not larval, we lost the race. NOTE: ipsa_lpkt may still
* have been non-NULL in the non-larval case, because of
* inbound packets arriving prior to sadb_common_add()
* transferring the SA completely out of larval state, but
* after lpkt was grabbed by the AH/ESP-specific add routines.
* We should clear the old ipsa_lpkt in this case to make sure
* that it doesn't linger on the now-MATURE IPsec SA, or get
* picked up as an out-of-order packet.
*/
}
}
return (npkt);
}
/*
* sadb_clear_lpkt: Atomically clear ipsa->ipsa_lpkt and return the
* previous value.
*/
mblk_t *
{
return (opkt);
}
/*
* Buffer a packet that's in IDLE state as set by Solaris Clustering.
*/
void
{
if (cl_inet_idlesa == NULL) {
return;
}
return;
}
ipsa->ipsa_mblkcnt++;
} else {
ipsa->ipsa_mblkcnt --;
}
}
}
/*
* Stub function that taskq_dispatch() invokes to take the mblk (in arg)
* and put into STREAMS again.
*/
void
{
/* The ill or ip_stack_t disappeared on us. */
} else {
}
}
}
/*
* context template when a crypto software provider is removed or
* added.
*/
struct sadb_update_alg_state {
};
static void
{
(struct sadb_update_alg_state *)cookie;
return;
update_state->async_auth)) {
} else {
}
switch (update_state->alg_type) {
case IPSEC_ALG_AUTH:
break;
case IPSEC_ALG_ENCR:
break;
default:
}
return;
}
/*
* The context template of the SA may be affected by the change
* of crypto provider.
*/
if (update_state->is_added) {
/* create the context template if not already done */
(void) ipsec_create_ctx_tmpl(entry,
}
} else {
/*
* The crypto provider was removed. If the context template
* exists but it is no longer valid, free it.
*/
}
}
/*
* Invoked by IP when a software crypto provider has been updated, or if
* the crypto synchrony changes. The type and id of the corresponding
* algorithm is passed as argument. The type is set to ALL in the case of
* a synchrony change.
*
* is_added is B_TRUE if the provider was added, B_FALSE if it was
* context templates associated with SAs if needed.
*/
void
netstack_t *ns)
{
/* walk the AH tables only for auth. algorithm changes */
}
/* walk the ESP tables */
}
/*
* Creates a context template for the specified SA. This function
* is called when an SA is created and when a context template needs
* to be created due to a change of software provider.
*/
int
{
int rv;
/* get pointers to the algorithm info, context template, and key */
switch (alg_type) {
case IPSEC_ALG_AUTH:
break;
case IPSEC_ALG_ENCR:
break;
default:
}
return (EINVAL);
/* initialize the mech info structure for the framework */
mech.cm_param_len = 0;
/* create a new context template */
/*
* CRYPTO_MECH_NOT_SUPPORTED can be returned if only hardware
* providers are available for that mechanism. In that case
* we don't fail, and will generate the context template from
* the framework callback when a software provider for that
* mechanism registers.
*
* The context template is assigned the special value
* IPSEC_CTX_TMPL_ALLOC if the allocation failed due to a
* lack of memory. No attempt will be made to use
* the context template if it is set to this value.
*/
if (rv == CRYPTO_HOST_MEMORY) {
} else if (rv != CRYPTO_SUCCESS) {
if (rv != CRYPTO_MECH_NOT_SUPPORTED)
return (EINVAL);
}
return (0);
}
/*
* Destroy the context template of the specified algorithm type
* of the specified SA. Must be called while holding the SA lock.
*/
void
{
if (alg_type == IPSEC_ALG_AUTH) {
}
} else {
}
}
}
/*
* Use the kernel crypto framework to check the validity of a key received
* via keysock. Returns 0 if the key is OK, -1 otherwise.
*/
int
{
int crypto_rc;
mech.cm_param_len = 0;
switch (crypto_rc) {
case CRYPTO_SUCCESS:
return (0);
case CRYPTO_MECHANISM_INVALID:
break;
case CRYPTO_KEY_SIZE_RANGE:
break;
case CRYPTO_WEAK_KEY:
break;
}
return (-1);
}
/*
* Whack options in the outer IP header when ipsec changes the outer label
*
* This is inelegant and really could use refactoring.
*/
mblk_t *
{
int delta;
int plen;
int hlen;
/* XXX XXX code copied from tsol_check_label */
/* Make sure we have room for the worst-case addition */
if (hlen > IP_MAX_HDR_LENGTH)
int copylen;
/* allocate enough to be meaningful, but not *too* much */
if (copylen > 256)
copylen = 256;
return (NULL);
}
/* keep the bias */
}
}
/*
* Paranoia
*/
/* End paranoia */
return (mp);
}
mblk_t *
{
int delta;
int plen;
int hlen;
/* XXX XXX code copied from tsol_check_label_v6 */
/*
* Make sure we have room for the worst-case addition. Add 2 bytes for
* the hop-by-hop ext header's next header and length fields. Add
* another 2 bytes for the label option type, len and then round
* up to the next 8-byte multiple.
*/
int copylen;
/*
* Allocate enough to be meaningful, but not *too* much.
* Also all the IPv6 extension headers must be in the same mblk
*/
if (copylen > 256)
copylen = 256;
return (NULL);
}
/* keep the bias */
}
}
/*
* Paranoia
*/
/* End paranoia */
return (mp);
}
/* Whack the labels and update ip_xmit_attr_t as needed */
mblk_t *
{
int adjust;
int iplen;
return (NULL);
} else {
return (NULL);
}
return (mp);
}
/*
* If this is an outgoing SA then add some fuzz to the
* SOFT EXPIRE time. The reason for this is to stop
* peers trying to renegotiate SOFT expiring SA's at
* the same time. The amount of fuzz needs to be at
* least 8 seconds which is the typical interval
* sadb_ager(), although this is only a guide as it
* selftunes.
*/
static void
{
if (assoc->ipsa_softaddlt == 0)
return;
}
static void
{
/*
* Because of the multi-line macro nature of IPSA_REFRELE, keep
* them in { }.
*/
}
}
}
static void
{
}
/*
* The sadb_ager() function walks through the hash tables of SA's and ages
* them, if the SA expires as a result, it's marked as DEAD and will be reaped
* the next time sadb_ager() runs. SA's which are paired or have a peer (same
* SA appears in both the inbound and outbound tables because it's not possible
* to determine its direction) are placed on a list when they expire. This is
* expire at different times.
*
* This function is called twice by sadb_ager(), one after processing the
* inbound table, then again after processing the outbound table.
*/
void
{
int outhash;
/*
* Haspeer cases will contain both IPv4 and IPv6. This code
* is address independent.
*/
while (haspeerlist != NULL) {
/* "dying" contains the SA that has a peer. */
/*
* Pick peer bucket based on addrfam.
*/
if (outbound) {
if (haspeer)
else
} else { /* inbound */
if (haspeer) {
*((in6_addr_t *)&dying->
ipsa_dstaddr));
} else {
ipsa_dstaddr));
}
*((in6_addr_t *)&dying->
ipsa_srcaddr));
} else {
ipsa_srcaddr));
}
}
/*
*/
if (haspeer) {
} else {
}
if (peer_assoc != NULL) {
if (!haspeer) {
/*
* Only SA's which have a "peer" or are
* "paired" end up on this list, so this
* must be a "paired" SA, update the flags
* to break the pair.
*/
peer_assoc->ipsa_otherspi = 0;
dying->ipsa_otherspi = 0;
}
/*
* Update the state of the "inbound" SA when
* the "outbound" SA has expired. Don't update
* the "outbound" SA when the "inbound" SA
* SA expires because setting the hard_addtime
* below will cause this to happen.
*/
}
}
}
}
/*
* Ensure that the IV used for CCM mode never repeats. The IV should
* only be updated by this function. Also check to see if the IV
* is about to wrap and generate a SOFT Expire. This function is only
* called for outgoing packets, the IV for incoming packets is taken
* from the wire. If the outgoing SA needs to be expired, update
* the matching incoming SA.
*/
{
int sa_new_state = 0;
/* For non counter modes, the IV is random data. */
return (rc);
}
/*
* This SA may have already been expired when its
* PAIR_SA expired.
*/
}
}
if (sa_new_state) {
/*
* If there is a state change, we need to update this SA
* and its "pair", we can find the bucket for the "pair" SA
* while holding the ipsa_t mutex, but we won't actually
* update anything until the ipsa_t mutex has been released
* for _this_ SA.
*/
} else {
}
}
if (sa_new_state) {
/* Find the inbound SA, need to lock hash bucket. */
}
}
return (rc);
}
void
{
/* See gcm_params_init() for comments. */
}
/* ARGSUSED */
void
{
}
/* ARGSUSED */
void
{
/*
* Create the nonce, which is made up of the salt and the IV.
* Copy the salt from the SA and the IV from the packet.
* For inbound packets we copy the IV from the packet because it
* was set by the sending system, for outbound packets we copy the IV
* from the packet because the IV in the SA may be changed by another
* thread, the IV in the packet was created while holding a mutex.
*/
}