sadb.c revision e35d2278fa5447def80bb5a191ce0f1c6b6836de
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <inet/ipsec_info.h>
#include <inet/ipsec_impl.h>
#include <inet/ipsecesp.h>
#include <inet/ipclassifier.h>
/*
* This source file contains Security Association Database (SADB) common
* routines. They are linked in with the AH module. Since AH has no chance
* of falling under export control, it was safe to link it in there.
*/
netstack_t *);
/*
* ipsacq_maxpackets is defined here to make it tunable
*/
extern uint64_t ipsacq_maxpackets;
} \
}
else \
} \
}
/* wrap the macro so we can pass it as a function pointer */
void
sadb_sa_refrele(void *target)
{
}
/*
* We presume that sizeof (long) == sizeof (time_t) and that time_t is
* a signed type.
*/
/*
* PF_KEY gives us lifetimes in uint64_t seconds. We presume that
* time_t is defined to be a signed type with the same range as
* "long". On ILP32 systems, we thus run the risk of wrapping around
* at end of time, as well as "overwrapping" the clock back around
* into a seemingly valid but incorrect future date earlier than the
* desired expiration.
*
* In order to avoid odd behavior (either negative lifetimes or loss
* of high order bits) when someone asks for bizarrely long SA
* lifetimes, we do a saturating add for expire times.
*
* We presume that ILP32 systems will be past end of support life when
* the 32-bit time_t overflows (a dangerous assumption, mind you..).
*
* On LP64, 2^64 seconds are about 5.8e11 years, at which point we
* will hopefully have figured out clever ways to avoid the use of
* fixed-sized integers in computation.
*/
static time_t
{
/*
* Clip delta to the maximum possible time_t value to
* prevent "overwrapping" back into a shorter-than-desired
* future time.
*/
/*
* This sum may still overflow.
*/
/*
* .. so if the result is less than the base, we overflowed.
*/
return (sum);
}
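#if 0	/* illustrative sketch */
/*
 * A minimal sketch (hypothetical helper, not the elided routine above) of
 * the saturating add described in the comment, assuming time_t is a signed
 * two's-complement type as stated.
 */
#define	EX_TIME_MAX \
	((time_t)(((uint64_t)1 << (sizeof (time_t) * 8 - 1)) - 1))

static time_t
ex_add_time_saturating(time_t base, uint64_t delta)
{
	time_t sum;

	/* Clip delta to the maximum possible time_t value. */
	if (delta > (uint64_t)EX_TIME_MAX)
		delta = (uint64_t)EX_TIME_MAX;

	/* This sum may still overflow... */
	sum = base + (time_t)delta;

	/* ... so if the result is less than the base, saturate. */
	if (sum < base)
		sum = EX_TIME_MAX;

	return (sum);
}
#endif	/* illustrative sketch */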
/*
* Callers of this function have already created a working security
* association, and have found the appropriate table & hash chain. All this
* function does is check duplicates, and insert the SA. The caller needs to
* hold the hash bucket lock and increment the refcnt before insertion.
*
* Return 0 if success, EEXIST if collision.
*/
int
{
/*
* Find insertion point (pointed to with **ptpn). Insert at the head
* of the list unless there's an unspecified source address, then
* insert it after the last SA with a specified source address.
*
* BTW, you'll have to walk the whole chain, matching on {DST, SPI}
* checking for collisions.
*/
return (EEXIST);
((walker->ipsa_unique_id &
walker->ipsa_unique_mask) ==
(ipsa->ipsa_unique_id &
ipsa->ipsa_unique_mask))) {
}
}
}
}
return (0);
}
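#if 0	/* illustrative sketch */
/*
 * A minimal sketch (hypothetical structures and names, not the elided
 * function above) of the insertion policy described in the comment:
 * walk the whole chain checking {DST, SPI} for collisions, and track
 * the insertion point with a pointer-to-pointer so an unspecified-source
 * SA lands after the last SA with a specified source address.
 */
typedef struct ex_sa_s {
	struct ex_sa_s	*ex_next;
	uint32_t	ex_spi;
	uint32_t	ex_dst;
	boolean_t	ex_src_unspec;	/* source address unspecified? */
} ex_sa_t;

static int
ex_insertassoc(ex_sa_t **bucket, ex_sa_t *ipsa)
{
	ex_sa_t **ptpn = bucket;
	ex_sa_t *walker;

	for (walker = *bucket; walker != NULL; walker = walker->ex_next) {
		if (walker->ex_spi == ipsa->ex_spi &&
		    walker->ex_dst == ipsa->ex_dst)
			return (EEXIST);	/* collision */
		/* Specified-source SAs stay ahead of unspecified-source. */
		if (ipsa->ex_src_unspec && !walker->ex_src_unspec)
			ptpn = &walker->ex_next;
	}
	ipsa->ex_next = *ptpn;
	*ptpn = ipsa;
	return (0);
}
#endif	/* illustrative sketch */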
/*
* Free a security association. Its reference count is 0, which means
* I must free it. The SA must be unlocked and must not be linked into
* any fanout list.
*/
static void
{
if (ipsa->ipsa_natt_ka_timer != 0)
/* bzero() these fields for paranoia's sake. */
}
}
}
}
}
/*
* Unlink a security association from a hash bucket. Assume the hash bucket
* lock is held, but the association's lock is not.
*
* Note that we do not bump the bucket's generation number here because
* we might not be making a visible change to the set of visible SA's.
* All callers MUST bump the bucket's generation number before they unlock
* the bucket if they use sadb_unlinkassoc to permanently remove an SA which
* was present in the bucket at the time it was locked.
*/
void
{
/* These fields are protected by the link lock. */
}
/* This may destroy the SA. */
}
/*
* Create a larval security association with the specified SPI. All other
* fields are zeroed.
*/
static ipsa_t *
netstack_t *ns)
{
/*
* Allocate...
*/
/* Can't make new larval SA. */
return (NULL);
}
/* Assigned requested SPI, assume caller does SPI allocation magic. */
/*
* Copy addresses...
*/
/*
* Set common initialization values, including refcnt.
*/
/*
* There aren't a lot of other common initialization values, as
* they are copied in from the PF_KEY message.
*/
return (newbie);
}
/*
* Call me to initialize a security association fanout.
*/
static int
{
int i;
return (ENOMEM);
for (i = 0; i < size; i++) {
}
return (0);
}
/*
* Call me to initialize an acquire fanout
*/
static int
{
int i;
return (ENOMEM);
for (i = 0; i < size; i++) {
}
return (0);
}
/*
* Attempt to initialize an SADB instance. On failure, return ENOMEM;
* caller must clean up partial allocations.
*/
static int
{
return (ENOMEM);
return (ENOMEM);
return (ENOMEM);
return (0);
}
/*
* Call me to initialize an SADB instance; fall back to default size on failure.
*/
static void
netstack_t *ns)
{
if (size < IPSEC_DEFAULT_HASH_SIZE)
"Unable to allocate %u entry IPv%u %s SADB hash table",
}
}
/*
* Initialize an SADB-pair.
*/
void
{
if (type == SADB_SATYPE_AH) {
}
}
/*
* Deliver a single SADB_DUMP message representing a single SA. This is
* called many times by sadb_dump().
*
* If the return value of this is ENOBUFS (not the same as ENOMEM), then
* the caller should take that as a hint that dupb() on the "original answer"
* failed, and that perhaps the caller should try again with a copyb()ed
* "original answer".
*/
static int
{
return (ENOBUFS);
return (ENOMEM);
}
/* Just do a putnext, and let keysock deal with flow control. */
return (0);
}
/*
* Common function to allocate and prepare a keysock_out_t M_CTL message.
*/
mblk_t *
{
}
return (mp);
}
/*
* Perform an SADB_DUMP, spewing out every SA in an array of SA fanouts
* to keysock.
*/
static int
{
int i, error = 0;
/*
* For each IPSA hash bucket do:
* - Hold the mutex
* - Walk each entry, doing an sadb_dump_deliver() on it.
*/
if (original_answer == NULL)
return (ENOMEM);
for (i = 0; i < num_entries; i++) {
continue;
/* Ran out of dupb's. Try a copyb. */
if (new_original_answer == NULL) {
} else {
}
}
if (error != 0)
break; /* out of for loop. */
}
if (error != 0)
break; /* out of for loop. */
}
return (error);
}
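#if 0	/* illustrative sketch */
/*
 * A sketch (hypothetical helper) of the dupb()/copyb() retry pattern the
 * dump code uses: share the "original answer" cheaply with dupb(), and
 * when that fails, replace the template with a copyb()ed copy and retry.
 */
static mblk_t *
ex_dup_or_copy(mblk_t **original_answer)
{
	mblk_t *answer = dupb(*original_answer);
	mblk_t *fresh;

	if (answer != NULL)
		return (answer);

	/* Ran out of dupb's.  Try a copyb. */
	fresh = copyb(*original_answer);
	if (fresh == NULL)
		return (NULL);	/* Caller treats this as ENOMEM. */

	freeb(*original_answer);
	*original_answer = fresh;
	return (dupb(*original_answer));
}
#endif	/* illustrative sketch */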
/*
* Dump an entire SADB; outbound first, then inbound.
*/
int
{
int error;
/* Dump outbound */
if (error)
return (error);
/* Dump inbound */
}
/*
* Generic sadb table walker.
*
* Call "walkfn" for each SA in each bucket in "table"; pass the
* bucket, the entry and "cookie" to the callback function.
* Take care to ensure that walkfn can delete the SA without screwing
* up our traverse.
*
* The bucket is locked for the duration of the callback, both so that the
* callback can just call sadb_unlinkassoc() when it wants to delete something,
* and so that no new entries are added while we're walking the list.
*/
static void
void *cookie)
{
int i;
for (i = 0; i < numentries; i++) {
}
}
}
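#if 0	/* illustrative sketch */
/*
 * A sketch (hypothetical types) of the deletion-safe traversal the walker
 * needs: the next pointer is saved before the callback runs, so walkfn may
 * call sadb_unlinkassoc() on the current entry without breaking the walk.
 */
typedef struct ex_entry_s {
	struct ex_entry_s *ex_next;
} ex_entry_t;

typedef struct ex_bucket_s {
	ex_entry_t *ex_head;	/* bucket lock assumed held by the caller */
} ex_bucket_t;

static void
ex_walk_bucket(ex_bucket_t *bucket,
    void (*walkfn)(ex_bucket_t *, ex_entry_t *, void *), void *cookie)
{
	ex_entry_t *entry, *next;

	for (entry = bucket->ex_head; entry != NULL; entry = next) {
		next = entry->ex_next;	/* grab before walkfn may unlink */
		walkfn(bucket, entry, cookie);
	}
}
#endif	/* illustrative sketch */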
/*
* From the given SA, construct a dl_ct_ipsec_key and
* a dl_ct_ipsec structures to be sent to the adapter as part
* of a DL_CONTROL_REQ.
*
* ct_sa must point to the storage allocated for the key
* structure and must be followed by storage allocated
* for the SA information that must be sent to the driver
* as part of the DL_CONTROL_REQ request.
*
* The is_inbound boolean indicates whether the specified
* SA is part of an inbound SA table.
*
* Returns B_TRUE if the corresponding SA must be passed to
* a provider, B_FALSE otherwise; frees *mp if it returns B_FALSE.
*/
static boolean_t
{
"is_inbound = %d\n", is_inbound));
/* initialize flag */
sap->sadb_sa_flags = 0;
if (is_inbound) {
/*
* If an inbound SA has a peer, then mark it as being
* an outbound SA as well.
*/
if (sa->ipsa_haspeer)
} else {
/*
* If an outbound SA has a peer, then don't send it,
* since we will send the copy from the inbound table.
*/
if (sa->ipsa_haspeer) {
return (B_FALSE);
}
}
return (B_TRUE);
}
/*
* Called from AH or ESP to format a message which will be used to inform
* IPsec-acceleration-capable ills of a SADB change.
* (It is not possible to send the message to IP directly from this function
* since the SA, if any, is locked during the call).
*
* dl_operation: DL_CONTROL_REQ operation (add, delete, update, etc)
* sa_type: identifies whether the operation applies to AH or ESP
* (must be one of SADB_SATYPE_AH or SADB_SATYPE_ESP)
* sa: Pointer to an SA. Must be non-NULL and locked
* for ADD, DELETE, GET, and UPDATE operations.
* This function returns an mblk chain that must be passed to IP
* for forwarding to the IPsec capable providers.
*/
mblk_t *
{
/*
* 1 allocate and initialize DL_CONTROL_REQ M_PROTO
* 2 if a key is needed for the operation
* 2.1 initialize key
* 2.2 if a full SA is needed for the operation
* 2.2.1 initialize full SA info
* 3 return message; caller will call ill_ipsec_capab_send_all()
* to send the resulting message to IPsec capable ills.
*/
/*
* Allocate DL_CONTROL_REQ M_PROTO
* We allocate room for the SA even if it's not needed
* by some of the operations (for example flush)
*/
return (NULL);
/* initialize dl_control_req_t */
sizeof (dl_ct_ipsec_key_t);
/*
* Initialize key and SA data. Note that for some
* operations the SA data is ignored by the provider
* (delete, etc.)
*/
return (NULL);
}
/* construct control message */
return (NULL);
}
if (need_key) {
/*
* Keep an additional reference on SA, since it will be
* needed by IP to send control messages corresponding
* to that SA from its perimeter. IP will do a
* IPSA_REFRELE when done with the request.
*/
} else
return (ctl_mp);
}
/*
* Called by sadb_ill_download() to dump the entries for a specific
* fanout table. For each SA entry in the table passed as argument,
* use mp as a template to construct a full DL_CONTROL message, and
* call ill_dlpi_send(), provided by IP, to send the resulting
* messages to the ill.
*/
static void
{
int i, error = 0;
(void *)fanout, num_entries));
/*
* For each IPSA hash bucket do:
* - Hold the mutex
* - Walk each entry, sending a corresponding request to IP
* for it.
*/
for (i = 0; i < num_entries; i++) {
("sadb_ill_df: sending SA to ill via IP \n"));
/*
* Duplicate the template mp passed and
* complete DL_CONTROL_REQ data.
* To be more memory efficient, we could use
* dupb() for the M_CTL and copyb() for the M_PROTO
* as the M_CTL, since the M_CTL is the same for
* every SA entry passed down to IP for the same ill.
*
* is at least as large as the source mblk even if it's
* not using all its storage -- therefore, nmp
* has trailing space for sadb_req_from_sa to add
* the SA-specific bits.
*/
if (ipsec_capab_match(ill,
("sadb_ill_df: alloc error\n"));
break;
}
}
}
}
}
if (error != 0)
break; /* out of for loop. */
}
}
/*
* Called by ill_ipsec_capab_add(). Sends a copy of the SADB of
* the type specified by sa_type to the specified ill.
*
* We call sadb_ill_df() for each fanout table defined by the SADB (one per
* protocol). sadb_ill_df() finally calls ill_dlpi_send() for
* each SADB entry in order to send a corresponding DL_CONTROL_REQ
* message to the ill.
*/
void
{
int dlt;
/*
* Allocate and initialize prototype answer. A duplicate for
* each SA is sent down to the interface.
*/
/* DL_CONTROL_REQ M_PROTO mblk_t */
return;
if (sa_type == SADB_SATYPE_ESP) {
} else {
}
sizeof (dl_ct_ipsec_key_t);
/*
* then for each SADB entry, we fill out the dl_ct_ipsec_key_t
* and dl_ct_ipsec_t
*/
}
/*
* Call me to free up a security association fanout. Use the forever
* variable to indicate freeing up the SAs (forever == B_FALSE, e.g.
* an SADB_FLUSH message), or destroying everything (forever == B_TRUE,
* when a module is unloaded).
*/
static void
{
int i;
return;
for (i = 0; i < numentries; i++) {
if (forever)
}
if (forever) {
}
}
/*
* Entry points to sadb_destroyer().
*/
static void
{
/*
* Flush out each bucket, one at a time. Were it not for keysock's
* enforcement, there would be a subtlety where I could add on the
* heels of a flush. With keysock's enforcement, however, this
* makes ESP's job easy.
*/
/* For each acquire, destroy it; leave the bucket mutex alone. */
}
static void
{
/* For each acquire, destroy it, including the bucket mutex. */
}
static void
{
/*
* we've been unplumbed, or never were plumbed; don't go there.
*/
return;
/* have IP send a flush msg to the IPsec accelerators */
}
void
{
}
void
{
}
}
/*
* Check hard vs. soft lifetimes. If there's a reality mismatch (e.g.
* soft lifetimes > hard lifetimes) return an appropriate diagnostic for
* EINVAL.
*/
int
{
return (0);
if (hard->sadb_lifetime_allocations != 0 &&
soft->sadb_lifetime_allocations != 0 &&
return (SADB_X_DIAGNOSTIC_ALLOC_HSERR);
if (hard->sadb_lifetime_bytes != 0 &&
soft->sadb_lifetime_bytes != 0 &&
return (SADB_X_DIAGNOSTIC_BYTES_HSERR);
if (hard->sadb_lifetime_addtime != 0 &&
soft->sadb_lifetime_addtime != 0 &&
return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);
if (hard->sadb_lifetime_usetime != 0 &&
soft->sadb_lifetime_usetime != 0 &&
return (SADB_X_DIAGNOSTIC_USETIME_HSERR);
return (0);
}
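#if 0	/* illustrative sketch */
/*
 * A sketch of one hard-vs-soft comparison (an assumption based on the
 * comment above, since the elided checks aren't shown): zero means "no
 * limit", so a mismatch is only flagged when both values are set and the
 * hard lifetime would expire before the soft one.
 */
static boolean_t
ex_lifetime_mismatch(uint64_t hard, uint64_t soft)
{
	return (hard != 0 && soft != 0 && hard < soft);
}
#endif	/* illustrative sketch */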
/*
* Clone a security association for the purposes of inserting a single SA
* into inbound and outbound tables respectively.
*/
static ipsa_t *
{
return (NULL);
/* Copy over what we can. */
/* bzero and initialize locks, in case *_init() allocates... */
/*
* While somewhat dain-bramaged, the most graceful way to
* recover from errors is to keep plowing through the
* allocations, and getting what I can. It's easier to call
* sadb_freeassoc() on the stillborn clone when all the
* pointers aren't pointing to the parent's data.
*/
} else {
}
(char *)&newbie->ipsa_mac_len;
}
}
} else {
}
}
} else {
}
}
} else {
}
}
}
}
if (error) {
return (NULL);
}
return (newbie);
}
/*
* Initialize a SADB address extension at the address specified by addrext.
* Return a pointer to the end of the new address extension.
*/
static uint8_t *
{
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6;
int addrext_len;
int sin_len;
return (NULL);
return (NULL);
addrext->sadb_address_reserved = 0;
switch (af) {
case AF_INET:
return (NULL);
break;
case AF_INET6:
return (NULL);
break;
}
return (cur);
}
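#if 0	/* illustrative sketch */
/*
 * A sketch (hypothetical helper) of laying out an AF_INET address
 * extension: the PF_KEY extension header, followed by a sockaddr_in,
 * rounded up to a 64-bit boundary, with the extension length expressed
 * in 64-bit words as PF_KEY requires.
 */
static uint8_t *
ex_make_v4_addr_ext(uint8_t *cur, uint8_t *end, uint16_t exttype,
    uint32_t addr_nbo, uint16_t port_nbo, uint8_t proto)
{
	sadb_address_t *addrext = (sadb_address_t *)cur;
	struct sockaddr_in *sin;
	int addrext_len = roundup(sizeof (*addrext) + sizeof (*sin),
	    sizeof (uint64_t));

	if (cur + addrext_len > end)
		return (NULL);

	addrext->sadb_address_len = SADB_8TO64(addrext_len);
	addrext->sadb_address_exttype = exttype;
	addrext->sadb_address_proto = proto;
	addrext->sadb_address_prefixlen = 0;
	addrext->sadb_address_reserved = 0;

	sin = (struct sockaddr_in *)(addrext + 1);
	bzero(sin, sizeof (*sin));
	sin->sin_family = AF_INET;
	sin->sin_port = port_nbo;		/* network byte order */
	sin->sin_addr.s_addr = addr_nbo;	/* network byte order */

	return (cur + addrext_len);
}
#endif	/* illustrative sketch */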
/*
* Construct a key management cookie extension.
*/
static uint8_t *
{
return (NULL);
return (NULL);
kmcext->sadb_x_kmc_reserved = 0;
return (cur);
}
/*
* Given an original message header with sufficient space following it, and an
* SA, construct a full PF_KEY message with all of the relevant extensions.
* This is mostly used for SADB_GET, and SADB_DUMP.
*/
static mblk_t *
{
/*
* The following are pointers into the PF_KEY message this PF_KEY
* message creates.
*/
sadb_ident_t *ident;
/* These indicate the presence of the above extension fields. */
/* First off, figure out the allocation length for this message. */
/*
* Constant stuff. This includes base, SA, address (src, dst),
* and lifetime (current).
*/
sizeof (sadb_lifetime_t);
switch (fam) {
case AF_INET:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
case AF_INET6:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
default:
return (NULL);
}
/*
* Allocate TWO address extensions, for source and destination.
* (Thus, the * 2.)
*/
/* How 'bout other lifetimes? */
alloclen += sizeof (sadb_lifetime_t);
} else {
}
alloclen += sizeof (sadb_lifetime_t);
} else {
}
/* Inner addresses. */
if (ipsa->ipsa_innerfam == 0) {
} else {
switch (pfam) {
case AF_INET6:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
case AF_INET:
sizeof (sadb_address_t), sizeof (uint64_t));
break;
default:
"IPsec SADB: Proxy length failure.\n");
break;
}
}
/* For the following fields, assume that length != 0 ==> stuff */
if (ipsa->ipsa_authkeylen != 0) {
sizeof (uint64_t));
} else {
}
if (ipsa->ipsa_encrkeylen != 0) {
sizeof (uint64_t));
} else {
}
/* No need for roundup on sens and integ. */
} else {
}
/*
* Must use strlen() here for lengths. Identities use NULL
* pointers to indicate their nonexistence.
*/
sizeof (uint64_t));
} else {
}
sizeof (uint64_t));
} else {
}
alloclen += sizeof (sadb_x_kmc_t);
/* Make sure the allocation length is a multiple of 8 bytes. */
/* XXX Possibly make it esballoc, with a bzero-ing free_ftn. */
return (NULL);
if (hard) {
lt++;
}
if (soft) {
lt++;
}
/* NOTE: Don't fill in ports here if we are a tunnel-mode SA. */
goto bail;
}
goto bail;
}
goto bail;
}
}
IPPROTO_UDP, 0);
goto bail;
}
}
/* If we are a tunnel-mode SA, fill in the inner-selectors. */
if (isrc) {
goto bail;
}
}
if (idst) {
goto bail;
}
}
goto bail;
}
}
if (auth) {
key->sadb_key_reserved = 0;
}
if (encr) {
key->sadb_key_reserved = 0;
}
if (srcid) {
ident = (sadb_ident_t *)walker;
ident->sadb_ident_exttype = SADB_EXT_IDENTITY_SRC;
ident->sadb_ident_id = 0;
ident->sadb_ident_reserved = 0;
(void) strcpy((char *)(ident + 1),
}
if (dstid) {
ident = (sadb_ident_t *)walker;
ident->sadb_ident_exttype = SADB_EXT_IDENTITY_DST;
ident->sadb_ident_id = 0;
ident->sadb_ident_reserved = 0;
(void) strcpy((char *)(ident + 1),
}
if (sensinteg) {
sens->sadb_sens_reserved = 0;
}
}
bail:
/* Pardon any delays... */
return (mp);
}
/*
* Strip out key headers or unmarked headers (SADB_EXT_KEY_*, SADB_EXT_UNKNOWN)
* and adjust base message accordingly.
*
* Assume message is pulled up in one piece of contiguous memory.
*
* Say if we start off with:
*
* +------+----+-------------+-----------+---------------+---------------+
* | base | SA | source addr | dest addr | rsrvd. or key | soft lifetime |
* +------+----+-------------+-----------+---------------+---------------+
*
* we will end up with
*
* +------+----+-------------+-----------+---------------+
* | base | SA | source addr | dest addr | soft lifetime |
* +------+----+-------------+-----------+---------------+
*/
static void
{
int copylen;
/*
* Aha! I found a header to be erased.
*/
/*
* If I had a previous header to be erased,
* copy over it. I can get away with just
* copying backwards because the target will
* always be 8 bytes behind the source.
*/
copylen);
} else {
}
} else {
}
}
if (copylen != 0)
}
/* Adjust samsg. */
/* Assume all of the rest is cleared by caller in sadb_pfkey_echo(). */
}
/*
* AH needs to send an error to PF_KEY. Assume mp points to an M_CTL
* followed by an M_DATA with a PF_KEY message in it. The serial of
* the sending keysock instance is included.
*/
void
{
/*
* Enough functions call this to merit a NULL queue check.
*/
return;
}
/*
* Only send the base message up in the event of an error.
* Don't worry about bzero()-ing, because it was probably bogus
* anyway.
*/
if (diagnostic != SADB_X_DIAGNOSTIC_PRESET)
}
/*
* Send a successful return packet back to keysock via the queue in pfkey_q.
*
* Often an SA is associated with the reply message; it's passed in if needed,
* and NULL if not. BTW, that ipsa will have its refcnt appropriately held,
* and the caller will release said refcnt.
*/
void
{
switch (samsg->sadb_msg_type) {
case SADB_ADD:
case SADB_UPDATE:
case SADB_FLUSH:
case SADB_DUMP:
/*
* I have all of the message already. I just need to strip
* out the keying material and echo the message back.
*
* NOTE: for SADB_DUMP, the function sadb_dump() did the
* work. When DUMP reaches here, it should only be a base
* message.
*/
/* Assume PF_KEY message is contiguous. */
}
break;
case SADB_GET:
/*
* Do a lot of work here, because of the ipsa I just found.
* First construct the new PF_KEY message, then abandon
* the old one.
*/
return;
}
break;
case SADB_DELETE:
goto justecho;
/*
* Because listening KMds may require more info, treat
* DELETE like a special case of GET.
*/
return;
}
break;
default:
return;
}
/* ksi is now null and void. */
/* We're ready to send... */
}
/*
* Set up a global pfkey_q instance for AH, ESP, or some other consumer.
*/
void
{
/*
* First, check atomically that I'm the first and only keysock
* instance.
*
* Use OTHERQ(q), because qreply(q, mp) == putnext(OTHERQ(q), mp),
* and I want this module to say putnext(*_pfkey_q, mp) for PF_KEY
* messages.
*/
return;
}
/*
* If we made it past the casptr, then we have "exclusive" access
* to the timeout handle. Fire it off in 4 seconds, because it
* just seems like a good interval.
*/
}
/*
* Normalize IPv4-mapped IPv6 addresses (and prefixes) as appropriate.
*
* Check addresses themselves for wildcard or multicast.
*/
int
netstack_t *ns)
{
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6;
int diagnostic, type;
/* Assign both sockaddrs, the compiler will do the right thing. */
/*
* Convert to an AF_INET sockaddr. This means the
* return messages will have the extra space, but have
* AF_INET sockaddrs instead of AF_INET6.
*
* Yes, RFC 2367 isn't clear on what to do here w.r.t.
* mapped addresses, but since AF_INET6 ::ffff:<v4> is
* equal to AF_INET <v4>, it shouldn't be a huge
* problem.
*/
normalized = B_TRUE;
}
switch (ext->sadb_ext_type) {
case SADB_EXT_ADDRESS_SRC:
break;
case SADB_EXT_ADDRESS_DST:
break;
break;
break;
break;
break;
/* There is no default, see above ASSERT. */
}
bail:
serial);
} else {
/*
* Scribble in sadb_msg that we got passed in.
* Overload "mp" to be an sadb_msg pointer.
*/
}
return (KS_IN_ADDR_UNKNOWN);
}
/*
* We need only check for prefix issues.
*/
/* Set diagnostic now, in case we need it later. */
if (normalized)
/*
* Verify and mask out inner-addresses based on prefix length.
*/
goto bail;
} else {
/*
* ip_plen_to_mask_v6() returns NULL if the value in
* question is out of range.
*/
goto bail;
}
/* We don't care in these cases. */
return (KS_IN_ADDR_DONTCARE);
}
/* Check the easy ones now. */
return (KS_IN_ADDR_MBCAST);
return (KS_IN_ADDR_UNSPEC);
/*
* At this point, we're a unicast IPv6 address.
*
* A ctable lookup for local is sufficient here. If we're
* local, return KS_IN_ADDR_ME, otherwise KS_IN_ADDR_NOTME.
*
* by what zone we're in when we go to zone-aware IPsec.
*/
ns->netstack_ip);
/* Hey hey, it's local. */
return (KS_IN_ADDR_ME);
}
} else {
return (KS_IN_ADDR_UNSPEC);
return (KS_IN_ADDR_MBCAST);
/*
* At this point we're a unicast or broadcast IPv4 address.
*
* Lookup on the ctable for IRE_BROADCAST or IRE_LOCAL.
* A NULL return value is NOTME, otherwise, look at the
* returned ire for broadcast or not and return accordingly.
*
* by what zone we're in when we go to zone-aware IPsec.
*/
/* Check for local or broadcast */
}
}
return (KS_IN_ADDR_NOTME);
}
/*
* Address normalizations and reality checks for inbound PF_KEY messages.
*
* For the case of src == unspecified AF_INET6, and dst == AF_INET, convert
* the source to AF_INET. Do the same for the inner sources.
*/
{
int rc;
if (rc == KS_IN_ADDR_UNKNOWN)
return (B_FALSE);
if (rc == KS_IN_ADDR_MBCAST) {
return (B_FALSE);
}
}
if (rc == KS_IN_ADDR_UNKNOWN)
return (B_FALSE);
if (rc == KS_IN_ADDR_UNSPEC) {
return (B_FALSE);
}
}
/*
* NAT-Traversal addrs are simple enough to not require all of
* the checks in sadb_addrcheck(). Just normalize or reject if not
* AF_INET.
*/
/*
* NATT addresses never use an IRE_LOCAL, so it should
* always be NOTME, or UNSPEC if it's a tunnel-mode SA.
*/
if (rc != KS_IN_ADDR_NOTME &&
rc == KS_IN_ADDR_UNSPEC)) {
if (rc != KS_IN_ADDR_UNKNOWN)
ksi->ks_in_serial);
return (B_FALSE);
}
src = (struct sockaddr_in *)
ksi->ks_in_serial);
return (B_FALSE);
}
}
/*
* NATT addresses never use an IRE_LOCAL, so it should
* always be NOTME, or UNSPEC if it's a tunnel-mode SA.
*/
if (rc != KS_IN_ADDR_NOTME &&
rc == KS_IN_ADDR_UNSPEC)) {
if (rc != KS_IN_ADDR_UNKNOWN)
ksi->ks_in_serial);
return (B_FALSE);
}
src = (struct sockaddr_in *)
ksi->ks_in_serial);
return (B_FALSE);
}
}
ksi->ks_in_serial);
return (B_FALSE);
}
== KS_IN_ADDR_UNKNOWN ||
return (B_FALSE);
isrc = (struct sockaddr_in *)
1);
idst = (struct sockaddr_in6 *)
1);
ksi->ks_in_serial);
return (B_FALSE);
}
ksi->ks_in_serial);
return (B_FALSE);
} else {
}
return (B_TRUE);
/* Can't set inner and outer ports in one SA. */
ksi->ks_in_serial);
return (B_FALSE);
}
return (B_TRUE);
if (srcext->sadb_address_proto == 0) {
} else if (dstext->sadb_address_proto == 0) {
} else {
/* Inequal protocols, neither were 0. Report error. */
ksi->ks_in_serial);
return (B_FALSE);
}
}
/*
* With the exception of an unspec IPv6 source and an IPv4
* destination, address families MUST be matched.
*/
return (B_FALSE);
}
/*
* Convert "src" to AF_INET INADDR_ANY. We rely on sin_port being
* in the same place for sockaddr_in and sockaddr_in6.
*/
return (B_TRUE);
}
/*
* Set the results in "addrtype", given an IRE as requested by
* sadb_addrcheck().
*/
int
{
return (KS_IN_ADDR_MBCAST);
return (KS_IN_ADDR_ME);
return (KS_IN_ADDR_NOTME);
}
/*
* Assumes that we're called with *head locked, no other locks held;
* Conveniently, and not coincidentally, this is both what sadb_walker
* gives us and also what sadb_unlinkassoc expects.
*/
struct sadb_purge_state
{
char *sidstr;
char *didstr;
};
static void
{
return;
}
}
/*
* Common code to purge an SA with a matching src or dst address.
* Don't kill larval SA's in such a purge.
*/
int
{
sadb_x_kmc_t *kmc =
struct sadb_purge_state ps;
/*
* Don't worry about IPv6 v4-mapped addresses, sadb_addrcheck()
* takes care of them.
*/
/* enforced by caller */
#ifdef DEBUG
#endif
} else {
}
}
} else {
}
}
/*
* NOTE: May need to copy string in the future
* if the inbound keysock message disappears for some strange
* reason.
*/
}
/*
* NOTE: May need to copy string in the future
* if the inbound keysock message disappears for some strange
* reason.
*/
}
/*
* This is simple, crude, and effective.
* Unimplemented optimizations (TBD):
* - we can limit how many places we search based on where we
* think the SA is filed.
* - if we get a dst address, we can hash based on dst addr to find
* the correct bucket in the outbound table.
*/
NULL);
return (0);
}
/*
*/
int
{
return (EINVAL);
}
return (EINVAL);
}
/*
* Don't worry about IPv6 v4-mapped addresses, sadb_addrcheck()
* takes care of them.
*/
} else {
}
} else {
} else {
}
}
/* Lock down both buckets. */
/* Try outbound first. */
} else {
}
return (ESRCH);
}
if (delete) {
/* At this point, I have one or two SAs to be deleted. */
if (outbound_target != NULL) {
}
if (inbound_target != NULL) {
}
}
/*
* Because of the multi-line macro nature of IPSA_REFRELE, keep
* them in { }.
*/
if (outbound_target != NULL) {
}
if (inbound_target != NULL) {
}
return (0);
}
/*
* Initialize the mechanism parameters associated with an SA.
* These parameters can be shared by multiple packets, which saves
* us from the overhead of consulting the algorithm table for
* each packet.
*/
static void
{
} else
}
} else
}
}
/*
* Perform NAT-traversal cached checksum offset calculations here.
*/
static void
{
uint32_t running_sum = 0;
if (natt_rem_ext != NULL) {
/* Ensured by sadb_addrfix(). */
l_src = *src_addr_ptr;
l_rem = *natt_rem_ptr;
/* Instead of IPSA_COPY_ADDR(), just copy first 32 bits. */
/*
* We're 1's complement for checksums, so check for wraparound
* here.
*/
l_src--;
}
if (natt_loc_ext != NULL) {
/* Ensured by sadb_addrfix(). */
/* TODO - future port flexibility beyond 4500. */
l_dst = *dst_addr_ptr;
l_loc = *natt_loc_ptr;
/* Instead of IPSA_COPY_ADDR(), just copy first 32 bits. */
/*
* We're 1's complement for checksums, so check for wraparound
* here.
*/
l_dst--;
}
}
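#if 0	/* illustrative sketch */
/*
 * A sketch of the 1's-complement arithmetic the wraparound checks above
 * are guarding: folding a running 32-bit sum back into 16 bits so the
 * cached checksum offset stays a valid 1's-complement quantity.
 */
static uint16_t
ex_csum_fold(uint32_t running_sum)
{
	while ((running_sum >> 16) != 0)
		running_sum = (running_sum & 0xffff) + (running_sum >> 16);
	return ((uint16_t)running_sum);
}
#endif	/* illustrative sketch */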
/*
* This function is called from consumers that need to insert a fully-grown
* security association into its tables. This function takes into account that
* SAs can be "inbound", "outbound", or "both". The "primary" and "secondary"
* hash bucket parameters are set in order of what the SA will be most of the
* time. (For example, an SA with an unspecified source, and a multicast
* destination will primarily be an outbound SA. OTOH, if that destination
* is unicast for this node, then the SA will primarily be inbound.)
*
* It takes a lot of parameters because even if clone is B_FALSE, this needs
* to check both buckets for purposes of collision.
*
* Return 0 upon success. Return various errnos (ENOMEM, EEXIST) for
* various error conditions. We may need to set samsg->sadb_x_msg_diagnostic
* with additional diagnostic information because there is at least one EINVAL
* case here.
*/
int
netstack_t *ns)
{
#if 0
/*
* XXXMLS - When Trusted Solaris or Multi-Level Secure functionality
* comes to ON, examine these if 0'ed fragments. Look for XXXMLS.
*/
#endif
int error = 0;
} else {
}
} else {
}
if (!isupdate) {
return (ENOMEM);
}
if (srcext->sadb_address_proto != 0) {
/*
* Mismatched outer-packet protocol
* and inner-packet address family.
*/
error = EPROTOTYPE;
goto error;
} else {
/* Fill in with explicit protocol. */
}
}
} else {
if (srcext->sadb_address_proto != 0) {
/*
* Mismatched outer-packet protocol
* and inner-packet address family.
*/
error = EPROTOTYPE;
goto error;
} else {
/* Fill in with explicit protocol. */
}
}
}
/* Unique value uses inner-ports for Tunnel Mode... */
} else {
/* ... and outer-ports for Transport Mode. */
}
/*
* Use |= because we set unique fields above. UNIQUE is filtered
* out before we reach here so it's not like we're sabotaging anything.
* ASSERT we're either 0 or UNIQUE for good measure, though.
*/
goto error;
}
/*
* If unspecified source address, force replay_wsize to 0.
* This is because an SA that has multiple sources of secure
* traffic cannot enforce a replay counter w/o synchronizing the
* senders.
*/
else
newbie->ipsa_replay_wsize = 0;
}
/*
* XXX CURRENT lifetime checks MAY BE needed for an UPDATE.
* The spec says that one can update current lifetimes, but
* that seems impractical, especially in the larval-to-mature
* update that this function performs.
*/
}
}
/* In case we have to round up to the next byte... */
goto error;
}
/*
* Pre-initialize the kernel crypto framework key
* structure.
*/
if (error != 0) {
goto error;
}
}
/* In case we have to round up to the next byte... */
goto error;
}
/* XXX is this safe w.r.t db_ref, etc? */
/*
* Pre-initialize the kernel crypto framework key
* structure.
*/
if (error != 0) {
goto error;
}
}
/*
* Ptrs to processing functions.
*/
else
/*
* Certificate ID stuff.
*/
sadb_ident_t *id =
/*
* Can assume strlen() will return okay because ext_check() in
* keysock.c prepares the string for us.
*/
goto error;
}
}
sadb_ident_t *id =
/*
* Can assume strlen() will return okay because ext_check() in
* keysock.c prepares the string for us.
*/
goto error;
}
}
#if 0
/* XXXMLS SENSITIVITY handling code. */
int i;
goto error;
}
goto error;
}
for (i = 0; i < sens->sadb_sens_sens_len; i++) {
bitmap++;
}
for (i = 0; i < sens->sadb_sens_integ_len; i++) {
bitmap++;
}
}
#endif
/* now that the SA has been updated, set its new state */
/*
* The less locks I hold when doing an insertion and possible cloning,
* the better!
*/
if (clone) {
if (newbie_clone == NULL) {
goto error;
}
}
/*
* Enter the bucket locks. The order of entry is outbound,
* inbound. We map "primary" and "secondary" into outbound and inbound
* based on the destination address type. If the destination address
* type is for a node that isn't mine (or potentially mine), the
* "primary" bucket is the outbound one.
*/
/* primary == outbound */
} else {
/* primary == inbound */
}
/*
* sadb_insertassoc() doesn't increment the reference
* count. We therefore have to increment the
* reference count one more time to reflect the
* pointers of the table that reference this SA.
*/
if (isupdate) {
/*
* Unlink from larval holding cell in the "inbound" fanout.
*/
}
if (error == 0) {
}
if (error != 0) {
/*
* Since sadb_insertassoc() failed, we must decrement the
* refcount again so the cleanup code will actually free
* the offending SA.
*/
goto error_unlock;
}
if (newbie_clone != NULL) {
if (error != 0) {
/* Collision in secondary table. */
goto error_unlock;
}
} else {
/* Collision in secondary table. */
/* Set the error, since ipsec_getassocbyspi() can't. */
goto error_unlock;
}
}
/* OKAY! So let's do some reality check assertions. */
/*
* If hardware acceleration could happen, send it.
*/
}
/*
* We can exit the locks in any order. Only entrance needs to
* follow any protocol.
*/
/* Common error point for this routine. */
}
if (newbie_clone != NULL) {
}
if (error == 0) {
/*
* Construct favorable PF_KEY return message and send to
* keysock. (Q: Do I need to pass "newbie"? If I do,
* make sure to REFHOLD, call, then REFRELE.)
*/
}
return (error);
}
/*
* Set the time of first use for a security association. Update any
* expiration times as a result.
*/
void
{
/*
* Caller does check usetime before calling me usually, and
* double-checking is better than a mutex_enter/exit hit.
*/
if (assoc->ipsa_usetime == 0) {
/*
* This is redundant for outbound SA's, as
* ipsec_getassocbyconn() sets the IPSA_F_USED flag already.
* Inbound SAs, however, have no such protection.
*/
/*
* After setting the use time, see if we have a use lifetime
* that would cause the actual SA expiration time to shorten.
*/
}
}
/*
* Send up a PF_KEY expire message for this association.
*/
static void
{
/* Don't bother sending if there's no queue. */
return;
mp = sadb_keysock_out(0);
/* cmn_err(CE_WARN, */
/* "sadb_expire_assoc: Can't allocate KEYSOCK_OUT.\n"); */
return;
}
switch (af) {
case AF_INET:
break;
case AF_INET6:
break;
default:
/* Won't happen unless there's a kernel bug. */
"sadb_expire_assoc: Unknown address length.\n");
return;
}
if (tunnel_mode) {
switch (assoc->ipsa_innerfam) {
case AF_INET:
break;
case AF_INET6:
break;
default:
/* Won't happen unless there's a kernel bug. */
"Unknown inner address length.\n");
return;
}
}
/* cmn_err(CE_WARN, */
/* "sadb_expire_assoc: Can't allocate message.\n"); */
return;
}
samsg->sadb_msg_errno = 0;
samsg->sadb_msg_reserved = 0;
samsg->sadb_msg_seq = 0;
samsg->sadb_msg_pid = 0;
} else {
}
if (tunnel_mode) {
}
/* Can just putnext, we're ready to go! */
}
/*
* "Age" the SA with the number of bytes that was used to protect traffic.
* Send an SADB_EXPIRE message if appropriate. Return B_TRUE if there was
* enough "charge" left in the SA to protect the data. Return B_FALSE
* otherwise. (If B_FALSE is returned, the association either was, or became
* DEAD.)
*/
{
if (assoc->ipsa_hardbyteslt != 0 &&
/*
* Send EXPIRE message to PF_KEY. May wish to pawn
* this off on another non-interrupt thread. Also
* unlink this SA immediately.
*/
if (sendmsg)
/*
* Set non-zero expiration time so sadb_age_assoc()
* will work when reaping.
*/
} /* Else someone beat me to it! */
} else if (assoc->ipsa_softbyteslt != 0 &&
/*
* Send EXPIRE message to PF_KEY. May wish to pawn
* this off on another non-interrupt thread.
*/
if (sendmsg)
} /* Else someone beat me to it! */
}
return (rc);
}
/*
* Push one or more DL_CO_DELETE messages queued up by
* sadb_torch_assoc down to the underlying driver now that it's a
* convenient time for it (i.e., ipsa bucket locks not held).
*/
static void
{
if (q != NULL)
else
}
}
/*
* "Torch" an individual SA. Returns NULL, so it can be tail-called from
* sadb_age_assoc().
*
* If SA is hardware-accelerated, and we can't allocate the mblk
* containing the DL_CO_DELETE, just return; it will remain in the
* table and be swept up by sadb_ager() in a subsequent pass.
*/
static ipsa_t *
{
/*
* Force cached SAs to be revalidated..
*/
return (NULL);
}
}
return (NULL);
}
/*
* Return "assoc" iff haspeer is true and I send an expire. This allows
* the consumers' aging functions to tidy up an expired SA's peer.
*/
static ipsa_t *
{
}
/*
* Check lifetimes. Fortunately, SA setup is done
* such that there are only two times to look at,
* softexpiretime, and hardexpiretime.
*
* Check hard first.
*/
if (assoc->ipsa_hardexpiretime != 0 &&
/*
* Send SADB_EXPIRE with hard lifetime, delay for unlinking.
*/
if (assoc->ipsa_haspeer) {
/*
* If I return assoc, I have to bump up its
* reference count to keep with the ipsa_t reference
* count semantics.
*/
}
} else if (assoc->ipsa_softexpiretime != 0 &&
/*
* Send EXPIRE message to PF_KEY. May wish to pawn
* this off on another non-interrupt thread.
*/
if (assoc->ipsa_haspeer) {
/*
* If I return assoc, I have to bump up its
* reference count to keep with the ipsa_t reference
* count semantics.
*/
}
}
return (retval);
}
/*
* Called by a consumer protocol to do the dirty work of reaping dead
* Security Associations.
*/
void
netstack_t *ns)
{
int i;
struct templist {
int outhash;
/*
* Do my dirty work. This includes aging real entries, aging
* larvals, and aging outstanding ACQUIREs.
*
* I hope I don't tie up resources for too long.
*/
/* Snapshot current time now. */
/* Age acquires. */
for (i = 0; i < sp->sdb_hashsize; i++) {
}
}
/* Age inbound associations. */
for (i = 0; i < sp->sdb_hashsize; i++) {
/*
* sadb_age_assoc() increments the refcnt,
* effectively doing an IPSA_REFHOLD().
*/
/*
* Don't forget to REFRELE().
*/
continue; /* for loop... */
}
}
}
}
}
/*
* Haspeer cases will contain both IPv4 and IPv6. This code
* is address independent.
*/
while (haspeerlist != NULL) {
/* "spare" contains the SA that has a peer. */
/*
* Pick peer bucket based on addrfam.
*/
} else {
}
}
}
/* Age outbound associations. */
for (i = 0; i < sp->sdb_hashsize; i++) {
/*
* sadb_age_assoc() increments the refcnt,
* effectively doing an IPSA_REFHOLD().
*/
/*
* Don't forget to REFRELE().
*/
continue; /* for loop... */
}
}
}
}
}
/*
* Haspeer cases will contain both IPv4 and IPv6. This code
* is address independent.
*/
while (haspeerlist != NULL) {
/* "spare" contains the SA that has a peer. */
/*
* Pick peer bucket based on addrfam.
*/
}
}
/*
* Run a GC pass to clean out dead identities.
*/
}
/*
* Figure out when to reschedule the ager.
*/
{
/*
* See how long this took. If it took too long, increase the
* aging interval.
*/
/* XXX Rate limit this? Or recommend flush? */
"Too many SA's to age out in %d msec.\n",
intmax);
} else {
/* Double by shifting by one bit. */
interval <<= 1;
}
/*
* If I took less than half of the interval, then I should
* ratchet the interval back down. Never automatically
* shift below the default aging interval.
*
* NOTE: This even overrides manual setting of the age
* interval using NDD.
*/
/* Halve by shifting one bit. */
interval >>= 1;
}
}
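#if 0	/* illustrative sketch */
/*
 * A sketch (hypothetical helper) of the interval adaptation described
 * above: double the interval when a pass overran it, halve it when the
 * pass used less than half of it, never dropping below the default and
 * never growing past the maximum.
 */
static int
ex_adjust_ager_interval(int interval, int elapsed, int defval, int intmax)
{
	if (elapsed > interval) {
		if (interval < intmax)
			interval <<= 1;		/* Double by shifting. */
	} else if (elapsed < interval / 2 && interval > defval) {
		interval >>= 1;			/* Halve by shifting. */
	}
	return (interval);
}
#endif	/* illustrative sketch */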
/*
* Update the lifetime values of an SA. This is the path an SADB_UPDATE
* message takes when updating a MATURE or DYING SA.
*/
static void
{
/*
* XXX RFC 2367 mentions how an SADB_EXT_LIFETIME_CURRENT can be
* passed in during an update message. We currently don't handle
* these.
*/
if (hard->sadb_lifetime_bytes != 0)
if (hard->sadb_lifetime_usetime != 0)
if (hard->sadb_lifetime_addtime != 0)
if (assoc->ipsa_hardaddlt != 0) {
}
if (assoc->ipsa_harduselt != 0) {
if (assoc->ipsa_hardexpiretime != 0) {
} else {
}
}
if (hard->sadb_lifetime_allocations != 0)
}
if (soft->sadb_lifetime_bytes != 0)
if (soft->sadb_lifetime_usetime != 0)
if (soft->sadb_lifetime_addtime != 0)
if (assoc->ipsa_softaddlt != 0) {
}
if (assoc->ipsa_softuselt != 0) {
if (assoc->ipsa_softexpiretime != 0) {
} else {
}
}
if (soft->sadb_lifetime_allocations != 0)
}
}
/*
* Common code to update an SA.
*/
int
netstack_t *ns)
{
int error = 0;
/* I need certain extensions present for either UPDATE message. */
return (EINVAL);
}
return (EINVAL);
}
return (EINVAL);
}
}
} else {
}
/* Lock down both buckets. */
/* Try outbound first. */
if (outbound_target == NULL) {
if (inbound_target == NULL) {
return (ESRCH);
/*
* REFRELE the target and let the add_sa_func()
* deal with updating a larval SA.
*/
}
}
/*
* Reality checks for updates of active associations.
* Sundry first-pass UPDATE-specific reality checks.
* Have to do the checks here, because it's after the add_sa code.
*/
goto bail;
}
goto bail;
}
error = EOPNOTSUPP;
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
if (outbound_target != NULL) {
goto bail;
}
if ((kmp != 0) &&
((outbound_target->ipsa_kmp != 0) ||
goto bail;
}
if ((kmc != 0) &&
((outbound_target->ipsa_kmc != 0) ||
goto bail;
}
}
if (inbound_target != NULL) {
goto bail;
}
if ((kmp != 0) &&
((inbound_target->ipsa_kmp != 0) ||
goto bail;
}
if ((kmc != 0) &&
((inbound_target->ipsa_kmc != 0) ||
goto bail;
}
}
if (outbound_target != NULL) {
if (kmp != 0)
if (kmc != 0)
}
if (inbound_target != NULL) {
if (kmp != 0)
if (kmc != 0)
}
bail:
/*
* Because of the multi-line macro nature of IPSA_REFRELE, keep
* them in { }.
*/
if (outbound_target != NULL) {
}
if (inbound_target != NULL) {
}
return (error);
}
/*
* The following functions deal with ACQUIRE LISTS. An ACQUIRE list is
* a list of outstanding SADB_ACQUIRE messages. If ipsec_getassocbyconn() fails
* for an outbound datagram, that datagram is queued up on an ACQUIRE record,
* and an SADB_ACQUIRE message is sent up. Presumably, a user-space key
* management daemon will process the ACQUIRE, use a SADB_GETSPI to reserve
* an SPI value and a larval SA, then SADB_UPDATE the larval SA, and ADD the
* other direction's SA.
*/
/*
* Check the ACQUIRE lists. If there's an existing ACQUIRE record,
* grab it, lock it, and return it. Otherwise return NULL.
*/
static ipsacq_t *
{
}
/*
*
* XXX May need to search for duplicates based on other things too!
*/
break; /* everything matched */
}
return (walker);
}
/*
* For this mblk, insert a new acquire record. Assume bucket contains addrs
* of all of the same length. Give up (and drop) if memory
* cannot be allocated for a new one; otherwise, invoke callback to
* send the acquire up.
*
* In cases where we need both AH and ESP, add the SA to the ESP ACQUIRE
* list. The ah_add_sa_finish() routines can look at the packet's ipsec_out_t
* and handle this case specially.
*/
void
{
int hashoffset;
/* Assign sadb pointers */
if (need_esp) { /* ESP for AH+ESP */
} else {
}
/*
* Set up an ACQUIRE record.
*
* Immediately, make sure the ACQUIRE sequence number doesn't slip
* below the lowest point allowed in the kernel. (In other words,
* make sure the high bit on the sequence number is set.)
*/
} else {
}
if (tunnel_mode) {
/* Snag inner addresses. */
} else {
}
/*
* Check buckets to see if there is an existing entry. If so,
* grab it. sadb_checkacquire locks newbie if found.
*/
/*
* Otherwise, allocate a new one.
*/
return;
}
}
}
/*
* This assert looks silly for now, but we may need to enter newbie's
* mutex during a search.
*/
/* Queue up packet. Use b_next. */
if (newbie->ipsacq_numpackets == 0) {
/* First one. */
/*
* Extended ACQUIRE with both AH+ESP will use ESP's timeout
* value.
*/
if (tunnel_mode) {
} else {
}
} else {
/* Scan to the end of the list & insert. */
} else {
}
}
/*
* Reset addresses. Set them to the most recently added mblk chain,
* so that the address pointers in the acquire record will point
* at an mblk still attached to the acquire list.
*/
/*
* If the acquire record has more than one queued packet, we've
* already sent an ACQUIRE, and don't need to repeat ourself.
*/
/* I have an acquire outstanding already! */
return;
}
if (keysock_extended_reg(ns)) {
/*
* Construct an extended ACQUIRE. There are logging
* opportunities here in failure cases.
*/
if (tunnel_mode) {
} else {
}
sel.ips_is_icmp_inv_acq = 0;
} else {
}
extended = sadb_keysock_out(0);
}
}
} else
/*
* Send an ACQUIRE message (and possible an extended ACQUIRE) based on
* this new record. The send-acquire callback assumes that acqrec is
* already locked.
*/
}
/*
* Unlink and free an acquire record.
*/
void
{
}
}
/* Unlink */
/*
* Free hanging mp's.
*
* XXX Instead of freemsg(), perhaps use IPSEC_REQ_FAILED.
*/
}
/* Free */
}
/*
* Destroy an acquire list fanout.
*/
static void
netstack_t *ns)
{
int i;
return;
for (i = 0; i < numentries; i++) {
if (forever)
}
if (forever) {
}
}
/*
* Create an algorithm descriptor for an extended ACQUIRE. Filter crypto
* framework's view of reality vs. IPsec's. EF's wins, BTW.
*/
static uint8_t *
{
return (NULL);
/*
* Normalize vs. crypto framework's limits. This way, you can specify
* a stronger policy, and when the framework loads a stronger version,
* you can just keep plowing w/o rewhacking your SPD.
*/
return (cur);
}
/*
* Convert the given ipsec_action_t into an ecomb starting at *ecomb
* which must fit before *limit.
*
* Return NULL if we ran out of room, or a pointer to the end of the ecomb.
*/
static uint8_t *
netstack_t *ns)
{
return (NULL);
ecomb->sadb_x_ecomb_numalgs = 0;
ecomb->sadb_x_ecomb_reserved = 0;
ecomb->sadb_x_ecomb_reserved2 = 0;
/*
* No limits on allocations, since we really don't support that
* concept currently.
*/
/*
* XXX TBD: Policy or global parameters will eventually be
* able to fill in some of these.
*/
ecomb->sadb_x_ecomb_flags = 0;
ecomb->sadb_x_ecomb_soft_bytes = 0;
ecomb->sadb_x_ecomb_hard_bytes = 0;
if (ipp->ipp_use_ah) {
return (NULL);
}
if (ipp->ipp_use_esp) {
if (ipp->ipp_use_espa) {
return (NULL);
}
return (NULL);
/* Fill in lifetimes if and only if AH didn't already... */
if (!ipp->ipp_use_ah)
}
return (cur);
}
/*
* Construct an extended ACQUIRE message based on a selector and the resulting
* IPsec action.
*
* NOTE: This is used by both inverse ACQUIRE and actual ACQUIRE
* generation. As a consequence, expect this function to evolve
* rapidly.
*/
static mblk_t *
netstack_t *ns)
{
/*
* Find the action we want sooner rather than later..
*/
} else {
}
/*
* Just take a swag for the allocation for now. We can always
* alter it later.
*/
#define SADB_EXTENDED_ACQUIRE_SIZE 2048
return (NULL);
samsg->sadb_msg_errno = 0;
samsg->sadb_msg_reserved = 0;
samsg->sadb_msg_satype = 0;
if (tunnel_mode) {
/*
* Form inner address extensions based NOT on the inner
* selectors (i.e. the packet data), but on the policy's
* selector key (i.e. the policy's selector information).
*
* NOTE: The position of IPv4 and IPv6 addresses is the
* same in ipsec_selkey_t (unless the compiler does very
* strange things with unions, consult your local C language
* lawyer for details).
*/
} else {
}
} else {
pfxlen = 0;
}
ipsl->ipsl_lport : 0;
ipsl->ipsl_proto : 0;
return (NULL);
}
} else {
pfxlen = 0;
}
ipsl->ipsl_rport : 0;
return (NULL);
}
/*
* TODO - if we go to 3408's dream of transport mode IP-in-IP
* _with_ inner-packet address selectors, we'll need to further
* distinguish tunnel mode here. For now, having inner
*
* outer addresses.
*/
proto = 0;
lport = 0;
rport = 0;
}
} else {
}
/*
* NOTE: The position of IPv4 and IPv6 addresses is the same in
* ipsec_selector_t.
*/
return (NULL);
}
return (NULL);
}
/*
* This section will change a lot as policy evolves.
* For now, it'll be relatively simple.
*/
/* no space left */
return (NULL);
}
eprop->sadb_x_prop_ereserved = 0;
eprop->sadb_x_prop_numecombs = 0;
/*
* Skip non-IPsec policies
*/
continue;
}
return (NULL);
}
}
if (eprop->sadb_x_prop_numecombs == 0) {
/*
* This will happen if we fail to find a policy
* allowing for IPsec processing.
* Construct an error message.
*/
samsg->sadb_x_msg_diagnostic = 0;
return (mp);
}
return (NULL);
}
}
return (mp);
}
/*
* Generic setup of an RFC 2367 ACQUIRE message. Caller sets satype.
*
* NOTE: This function acquires alg_lock as a side-effect if-and-only-if we
* succeed (i.e. return non-NULL). Caller MUST release it. This is to
* maximize code consolidation while preventing algorithm changes from messing
* with the caller's finishing touches on the ACQUIRE itself.
*/
mblk_t *
{
pfkeymp = sadb_keysock_out(0);
return (NULL);
/*
* First, allocate a basic ACQUIRE message
*/
sizeof (sadb_address_t) + sizeof (sadb_prop_t);
/* Make sure there's enough to cover both AF_INET and AF_INET6. */
/* NOTE: The lock is now held through to this function's return. */
if (tunnel_mode) {
/* Tunnel mode! */
/* Enough to cover both AF_INET and AF_INET6. */
}
return (NULL);
}
cur += sizeof (sadb_msg_t);
switch (af) {
case AF_INET:
break;
case AF_INET6:
break;
default:
/* This should never happen unless we have kernel bugs. */
"sadb_setup_acquire: corrupt ACQUIRE record.\n");
ASSERT(0);
return (NULL);
}
samsg->sadb_msg_errno = 0;
samsg->sadb_msg_pid = 0;
samsg->sadb_msg_reserved = 0;
sport_typecode = dport_typecode = 0;
} else {
}
if (tunnel_mode) {
}
/* XXX Insert identity information here. */
/* XXXMLS Insert sensitivity information here. */
else
return (pfkeymp);
}
/*
* Given an SADB_GETSPI message, find an appropriately ranged SA and
* allocate an SA. If there are message improprieties, return (ipsa_t *)-1.
* If there was a memory allocation error, return NULL. (Assume NULL !=
* (ipsa_t *)-1).
*
* master_spi is passed in host order.
*/
ipsa_t *
netstack_t *ns)
{
return ((ipsa_t *)-1);
}
return ((ipsa_t *)-1);
}
return ((ipsa_t *)-1);
}
switch (af) {
case AF_INET:
break;
case AF_INET6:
break;
default:
return ((ipsa_t *)-1);
}
/* Return a random value in the range. */
}
/*
* Since master_spi is passed in host order, we need to htonl() it
* for the purposes of creating a new SA.
*/
ns));
}
/*
*
* Locate an ACQUIRE and nuke it. If I have an samsg that's larger than the
* base header, just ignore it. Otherwise, lock down the whole ACQUIRE list
* and scan for the sequence number in question. I may wish to accept an
* address pair with it, for easier searching.
*
* Caller frees the message, so we don't have to here.
*
* NOTE: The ip_q parameter may be used in the future for ACQUIRE
* failures.
*/
/* ARGSUSED */
void
{
int i;
/*
* I only accept the base header for this!
* Though to be honest, requiring the dst address would help
* immensely.
*
* XXX There are already cases where I can get the dst address.
*/
return;
/*
* Using the samsg->sadb_msg_seq, find the ACQUIRE record, delete it,
* (and in the future send a message to IP with the appropriate error
* number).
*
* Q: Do I want to reject if pid != 0?
*/
break; /* for acqrec... loop. */
}
break; /* for i = 0... loop. */
}
break; /* for acqrec... loop. */
}
break; /* for i = 0... loop. */
}
}
return;
/*
* What do I do with the errno and IP? I may need mp's services a
* little more. See sadb_destroy_acquire() for future directions
* beyond free the mblk chain on the acquire record.
*/
/* Have to exit mutex here, because of breaking out of for loop. */
}
/*
* The following functions work with the replay windows of an SA. They assume
* the ipsa->ipsa_replay_arr is an array of uint64_t, and that the bit vector
* represents the highest sequence number packet received, and back
* (ipsa->ipsa_replay_wsize) packets.
*/
/*
* Is the replay bit set?
*/
static boolean_t
{
}
/*
* Shift the bits of the replay window over.
*/
static void
{
int i;
if (shift == 0)
return;
}
}
}
/*
* Set a bit in the bit vector.
*/
static void
{
}
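#if 0	/* illustrative sketch */
/*
 * A sketch (hypothetical structure, helpers, and an assumed bit layout)
 * of a replay window held in uint64_t words: bit 0 of word 0 stands for
 * the highest sequence number seen, higher bit offsets for progressively
 * older packets.
 */
#define	EX_REPLAY_WORDS	8	/* 512-bit window for the sketch */

typedef struct ex_replay_s {
	uint64_t ex_win[EX_REPLAY_WORDS];
} ex_replay_t;

static boolean_t
ex_replay_is_set(const ex_replay_t *r, uint_t off)
{
	return ((r->ex_win[off >> 6] & (1ULL << (off & 63))) != 0);
}

static void
ex_replay_set(ex_replay_t *r, uint_t off)
{
	r->ex_win[off >> 6] |= (1ULL << (off & 63));
}

/* Slide the window forward by "shift" sequence numbers. */
static void
ex_replay_shift(ex_replay_t *r, uint_t shift)
{
	uint_t words = shift >> 6, bits = shift & 63;
	int i;

	if (shift == 0)
		return;
	if (shift >= EX_REPLAY_WORDS * 64) {
		bzero(r->ex_win, sizeof (r->ex_win));
		return;
	}
	/* Work from the oldest word down so no source word is clobbered. */
	for (i = EX_REPLAY_WORDS - 1; i >= 0; i--) {
		uint64_t v = 0;

		if (i >= (int)words) {
			v = r->ex_win[i - words] << bits;
			if (bits != 0 && i - (int)words >= 1)
				v |= r->ex_win[i - words - 1] >> (64 - bits);
		}
		r->ex_win[i] = v;
	}
}
#endif	/* illustrative sketch */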
#define SADB_MAX_REPLAY_VALUE 0xffffffff
/*
* Assume caller has NOT done ntohl() already on seq. Check to see
* if replay sequence number "seq" has been seen already.
*/
{
if (ipsa->ipsa_replay_wsize == 0)
return (B_TRUE);
/*
* NOTE: I've already checked for 0 on the wire in sadb_replay_peek().
*/
/* Convert sequence number into host order before holding the mutex. */
/* Initialize inbound SA's ipsa_replay field to last one received. */
if (ipsa->ipsa_replay == 0)
/*
* I have received a new "highest value received". Shift
* the replay window over.
*/
/* In replay window, shift bits over. */
} else {
/* WAY FAR AHEAD, clear bits and start again. */
sizeof (ipsa->ipsa_replay_arr));
}
ipsa_set_replay(ipsa, 0);
goto done;
}
goto done;
}
/* Set this packet as seen. */
done:
return (rc);
}
/*
* "Peek" and see if we should even bother going through the effort of
* running an authentication check on the sequence number passed in.
* This takes into account packets that are below the replay window,
* and collisions with already replayed packets. Return B_TRUE if it
* is okay to proceed, B_FALSE if this packet should be dropped immediately.
* Assume same byte-ordering as sadb_replay_check.
*/
{
if (ipsa->ipsa_replay_wsize == 0)
return (B_TRUE);
/*
* 0 is 0, regardless of byte order... :)
*
* If I get 0 on the wire (and there is a replay window) then the
* sender most likely wrapped. This ipsa may need to be marked or
* something.
*/
if (seq == 0)
return (B_FALSE);
goto done;
/*
* If I've hit 0xffffffff, then quite honestly, I don't need to
* bother with formalities. I'm not accepting any more packets
* on this SA.
*/
/*
* Since we're already holding the lock, update the
* expire time ala. sadb_replay_delete() and return.
*/
goto done;
}
/*
* This seq is in the replay window. I'm not below it,
* because I already checked for that above!
*/
goto done;
}
/* Else return B_TRUE, I'm going to advance the window. */
done:
return (rc);
}
/*
* Delete a single SA.
*
* For now, use the quick-and-dirty trick of making the association's
* hard-expire lifetime (time_t)1, ensuring deletion by the *_ager().
*/
void
{
}
/*
* Given a queue that presumably points to IP, send a T_BIND_REQ for _proto_
* down. The caller will handle the T_BIND_ACK locally.
*/
{
struct T_bind_req *tbr;
/* cmn_err(CE_WARN, */
/* "sadb_t_bind_req(%d): couldn't allocate mblk\n", proto); */
return (B_FALSE);
}
tbr->ADDR_length = 0;
tbr->ADDR_offset = 0;
tbr->CONIND_number = 0;
return (B_TRUE);
}
/*
* Special front-end to ipsec_rl_strlog() dealing with SA failure.
* This is designed to take only a format string with "* %x * %s *", so
* that "spi" is printed first, then "addr" is converted using inet_pton().
*
* This is abstracted out to save the stack space for only when inet_pton()
* is called. Make sure "spi" is in network order; it usually is when this
* would get called.
*/
void
{
char buf[INET6_ADDRSTRLEN];
}
/*
* Fills in a reference to the policy, if any, from the conn, in *ppp.
* Releases a reference to the passed conn_t.
*/
static void
{
} else {
}
}
/*
* The following functions scan through active conn_t structures
* and return a reference to the best-matching policy it can find.
* Caller must release the reference.
*/
static void
{
if (sel->ips_local_port == 0)
return;
ipst)];
break;
}
/* Try port-only match in IPv6. */
}
}
break;
}
return;
}
}
}
static conn_t *
{
if (sel->ips_local_port == 0)
return (NULL);
ipst)];
break;
}
/* Match to all-zeroes. */
}
}
break;
}
return (NULL);
}
}
return (connp);
}
static void
{
/*
* Find TCP state in the following order:
* 1.) Connected conns.
* 2.) Listeners.
*
* Even though #2 will be the common case for inbound traffic, only
* following this order ensures correctness.
*/
if (sel->ips_local_port == 0)
return;
/*
* 0 should be fport, 1 should be lport. SRC is the local one here.
* See ipsec_construct_inverse_acquire() for details.
*/
ports))
break;
}
} else {
ports))
break;
}
}
} else {
/* Try the listen hash. */
return;
}
}
static void
{
/*
* Find SCTP state in the following order:
* 1.) Connected conns.
* 2.) Listeners.
*
* Even though #2 will be the common case for inbound traffic, only
* following this order ensures correctness.
*/
if (sel->ips_local_port == 0)
return;
/*
* 0 should be fport, 1 should be lport. SRC is the local one here.
* See ipsec_construct_inverse_acquire() for details.
*/
} else {
}
return;
}
/*
* Fill in a query for the SPD (in "sel") using two PF_KEY address extensions.
* Returns 0 or errno, and always sets *diagnostic to something appropriate
* to PF_KEY.
*
* NOTE: For right now, this function (and ipsec_selector_t for that matter),
* ignore prefix lengths in the address extension. Since we match on first-
* entered policies, this shouldn't matter. Also, since we normalize prefix-
* set addresses to mask out the lower bits, we should get a suitable search
* key for the SPD anyway. This is the function to change if the assumption
* about suitable search keys is wrong.
*/
static int
{
*diagnostic = 0;
return (EINVAL);
}
} else {
}
} else {
return (EINVAL);
}
} else {
}
}
return (0);
}
/*
* We have encapsulation.
* - Lookup tun_t by address and look for an associated
* tunnel policy
* - If there are inner selectors
* - check ITPF_P_TUNNEL and ITPF_P_ACTIVE
* - Look up tunnel policy based on selectors
* - Else
* - Sanity check the negotiation
* - If appropriate, fall through to global policy
*/
static int
{
int err;
/* Check for inner selectors and act appropriately */
/* Inner selectors present */
(ITPF_P_ACTIVE | ITPF_P_TUNNEL)) {
/*
* If there are inner packet selectors, we must have a negotiated
* tunnel and an active policy. If the tunnel has
* transport-mode policy set on it, or has no policy,
* fail.
*/
return (ENOENT);
} else {
/*
* Reset "sel" to indicate inner selectors. Pass
* inner PF_KEY address extensions for this to happen.
*/
if (err != 0) {
return (err);
}
/*
* Now look for a tunnel policy based on those inner
* selectors. (Common code is below.)
*/
}
} else {
/* No inner selectors present */
/*
* Transport mode negotiation with no tunnel policy
* configured - return to indicate a global policy
* check is needed.
*/
}
return (0);
/* Tunnel mode set with no inner selectors. */
return (ENOENT);
}
/*
* Else, this is a tunnel policy configured with ifconfig(1m)
* or "negotiate transport" with ipsecconf(1m). We have an
* itp with policy set based on any match, so don't bother
* changing fields in "sel".
*/
}
/*
* Don't default to global if we didn't find a matching policy entry.
* Instead, send ENOENT, just like if we hit a transport-mode tunnel.
*/
return (ENOENT);
return (0);
}
static void
{
if (isv4) {
} else {
}
&sel->ips_local_addr_v6)) &&
&sel->ips_remote_addr_v6)))))) {
break;
}
}
return;
}
}
/*
* Construct an inverse ACQUIRE reply based on:
*
* 1.) Current global policy.
* 2.) A conn_t match depending on what all was passed in the extv[].
* 3.) A tunnel's policy head.
* ...
* N.) Other stuff TBD (e.g. identities)
*
* If there is an error, set sadb_msg_errno and sadb_x_msg_diagnostic
* in this function so the caller can extract them where appropriately.
*
* The SRC address is the local one - just like an outbound ACQUIRE message.
*/
mblk_t *
netstack_t *ns)
{
int err;
int diagnostic;
/* Normalize addresses */
== KS_IN_ADDR_UNKNOWN) {
goto bail;
}
== KS_IN_ADDR_UNKNOWN) {
goto bail;
}
goto bail;
}
/* Check for tunnel mode and act appropriately */
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
goto bail;
}
/* Get selectors first, based on outer addresses */
if (err != 0)
goto bail;
/* Check for tunnel mode mismatches. */
sel.ips_protocol != 0))) {
err = EPROTOTYPE;
goto bail;
}
/*
* Okay, we have the addresses and other selector information.
* Let's first find a conn...
*/
switch (sel.ips_protocol) {
case IPPROTO_TCP:
break;
case IPPROTO_UDP:
break;
case IPPROTO_SCTP:
break;
case IPPROTO_ENCAP:
case IPPROTO_IPV6:
/*
* Assume sel.ips_remote_addr_* has the right address at
* that exact position.
*/
/*
* Transport-mode tunnel, make sure we fake out isel
* to contain something based on the outer protocol.
*/
} /* Else isel is initialized by ipsec_tun_pol(). */
&diagnostic, ns);
/*
* NOTE: isel isn't used for now, but in RFC 430x IPsec, it
* may be.
*/
if (err != 0)
goto bail;
break;
default:
break;
}
/*
* If we didn't find a matching conn_t or other policy head, take a
* look in the global policy.
*/
ns);
/* There's no global policy. */
diagnostic = 0;
goto bail;
}
}
/*
* message based on that, fix fields where appropriate,
* and return the message.
*/
}
return (retmp);
} else {
diagnostic = 0;
}
bail:
return (NULL);
}
/*
* ipsa_lpkt is a one-element queue, only manipulated by casptr within
* the next two functions.
*
* These functions loop calling casptr() until the swap "happens",
* turning a compare-and-swap op into an atomic swap operation.
*/
/*
* sadb_set_lpkt: Atomically swap in a value to ipsa->ipsa_lpkt and
* freemsg the previous value. free clue: freemsg(NULL) is safe.
*/
void
{
do
}
/*
* sadb_clear_lpkt: Atomically clear ipsa->ipsa_lpkt and return the
* previous value.
*/
mblk_t *
{
do
return (opkt);
}
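#if 0	/* illustrative sketch */
/*
 * A sketch of the "loop until the swap happens" idiom described above.
 * ex_cas_ptr() is a hypothetical stand-in for a compare-and-swap
 * primitive that atomically replaces *tgt with nv only if *tgt == cmp,
 * returning the value *tgt held beforehand.
 */
extern void *ex_cas_ptr(void **tgt, void *cmp, void *nv);

static mblk_t *
ex_atomic_swap_mblk(mblk_t **slot, mblk_t *newval)
{
	mblk_t *old;

	do {
		old = *slot;
		/* If *slot changed between the read and the CAS, retry. */
	} while (ex_cas_ptr((void **)slot, old, newval) != (void *)old);

	return (old);
}
#endif	/* illustrative sketch */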
/*
* context template when a crypto software provider is removed or
* added.
*/
struct sadb_update_alg_state {
};
static void
{
struct sadb_update_alg_state *update_state =
(struct sadb_update_alg_state *)cookie;
return;
switch (update_state->alg_type) {
case IPSEC_ALG_AUTH:
break;
case IPSEC_ALG_ENCR:
break;
default:
}
return;
}
/*
* The context template of the SA may be affected by the change
* of crypto provider.
*/
if (update_state->is_added) {
/* create the context template if not already done */
(void) ipsec_create_ctx_tmpl(entry,
}
} else {
/*
* The crypto provider was removed. If the context template
* exists but it is no longer valid, free it.
*/
}
}
/*
* Invoked by IP when a software crypto provider has been updated.
* The type and id of the corresponding algorithm is passed as argument.
* is_added is B_TRUE if the provider was added, B_FALSE if it was
* removed. Updates the context templates associated with SAs if needed.
*/
void
netstack_t *ns)
{
struct sadb_update_alg_state update_state;
if (alg_type == IPSEC_ALG_AUTH) {
/* walk the AH tables only for auth. algorithm changes */
}
/* walk the ESP tables */
}
/*
* Creates a context template for the specified SA. This function
* is called when an SA is created and when a context template needs
* to be created due to a change of software provider.
*/
int
{
int rv;
/* get pointers to the algorithm info, context template, and key */
switch (alg_type) {
case IPSEC_ALG_AUTH:
break;
case IPSEC_ALG_ENCR:
break;
default:
}
return (EINVAL);
/* initialize the mech info structure for the framework */
mech.cm_param_len = 0;
/* create a new context template */
/*
* CRYPTO_MECH_NOT_SUPPORTED can be returned if only hardware
* providers are available for that mechanism. In that case
* we don't fail, and will generate the context template from
* the framework callback when a software provider for that
* mechanism registers.
*
* The context template is assigned the special value
* IPSEC_CTX_TMPL_ALLOC if the allocation failed due to a
* lack of memory. No attempt will be made to use
* the context template if it is set to this value.
*/
if (rv == CRYPTO_HOST_MEMORY) {
} else if (rv != CRYPTO_SUCCESS) {
if (rv != CRYPTO_MECH_NOT_SUPPORTED)
return (EINVAL);
}
return (0);
}
/*
* Destroy the context template of the specified algorithm type
* of the specified SA. Must be called while holding the SA lock.
*/
void
{
if (alg_type == IPSEC_ALG_AUTH) {
}
} else {
}
}
}
/*
* Use the kernel crypto framework to check the validity of a key received
* via keysock. Returns 0 if the key is OK, -1 otherwise.
*/
int
{
int crypto_rc;
mech.cm_param_len = 0;
switch (crypto_rc) {
case CRYPTO_SUCCESS:
return (0);
case CRYPTO_MECHANISM_INVALID:
break;
case CRYPTO_KEY_SIZE_RANGE:
break;
case CRYPTO_WEAK_KEY:
break;
}
return (-1);
}
/* ARGSUSED */
static void
{
return;
if (ipsa->ipsa_natt_q != q) {
return;
}
ipsa->ipsa_natt_ka_timer = 0;
}
/*
* Is only to be used on a nattymod queue.
*/
void
{
}