/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015 by Delphix. All rights reserved.
*/
#include <sys/pathname.h>
#include <nfs/nfs_clnt.h>
static void exi_cache_reclaim(void *);
extern pri_t minclsyspri;
/*
* The lifetime of an auth cache entry:
* ------------------------------------
*
* An auth cache entry is created with both the auth_time
* and auth_freshness times set to the current time.
*
* Upon every client access which results in a hit, the
* auth_time will be updated.
*
* If a client access determines that the auth_freshness
* indicates that the entry is STALE, then it will be
* refreshed. Note that this will explicitly reset
* auth_time.
*
* When the REFRESH successfully occurs, then the
* auth_freshness is updated.
*
* There are two ways for an entry to leave the cache:
*
 * 1) Purged by an action on the export (it was removed or changed)
* 2) Memory backpressure from the kernel (check against NFSAUTH_CACHE_TRIM)
*
* For 2) we check the timeout value against auth_time.
*/
/*
 * Number of seconds until we mark an auth cache entry for refresh.
*/
/*
* Number of idle seconds until we yield to backpressure
* to trim a cache entry.
*/
/*
 * While we could encapsulate the exi_list inside the
 * exi structure, we can't do that for the auth_list.
 * So, to keep things looking clean, we keep them both
 * in these external lists.
*/
typedef struct refreshq_exi_node {
typedef struct refreshq_auth_node {
char *ran_netid;
/*
* Used to manipulate things on the refreshq_queue.
* Note that the refresh thread will effectively
* pop a node off of the queue, at which point it
* will no longer need to hold the mutex.
*/
/*
* If there is ever a problem with loading the
* module, then nfsauth_fini() needs to be called
 * to remove state. In that event, since the
 * refreshq thread has already been started, it and
 * nfsauth_fini() need to work together to tear down
 * that state.
*/
typedef enum nfsauth_refreshq_thread_state {
static void nfsauth_free_node(struct auth_cache *);
static void nfsauth_refresh_thread(void);
static int nfsauth_cache_compar(const void *, const void *);
/*
* mountd is a server-side only daemon. This will need to be
* revisited if the NFS server is ever made zones-aware.
*/
void
{
}
void
nfsauth_init(void)
{
/*
* mountd can be restarted by smf(5). We need to make sure
 * the updated door handle will safely make it to mountd_dh.
*/
/*
* Allocate nfsauth cache handle
*/
NULL, 0, minclsyspri);
}
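/*
 * A hedged sketch (not compiled) of the door-handle handoff that the
 * comment in nfsauth_init() alludes to: when smf(5) restarts mountd, a
 * new door handle must replace mountd_dh under a lock so in-flight
 * upcalls never see a half-updated pointer.  The function name and the
 * mountd_lock mutex used here are illustrative assumptions; the real
 * update happens elsewhere when mountd passes its new door id down.
 */
#if 0
static void
mountd_door_update_sketch(door_handle_t new_dh)
{
	door_handle_t old;

	mutex_enter(&mountd_lock);
	old = mountd_dh;
	mountd_dh = new_dh;		/* new_dh is already held by the caller */
	mutex_exit(&mountd_lock);

	if (old != NULL)
		door_ki_rele(old);	/* drop the reference on the stale handle */
}
#endif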
/*
* Finalization routine for nfsauth. It is important to call this routine
* before destroying the exported_lock.
*/
void
nfsauth_fini(void)
{
/*
* Prevent the nfsauth_refresh_thread from getting new
* work.
*/
if (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
/*
* Also, wait for nfsauth_refresh_thread() to exit.
*/
while (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
}
}
/*
* Walk the exi_list and in turn, walk the auth_lists and free all
* lists. In addition, free INVALID auth_cache entries.
*/
if (p->auth_state == NFS_AUTH_INVALID)
}
}
/*
* Deallocate nfsauth cache handle
*/
}
/*
* Convert the address in a netbuf to
* a hash index for the auth_cache table.
*/
static int
hash(struct netbuf *a)
{
int i, h = 0;
for (i = 0; i < a->len; i++)
h ^= a->buf[i];
return (h & (AUTH_TABLESIZE - 1));
}
/*
* Mask out the components of an
* address that do not identify
* a host. For socket addresses the
* masking gets rid of the port number.
*/
static void
{
int i;
}
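/*
 * A hedged, illustrative sketch (not compiled) of the masking idea for
 * the AF_INET case: clearing the port leaves only the host part of the
 * address, so requests from the same client hash and compare equal no
 * matter which source port they arrive from.  The real addrmask() above
 * also has to handle other address families (e.g. AF_INET6).
 */
#if 0
static void
addrmask_inet_sketch(struct netbuf *addr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr->buf;

	if (addr->len >= sizeof (*sin) && sin->sin_family == AF_INET)
		sin->sin_port = 0;
}
#endif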
/*
* nfsauth4_access is used for NFS V4 auth checking. Besides doing
* the common nfsauth_access(), it will check if the client can
 * have limited access to this vnode even if the security flavor
* used does not meet the policy.
*/
int
{
int access;
/*
* There are cases that the server needs to allow the client
* to have a limited view.
*
* e.g.
* /export is shared as "sec=sys,rw=dfs-test-4,sec=krb5,rw"
*
* When the client mounts /export with sec=sys, the client
* would get a limited view with RO access on /export to see
* "home" only because the client is allowed to access
*/
/*
* Allow ro permission with LIMITED view if there is a
* sub-dir exported under vp.
*/
return (NFSAUTH_LIMITED);
}
return (access);
}
static void
{
/*
* msg is shown (at most) once per minute
*/
now = gethrestime_sec();
}
}
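/*
 * A minimal sketch (not compiled) of the once-per-minute rate limiting
 * mentioned above: remember when the last message was emitted and drop
 * anything that arrives within 60 seconds of it.  The static variable
 * and function name are illustrative, not the ones used by this file.
 */
#if 0
static void
rate_limited_log_sketch(const char *msg)
{
	static time_t last_logged;
	time_t now = gethrestime_sec();

	if (now - last_logged >= 60) {
		last_logged = now;
		cmn_err(CE_WARN, "%s", msg);
	}
}
#endif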
/*
 * Call up to mountd to get access information into the kernel.
*/
static bool_t
{
int last = 0;
	/*
	 * The required access information is not available in the kernel,
	 * so we need to call the nfsauth service in the
	 * mount daemon.
	 */
/*
	 * Set up the XDR stream for encoding the arguments. Notice that
* in addition to the args having variable fields (req_netid and
* req_path), the argument data structure is itself versioned,
* so we need to make sure we can size the arguments buffer
* appropriately to encode all the args. If we can't get sizing
* info _or_ properly encode the arguments, there's really no
	 * point in continuing, so we fail the request.
*/
*access = NFSAUTH_DENIED;
return (FALSE);
}
XDR_DESTROY(&xdrs);
goto fail;
}
XDR_DESTROY(&xdrs);
/*
* Prepare the door arguments
*
* We don't know the size of the message the daemon
* will pass back to us. By setting rbuf to NULL,
* we force the door code to allocate a buf of the
* appropriate size. We must set rsize > 0, however,
* else the door code acts as if no response was
* expected and doesn't pass the data to us.
*/
/*
* The rendezvous point has not been established yet!
* This could mean that either mountd(1m) has not yet
* been started or that _this_ routine nuked the door
* handle after receiving an EINTR for a REVOKED door.
*
* Returning NFSAUTH_DROP will cause the NFS client
	 * to retransmit the request, so let's try to be more
	 * resilient and retry up to NFSAUTH_DR_TRYCNT times before we bail.
*/
if (++ntries % NFSAUTH_DR_TRYCNT) {
goto retry;
}
sys_log("nfsauth: mountd has not established door");
*access = NFSAUTH_DROP;
return (FALSE);
}
ntries = 0;
/*
* Now that we've got what we need, place the call.
*/
case 0: /* Success */
/*
* The door_return that contained the data
* failed! We're here because of the 2nd
* door_return (w/o data) such that we can
* get control of the thread (and exit
* gracefully).
*/
door_arg_t *, &da);
goto fail;
}
break;
case EAGAIN:
/*
* Server out of resources; back off for a bit
*/
goto retry;
/* NOTREACHED */
case EINTR:
/*
* The server barfed and revoked
* the (existing) door on us; we
* want to wait to give smf(5) a
* chance to restart mountd(1m)
* and establish a new door handle.
*/
}
goto retry;
}
/*
* If the door was _not_ revoked on us,
* then more than likely we took an INTR,
* so we need to fail the operation.
*/
goto fail;
}
/*
* The only failure that can occur from getting
* the door info is EINVAL, so we let the code
* below handle it.
*/
/* FALLTHROUGH */
case EBADF:
case EINVAL:
default:
/*
* If we have a stale door handle, give smf a last
* chance to start it by sleeping for a little bit.
* If we're still hosed, we'll fail the call.
*
* Since we're going to reacquire the door handle
* upon the retry, we opt to sleep for a bit and
* _not_ to clear mountd_dh. If mountd restarted
	 * and was able to set mountd_dh, we should see
	 * the new instance; if not, we won't get caught
	 * up in an endless retry loop.
	 */
if (!last) {
last++;
goto retry;
}
sys_log("nfsauth: stale mountd door handle");
goto fail;
}
/*
	 * No door errors encountered; set up the XDR stream for decoding
* the results. If we fail to decode the results, we've got no
* other recourse than to fail the request.
*/
XDR_DESTROY(&xdrs);
goto fail;
}
XDR_DESTROY(&xdrs);
case NFSAUTH_DR_OKAY:
KM_SLEEP);
*srv_gids_cnt * sizeof (gid_t));
break;
case NFSAUTH_DR_EFAIL:
case NFSAUTH_DR_DECERR:
case NFSAUTH_DR_BADCMD:
default:
fail:
*access = NFSAUTH_DENIED;
return (FALSE);
/* NOTREACHED */
}
return (TRUE);
}
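/*
 * A condensed sketch (not compiled) of the upcall pattern used by
 * nfsauth_retrieve() above: the XDR-encoded request goes out through
 * the mountd door, rsize > 0 with rbuf == NULL asks the door code to
 * allocate the reply buffer, and the caller decodes and then frees
 * that buffer.  The function and parameter names here are placeholders
 * for illustration only.
 */
#if 0
static bool_t
sketch_door_roundtrip(door_handle_t dh, caddr_t abuf, size_t absize,
    caddr_t *rbufp, size_t *rsizep)
{
	door_arg_t da;

	da.data_ptr = (char *)abuf;	/* XDR-encoded arguments */
	da.data_size = absize;
	da.desc_ptr = NULL;		/* no file descriptors passed */
	da.desc_num = 0;
	da.rbuf = NULL;			/* let the door code allocate the reply */
	da.rsize = 1;			/* must be > 0 to get any data back */

	if (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0) != 0)
		return (FALSE);

	*rbufp = da.rbuf;		/* caller decodes, then kmem_free()s this */
	*rsizep = da.rsize;
	return (TRUE);
}
#endif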
static void
nfsauth_refresh_thread(void)
{
int access;
"nfsauth_refresh");
for (;;) {
if (refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
/* Keep the hold on the lock! */
break;
}
continue;
}
/*
* Since the ren was removed from the refreshq_queue above,
		 * this is the only thread aware of the ren's existence, so we
		 * have exclusive ownership of it and do not need to
		 * protect it with any lock.
*/
mutex_enter(&p->auth_lock);
/*
* Once the entry goes INVALID, it can not change
* state.
*
		 * There is also no need to refresh entries when we are
		 * just shutting down.
*
* In general, there is no need to hold the
* refreshq_lock to test the refreshq_thread_state. We
* do hold it at other places because there is some
* related thread synchronization (or some other tasks)
* close to the refreshq_thread_state check.
*
* The check for the refreshq_thread_state value here
		 * is purely advisory, to allow nfsauth_refresh_thread() to
		 * shut down faster. If we miss this advisory check, nothing
		 * catastrophic happens: we will just spin longer here before
		 * the shutdown.
*/
if (p->auth_state == NFS_AUTH_INVALID ||
mutex_exit(&p->auth_lock);
if (p->auth_state == NFS_AUTH_INVALID)
continue;
}
/*
* Make sure the state is valid. Note that once we
* change the state to NFS_AUTH_REFRESHING, no other
* thread will be able to work on this entry.
*/
mutex_exit(&p->auth_lock);
struct exportinfo *, exi,
struct auth_cache *, p);
/*
* The first caching of the access rights
* is done with the netid pulled out of the
* request from the client. All subsequent
* users of the cache may or may not have
* the same netid. It doesn't matter. So
* when we refresh, we simply use the netid
* of the request which triggered the
* refresh attempt.
*/
/*
* This can only be set in one other place
* and the state has to be NFS_AUTH_FRESH.
*/
mutex_enter(&p->auth_lock);
if (p->auth_state == NFS_AUTH_INVALID) {
mutex_exit(&p->auth_lock);
} else {
/*
* If we got an error, do not reset the
* time. This will cause the next access
* check for the client to reschedule this
* node.
*/
p->auth_access = access;
p->auth_srv_uid = uid;
p->auth_srv_gid = gid;
p->auth_srv_ngids * sizeof (gid_t));
p->auth_srv_ngids = ngids;
p->auth_srv_gids = gids;
p->auth_freshness = gethrestime_sec();
}
p->auth_state = NFS_AUTH_FRESH;
cv_broadcast(&p->auth_cv);
mutex_exit(&p->auth_lock);
}
}
}
zthread_exit();
}
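/*
 * A minimal sketch (not compiled) of the queue discipline used by the
 * refresh machinery: producers append refreshq_exi_node_t items to
 * refreshq_queue under a lock and wake the thread, while the consumer
 * (nfsauth_refresh_thread) pops one node under the lock and then works
 * on it lock-free, since nothing else can reach a node once it has left
 * the list.  The refreshq_lock and refreshq_cv names are assumptions
 * made only for this fragment.
 */
#if 0
	/* producer side */
	mutex_enter(&refreshq_lock);
	list_insert_tail(&refreshq_queue, ren);
	cv_broadcast(&refreshq_cv);
	mutex_exit(&refreshq_lock);

	/* consumer side */
	mutex_enter(&refreshq_lock);
	while ((ren = list_remove_head(&refreshq_queue)) == NULL &&
	    refreshq_thread_state == REFRESHQ_THREAD_RUNNING)
		cv_wait(&refreshq_cv, &refreshq_lock);
	mutex_exit(&refreshq_lock);
#endif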
int
{
int c;
return (-1);
return (1);
if (c < 0)
return (-1);
if (c > 0)
return (1);
return (0);
}
static int
{
int c;
return (-1);
return (1);
return (-1);
return (1);
return (-1);
return (1);
return (-1);
return (1);
if (c < 0)
return (-1);
if (c > 0)
return (1);
return (0);
}
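/*
 * Both comparators above follow the avl(9f) convention of returning
 * only -1, 0, or +1 so the tree has a total order.  A hedged sketch
 * (not compiled) comparing two netbuf addresses, shorter address first
 * and ties broken bytewise; the real comparators above compare richer
 * keys (the masked client address, and the flavor plus client
 * credentials, respectively).
 */
#if 0
static int
sketch_netbuf_compar(const struct netbuf *a, const struct netbuf *b)
{
	int c;

	if (a->len < b->len)
		return (-1);
	if (a->len > b->len)
		return (1);

	c = memcmp(a->buf, b->buf, a->len);
	if (c < 0)
		return (-1);
	if (c > 0)
		return (1);

	return (0);
}
#endif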
/*
* Get the access information from the cache or callup to the mountd
* to get and cache the access information in the kernel.
*/
static int
{
struct auth_cache_clnt *c;
struct auth_cache *p = NULL;
int access;
/*
* Now check whether this client already
* has an entry for this flavor in the cache
* for this export.
* Get the caller's address, mask off the
* parts of the address that do not identify
* the host (port number, etc), and then hash
* it to find the chain of cache entries.
*/
if (c == NULL) {
goto retrieve;
/*
* Initialize the new auth_cache_clnt
*/
goto retrieve;
}
sizeof (struct auth_cache),
if (c == NULL) {
c = nc;
} else {
}
}
if (p == NULL) {
rw_exit(&c->authc_lock);
goto retrieve;
}
/*
* Initialize the new auth_cache
*/
np->auth_srv_ngids = 0;
if (p == NULL) {
rw_downgrade(&c->authc_lock);
p = np;
} else {
rw_downgrade(&c->authc_lock);
}
} else {
}
mutex_enter(&p->auth_lock);
rw_exit(&c->authc_lock);
/*
* If the entry is in the WAITING state then some other thread is just
* retrieving the required info. The entry was either NEW, or the list
* of client's supplemental groups is going to be changed (either by
* this thread, or by some other thread). We need to wait until the
* nfsauth_retrieve() is done.
*/
while (p->auth_state == NFS_AUTH_WAITING)
/*
* Here the entry cannot be in WAITING or INVALID state.
*/
/*
* If the cache entry is not valid yet, we need to retrieve the
* info ourselves.
*/
if (p->auth_state == NFS_AUTH_NEW) {
/*
		 * NFS_AUTH_NEW is the default output auth_state value in
		 * case we fail somewhere below.
*/
p->auth_state = NFS_AUTH_WAITING;
mutex_exit(&p->auth_lock);
p->auth_access = access;
/*
* We need a copy of gids for the
* auth_cache entry
*/
}
p->auth_srv_uid = tmpuid;
p->auth_srv_gid = tmpgid;
p->auth_srv_ngids = tmpngids;
p->auth_srv_gids = tmpgids;
}
}
/*
* Set the auth_state and notify waiters.
*/
mutex_enter(&p->auth_lock);
p->auth_state = state;
cv_broadcast(&p->auth_cv);
mutex_exit(&p->auth_lock);
} else {
p->auth_time = gethrestime_sec();
*uid = p->auth_srv_uid;
*gid = p->auth_srv_gid;
*ngids = p->auth_srv_ngids;
}
access = p->auth_access;
if ((refresh > NFSAUTH_CACHE_REFRESH) &&
p->auth_state == NFS_AUTH_FRESH) {
p->auth_state = NFS_AUTH_STALE;
mutex_exit(&p->auth_lock);
struct exportinfo *, exi,
struct auth_cache *, p,
KM_SLEEP);
/*
* We should not add a work queue
* item if the thread is not
* accepting them.
*/
if (refreshq_thread_state == REFRESHQ_THREAD_RUNNING) {
/*
* Is there an existing exi_list?
*/
break;
}
}
ren = kmem_alloc(
sizeof (refreshq_exi_node_t),
KM_SLEEP);
sizeof (refreshq_auth_node_t),
ran_node));
ran);
}
} else {
}
} else {
mutex_exit(&p->auth_lock);
}
}
return (access);
/*
* Retrieve the required data without caching.
*/
} else {
}
}
return (access);
}
/*
* Check if the requesting client has access to the filesystem with
* a given nfs flavor number which is an explicitly shared flavor.
*/
int
{
int access;
if (! (perm & M_4SEC_EXPORTED)) {
return (NFSAUTH_DENIED);
}
/*
* Optimize if there are no lists
*/
perm &= ~M_4SEC_EXPORTED;
return (NFSAUTH_RO);
return (NFSAUTH_RW);
}
NULL);
return (access);
}
int
{
/*
* By default root is mapped to anonymous user.
	 * This might get overridden later in nfsauth_cache_get().
*/
} else {
}
*ngids = 0;
/*
* Get the nfs flavor number from xprt.
*/
/*
* First check the access restrictions on the filesystem. If
* there are no lists associated with this flavor then there's no
* need to make an expensive call to the nfsauth service or to
* cache anything.
*/
authnone_entry = i;
continue;
}
break;
}
mapaccess = 0;
/*
* Flavor not found, but use AUTH_NONE if it exists
*/
if (authnone_entry == -1)
return (NFSAUTH_DENIED);
i = authnone_entry;
}
/*
* If the flavor is in the ex_secinfo list, but not an explicitly
* shared flavor by the user, it is a result of the nfsv4 server
	 * namespace setup. We will grant RO permission, similar to
	 * a pseudo node, except that this node is a shared one.
*
	 * e.g. flavor in (flavor) indicates that it is not explicitly
* shared by the user:
*
* / (sys, krb5)
* |
* export #share -o sec=sys (krb5)
* |
* secure #share -o sec=krb5
*
	 * In this case, when a krb5 request comes in to access
* /export, RO permission is granted.
*/
return (mapaccess | NFSAUTH_RO);
/*
* Optimize if there are no lists.
* We cannot optimize for AUTH_SYS with NGRPS (16) supplemental groups.
*/
perm &= ~M_4SEC_EXPORTED;
return (mapaccess | NFSAUTH_RO);
return (mapaccess | NFSAUTH_RW);
}
/*
* For both NFSAUTH_DENIED and NFSAUTH_WRONGSEC we do not care about
* the supplemental groups.
*/
*ngids = 0;
}
}
/*
	 * Client's security flavor doesn't match the "ro" or
	 * "rw" lists. Try again using AUTH_NONE if present.
*/
/*
		 * Have we already encountered AUTH_NONE?
*/
if (authnone_entry != -1) {
} else {
/*
* Check for AUTH_NONE presence.
*/
NULL);
break;
}
}
}
}
if (access & NFSAUTH_DENIED)
}
static void
{
avl_destroy(&p->authc_tree);
rw_destroy(&p->authc_lock);
kmem_free(p, sizeof (*p));
}
static void
{
crfree(p->auth_clnt_cred);
mutex_destroy(&p->auth_lock);
cv_destroy(&p->auth_cv);
}
/*
* Free the nfsauth cache for a given export
*/
void
{
int i;
/*
* The only way we got here was with an exi_rele, which means that no
* auth cache entry is being refreshed.
*/
for (i = 0; i < AUTH_TABLESIZE; i++) {
}
}
/*
* Called by the kernel memory allocator when
* memory is low. Free unused cache entries.
* If that's not enough, the VM system will
* call again for some more.
*/
/*ARGSUSED*/
void
{
int i;
for (i = 0; i < EXPTABLESIZE; i++) {
}
}
}
void
{
struct auth_cache_clnt *c;
struct auth_cache *p;
int i;
for (i = 0; i < AUTH_TABLESIZE; i++) {
/*
* Free entries that have not been
* used for NFSAUTH_CACHE_TRIM seconds.
*/
/*
* We are being called by the kmem subsystem to reclaim
* memory so don't block if we can't get the lock.
*/
return;
}
p = next) {
mutex_enter(&p->auth_lock);
				/*
				 * Do not trim recently used or WAITING
				 * entries.
				 */
if (p->auth_time > stale_time ||
p->auth_state == NFS_AUTH_WAITING) {
mutex_exit(&p->auth_lock);
continue;
}
auth_state_t, p->auth_state);
/*
				 * STALE and REFRESHING entries need to be
				 * marked INVALID only because they are
				 * referenced by some other structure or
				 * thread. They will be freed later.
*/
if (p->auth_state == NFS_AUTH_STALE ||
p->auth_state == NFS_AUTH_REFRESHING) {
p->auth_state = NFS_AUTH_INVALID;
mutex_exit(&p->auth_lock);
avl_remove(&c->authc_tree, p);
} else {
mutex_exit(&p->auth_lock);
avl_remove(&c->authc_tree, p);
}
}
rw_exit(&c->authc_lock);
}
continue;
}
continue;
avl_remove(tree, c);
}
}
}
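/*
 * A hedged sketch (not compiled) of the non-blocking locking pattern
 * exi_cache_trim() relies on: the kmem reclaim callback must never
 * sleep on a lock, so a busy tree is simply skipped and kmem will call
 * us again later.  The variable c stands for a per-client
 * auth_cache_clnt, as used above.
 */
#if 0
	if (!rw_tryenter(&c->authc_lock, RW_WRITER)) {
		/* Someone is using this tree; do not block in reclaim. */
		return;
	}

	/* ... walk the tree and free idle, unreferenced entries ... */

	rw_exit(&c->authc_lock);
#endif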