/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
#ifndef _NFS_NFS_CLNT_H
#define _NFS_NFS_CLNT_H
#include <sys/condvar_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rfscall() flags
*/
/*
* Fake errno passed back from rfscall to indicate transfer size adjustment
*/
/*
* The NFS specific async_reqs structure. iotype is grouped to support two
* types of async thread pools, please read comments section of mntinfo_t
* definition for more information. Care should be taken while adding new
* members to this group.
*/
/*
 * NOTE(review): the enumerator list has been stripped from this copy of
 * the header -- an empty enumerator list is not valid ISO C.  The original
 * enumerators (the async i/o request types grouped per the comment above)
 * must be restored from the upstream header; do not ship this as-is.
 */
enum iotype {
};
/*
* NFS async requests queue type.
*/
/*
 * NOTE(review): enumerator list stripped from this copy (presumably the
 * two async queue indices referenced below as NFS_ASYNC_QUEUE and
 * NFS_ASYNC_PGOPS_QUEUE -- TODO confirm against upstream).  An empty
 * enumerator list is not valid ISO C.
 */
enum ioqtype {
};
/*
* Number of NFS async threads operating exclusively on page op requests.
*/
/*
 * Per-request arguments for an async read request (from the name;
 * members have been stripped from this copy -- TODO restore from
 * the upstream header).
 */
struct nfs_async_read_req {
};
/*
 * Per-request arguments for an async pageio request.
 * NOTE(review): additional members appear to have been stripped from
 * this copy; only the flags word survives.
 */
struct nfs_pageio_req {
int flags; /* i/o flags for the operation; exact bits not visible here -- TODO confirm */
};
/*
 * Per-request arguments for an async readdir request (from the name;
 * members stripped from this copy -- TODO restore from upstream).
 */
struct nfs_readdir_req {
};
/*
 * Per-request arguments for an async commit request (from the name;
 * members stripped from this copy -- TODO restore from upstream).
 */
struct nfs_commit_req {
};
/*
 * Per-request arguments for an async inactive (vnode teardown) request
 * (from the name; members stripped from this copy -- TODO restore from
 * upstream).
 */
struct nfs_inactive_req {
};
/*
 * A queued async request.
 * NOTE(review): the common members (queue linkage, vnode, cred, iotype,
 * per the comment above the iotype enum) have been stripped from this
 * copy -- TODO restore from upstream.
 */
struct nfs_async_reqs {
#ifdef DEBUG
#endif
union {
/*
 * NOTE(review): union members stripped -- presumably one of the
 * per-iotype request structs declared above (nfs_async_read_req,
 * nfs_pageio_req, etc.) -- TODO confirm against upstream.
 */
} a_args;
};
/*
* Due to the way the address space callbacks are used to execute a delmap,
* we must keep track of how many times the same thread has called
* VOP_DELMAP()->nfs_delmap()/nfs3_delmap(). This is done by having a list of
* nfs_delmapcall_t's associated with each rnode_t. This list is protected
* by the rnode_t's r_statelock. The individual elements do not need to be
* protected as they will only ever be created, modified and destroyed by
* one thread (the call_id).
* See nfs_delmap()/nfs3_delmap() for further explanation.
*/
/*
 * NOTE(review): this typedef is truncated -- the struct body (call_id,
 * list linkage, error field, per the comment above) and the closing
 * "} nfs_delmapcall_t;" are missing from this copy of the file.
 */
typedef struct nfs_delmapcall {
/*
* delmap address space callback args
*/
/*
 * NOTE(review): truncated typedef -- the struct body and the closing
 * "} nfs_delmap_args_t;" are missing from this copy of the file.
 */
typedef struct nfs_delmap_args {
#ifdef _KERNEL
/* Allocate a delmap call-tracking entry for the current thread. */
extern nfs_delmapcall_t *nfs_init_delmapcall(void);
/* Release an entry obtained from nfs_init_delmapcall(). */
extern void nfs_free_delmapcall(nfs_delmapcall_t *);
#endif /* _KERNEL */
/*
* The following structures, chhead and chtab, make up the client handle
* cache. chhead represents a quadruple(RPC program, RPC version, Protocol
* Family, and Transport). For example, a chhead entry could represent
* NFS/V3/IPv4/TCP requests. chhead nodes are linked together as a
* linked list and are referenced from chtable.
*
* chtab represents an allocated client handle bound to a particular
* quadruple. These nodes chain down from a chhead node. chtab
* entries which are on the chain are considered free, so a thread may simply
* unlink the first node without traversing the chain. When the thread is
* completed with its request, it puts the chtab node back on the chain.
*/
/*
 * Client handle cache list head -- one per (program, version, family,
 * transport) quadruple; see the block comment above.
 * NOTE(review): members have been stripped from this copy -- TODO
 * restore from upstream.
 */
typedef struct chhead {
} chhead_t;
/*
 * An allocated client handle bound to a particular quadruple, chained
 * from a chhead node; see the block comment above.
 * NOTE(review): members have been stripped from this copy -- TODO
 * restore from upstream.
 */
typedef struct chtab {
} chtab_t;
/*
* clinfo is a structure which encapsulates data that is needed to
* obtain a client handle from the cache
*/
/*
 * NOTE(review): members have been stripped from this copy (presumably
 * the program/version/retrans/timeout lookup key -- TODO confirm
 * against upstream).
 */
typedef struct clinfo {
} clinfo_t;
/*
* Failover information, passed opaquely through rfscall()
*/
/*
 * NOTE(review): members have been stripped from this copy -- TODO
 * restore from upstream.
 */
typedef struct failinfo {
} failinfo_t;
/*
* Static server information
*
* These fields are protected by sv_lock:
* sv_flags
*/
/*
 * NOTE(review): members have been stripped from this copy (the comment
 * above still names sv_flags as protected by sv_lock) -- TODO restore
 * from upstream.
 */
typedef struct servinfo {
} servinfo_t;
/*
* The values for sv_flags.
*/
/*
* Switch from RDMA knconf to original mount knconf
*/
#if defined(_KERNEL)
/*
* NFS private data per mounted file system
* The mi_lock mutex protects the following fields:
* mi_flags
* mi_printed
* mi_down
* mi_tsize
* mi_stsize
* mi_curread
* mi_curwrite
* mi_timers
* mi_curr_serv
* mi_readers
* mi_klmconfig
*
* The mi_async_lock mutex protects the following fields:
* mi_async_reqs
* mi_async_req_count
* mi_async_tail
* mi_async_curr[NFS_MAX_ASYNC_QUEUES]
* mi_async_clusters
* mi_async_init_clusters
* mi_threads[NFS_MAX_ASYNC_QUEUES]
* mi_manager_thread
*
* Normally the netconfig information for the mount comes from
* mi_curr_serv and mi_klmconfig is NULL. If NLM calls need to use a
* different transport, mi_klmconfig contains the necessary netconfig
* information.
*
* 'mi_zone' is initialized at structure creation time, and never
* changes; it may be read without a lock.
*
* mi_zone_node is linkage into the mi_globals.mig_list, and is
* protected by mi_globals.mig_list_lock. (NOTE(review): this previously
* read "mi4_globals", which is the NFSv4 variant; the locking-order note
* below refers to mi_globals -- confirm the intended name.)
*
* Locking order:
* mi_globals::mig_lock > mi_async_lock > mi_lock
*/
/*
 * NOTE(review): every data member of this structure has been stripped
 * from this copy of the file; only the section comments that annotated
 * groups of members survive below.  The locking rules in the large
 * comment above (mi_lock, mi_async_lock, mi_zone) cannot be checked
 * against the fields here -- restore the members from upstream before
 * relying on this definition.
 */
typedef struct mntinfo {
/*
 * Extra fields for congestion control, one per NFS call type,
 * plus one global one.
 */
/*
 * Async I/O management
 * We have 2 pools of threads working on async I/O:
 * (i) Threads which work on all async queues. Default number of
 * threads in this queue is 8. Threads in this pool work on async
 * queue pointed by mi_async_curr[NFS_ASYNC_QUEUE]. Number of
 * active threads in this pool is tracked by
 * mi_threads[NFS_ASYNC_QUEUE].
 * (ii)Threads which work only on page op async queues.
 * Page ops queue comprises of NFS_PUTAPAGE, NFS_PAGEIO &
 * NFS_COMMIT. Default number of threads in this queue is 2
 * (NUM_ASYNC_PGOPS_THREADS). Threads in this pool work on async
 * queue pointed by mi_async_curr[NFS_ASYNC_PGOPS_QUEUE]. Number
 * of active threads in this pool is tracked by
 * mi_threads[NFS_ASYNC_PGOPS_QUEUE].
 */
/* current async queue */
/* number of active async threads */
/* tell workers to work */
/*
 * Other stuff
 */
/*
 * ACL entries
 */
/*
 * Client Side Failover stats
 */
/*
 * Kstat statistics
 */
/*
 * Zones support.
 */
/*
 * Serializes threads in failover_remap.
 * Need to acquire this lock first in failover_remap() function
 * before acquiring any other rnode lock.
 */
} mntinfo_t;
#endif /* _KERNEL */
/*
* vfs pointer to mount info
*/
/*
* vnode pointer to mount info
*/
/*
* The values for mi_flags.
*/
/*
* Read-only mntinfo statistics
*/
/*
 * Read-only snapshot of mntinfo exported via kstat (per the comment
 * above).
 * NOTE(review): this struct is truncated in this copy -- most members
 * are missing, the anonymous inner struct has lost both its members and
 * its member name, and the closing "};" of mntinfo_kstat itself is not
 * present.  Restore from upstream.
 */
struct mntinfo_kstat {
int mik_timeo; /* presumably the mount's RPC timeout setting -- TODO confirm units */
int mik_retrans; /* presumably the mount's retransmission count -- TODO confirm */
struct {
};
/*
 * Macro to wakeup sleeping async worker threads.
 */
/*
 * NOTE(review): the macro definition that this comment described has
 * been stripped from this copy; the two closing braces below are
 * orphaned remnants of its body.  Restore from upstream.
 */
}
}
/*
 * Mark cached attributes as timed out
 *
 * The caller must not be holding the rnode r_statelock mutex.
 */
/*
 * NOTE(review): the macro definition that this comment described has
 * been stripped from this copy; the two closing braces below are
 * orphaned remnants of its body.  Restore from upstream.
 */
}
}
/*
* Is the attribute cache valid?
*/
/*
* Flags to indicate whether to purge the DNLC for non-directory vnodes
* in a call to nfs_purge_caches.
*/
#define NFS_NOPURGE_DNLC 0
/*
 * If returned error is ESTALE flush all caches.
 */
/*
 * NOTE(review): only the tail of this multi-line macro survives in this
 * copy (note the trailing backslash continuations below); the #define
 * line and most of the body are missing.  Restore from upstream.
 */
} \
if (vn_has_cached_data(vp)) \
}
/*
* Is cache valid?
* Swap is always valid, if no attributes (attrtime == 0) or
* if mtime matches cached mtime it is valid
* NOTE: mtime is now a timestruc_t.
* Caller should be holding the rnode r_statelock mutex.
*/
/*
* Macro to detect forced unmount or a zone shutdown.
*/
/*
* Convert NFS tunables to hrtime_t units, seconds to nanoseconds.
*/
/*
* Structure to identify owner of a PC file share reservation.
*/
/*
 * Identifies the owner of a PC file share reservation (per the comment
 * above).
 * NOTE(review): members (magic number, hostname, pid -- TODO confirm)
 * have been stripped from this copy.
 */
struct nfs_owner {
};
/*
* Values for magic.
*/
/*
* Support for extended attributes
*/
/*
* Short hand for checking to see whether the file system was mounted
* interruptible or not.
*/
/*
* Short hand for checking whether failover is enabled or not
*/
/*
* How long will async threads wait for additional work.
*/
#ifdef _KERNEL
/*
 * NOTE(review): the declaration below is truncated -- "struct chtab **);"
 * is only the tail of a prototype (presumably the client-handle-cache
 * lookup routine, e.g. clget() -- TODO confirm) whose beginning is
 * missing from this copy.
 */
struct chtab **);
/* Link a mntinfo into its zone's list. */
extern void nfs_mi_zonelist_add(mntinfo_t *);
/* Tear down and free a mntinfo. */
extern void nfs_free_mi(mntinfo_t *);
/* Initialize the per-mount kstat for this vfs. */
extern void nfs_mnt_kstat_init(struct vfs *);
#endif
/*
* Per-zone data for managing client handles. Included here solely for the
* benefit of MDB.
*/
/*
* client side statistics
*/
/*
 * Client-side statistics counters (per the comment above).
 * NOTE(review): all members -- including the DEBUG-only group whose
 * #ifdef shell survives below -- have been stripped from this copy.
 */
struct clstat {
#ifdef DEBUG
#endif
};
/*
 * Per-zone client-handle management data (per the comment further
 * above mentioning MDB visibility).
 * NOTE(review): members have been stripped from this copy -- TODO
 * restore from upstream.
 */
struct nfs_clnt {
};
#ifdef __cplusplus
}
#endif
#endif /* _NFS_NFS_CLNT_H */