/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* System call I/F to doors (outside of vnodes I/F) and misc support
* routines
*/
#include <sys/door_data.h>
#include <sys/vfs_opreg.h>
#include <sys/schedctl.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
/*
* The maximum amount of data (in bytes) that will be transferred using
* an intermediate kernel buffer. For sizes greater than this we map
* in the destination pages and perform a 1-copy transfer.
*/
/*
* Maximum amount of data that will be transferred in a reply to a
* door_upcall. Need to guard against a process returning huge amounts
* of data and getting the kernel stuck in kmem_alloc.
*/
/*
* Maximum number of descriptors allowed to be passed in a single
* door_call or door_return. We need to allocate kernel memory
* for all of them at once, so we can't let it scale without limit.
*/
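/*
 * Sketch (illustrative defaults, not authoritative; the real tunables may
 * differ): the three limits described above are ordinarily plain global
 * tunables, roughly
 *
 *	size_t	door_max_arg = 16 * 1024;
 *	size_t	door_max_upcall_reply = 1024 * 1024;
 *	uint_t	door_max_desc = 1024;
 *
 * so they can be adjusted without rebuilding the module.
 */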
/*
* Definition of a door handle, used by other kernel subsystems when
* calling door functions. This is really a file structure but we
* want to hide that fact.
*/
struct __door_handle {
};
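/*
 * Hedged usage sketch (the request/reply buffer names are hypothetical):
 * a kernel consumer holding a door_handle_t keeps it alive with
 * door_ki_hold()/door_ki_rele() and invokes the user-level server through
 * door_ki_upcall():
 *
 *	door_arg_t da;
 *
 *	da.data_ptr = (char *)&req;
 *	da.data_size = sizeof (req);
 *	da.desc_ptr = NULL;
 *	da.desc_num = 0;
 *	da.rbuf = (char *)&reply;
 *	da.rsize = sizeof (reply);
 *
 *	door_ki_hold(dh);
 *	error = door_ki_upcall(dh, &da);
 *	door_ki_rele(dh);
 */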
static int doorfs(long, long, long, long, long, long);
6,
(int (*)())doorfs,
};
};
#ifdef _SYSCALL32_IMPL
static int
6,
(int (*)())doorfs32,
};
"32-bit door syscalls",
};
#endif
&modlsys,
#ifdef _SYSCALL32_IMPL
#endif
};
extern struct vnodeops *door_vnodeops;
int
_init(void)
{
};
extern const fs_operation_def_t door_vnodeops_template[];
int error;
return (ENXIO);
/* Create a dummy vfs */
if (error != 0) {
return (error);
}
if (error != 0) {
return (error);
}
return (mod_install(&modlinkage));
}
int
{
}
/* system call functions */
static int door_call(int, void *);
static int door_revoke(int);
static int door_ucred(struct ucred_s *);
static int door_bind(int);
static int door_unbind(void);
static int door_unref(void);
static int door_getparam(int, int, size_t *);
static int door_setparam(int, int, size_t);
/*
* System call wrapper for all door related system calls
*/
static int
{
switch (subcode) {
case DOOR_CALL:
case DOOR_RETURN: {
return (EFAULT);
}
}
case DOOR_RETURN_OLD:
/*
* In order to support the S10 runtime environment, we
* still respond to the old syscall subcode for door_return.
* We treat it as having no stack limits. This code should
* be removed when such support is no longer needed.
*/
case DOOR_CREATE:
case DOOR_REVOKE:
return (door_revoke(arg1));
case DOOR_INFO:
case DOOR_BIND:
case DOOR_UNBIND:
return (door_unbind());
case DOOR_UNREFSYS:
return (door_unref());
case DOOR_UCRED:
case DOOR_GETPARAM:
case DOOR_SETPARAM:
default:
}
}
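/*
 * For reference, a sketch of the argument block that DOOR_CALL and
 * DOOR_RETURN marshal between user level and the kernel (assuming the
 * public <sys/door.h> layout):
 *
 *	typedef struct door_arg {
 *		char		*data_ptr;	data argument/results
 *		size_t		data_size;
 *		door_desc_t	*desc_ptr;	descriptor argument/results
 *		uint_t		desc_num;
 *		char		*rbuf;		result buffer
 *		size_t		rsize;		result buffer size
 *	} door_arg_t;
 *
 * 32-bit callers supply the corresponding door_arg32_t layout, which is
 * converted according to the caller's data model in door_call() and
 * door_return().
 */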
#ifdef _SYSCALL32_IMPL
/*
* System call wrapper for all door related system calls from 32-bit programs.
* Needed at the moment because of the casts - they undo some damage
* that truss causes (sign-extending the stack pointer) when truss'ing
* a 32-bit program using doors.
*/
static int
{
switch (subcode) {
case DOOR_CALL:
case DOOR_RETURN: {
return (EFAULT);
return (door_return(
}
}
case DOOR_RETURN_OLD:
/*
* In order to support the S10 runtime environment, we
* still respond to the old syscall subcode for door_return.
* We treat it as having no stack limits. This code should
* be removed when such support is no longer needed.
*/
case DOOR_CREATE:
case DOOR_REVOKE:
return (door_revoke(arg1));
case DOOR_INFO:
case DOOR_BIND:
case DOOR_UNBIND:
return (door_unbind());
case DOOR_UNREFSYS:
return (door_unref());
case DOOR_UCRED:
return (door_ucred(
case DOOR_GETPARAM:
case DOOR_SETPARAM:
default:
}
}
#endif
void shuttle_swtch(kmutex_t *);
void shuttle_sleep(kthread_t *);
/*
* Support routines
*/
static int door_create_common(void (*)(), void *, uint_t, int, int *,
file_t **);
static int door_translate_in(void);
static int door_translate_out(void);
static void door_list_insert(door_node_t *);
static door_data_t *
{
return (ddp);
}
static door_server_t *
{
}
static door_client_t *
{
}
/*
* System call to create a door
*/
int
{
int fd;
int err;
if ((attributes & ~DOOR_CREATE_MASK) ||
(DOOR_UNREF | DOOR_UNREF_MULTI)))
return (fd);
}
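/*
 * Illustrative user-level counterpart (sketch; see door_create(3C); the
 * path name below is hypothetical): a server registers a procedure and
 * usually publishes the resulting descriptor with fattach():
 *
 *	static void
 *	my_server(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		... handle the request in argp/arg_size ...
 *		(void) door_return(argp, arg_size, NULL, 0);
 *	}
 *
 *	int dfd = door_create(my_server, NULL, DOOR_REFUSE_DESC);
 *	(void) fattach(dfd, "/var/run/my_door");
 */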
/*
* Common code for creating user and kernel doors. If a door was
* created, stores a file structure pointer in the location pointed
* to by fpp (if fpp is non-NULL) and returns 0. Also, if a non-NULL
* pointer to a file descriptor is passed in as fdp, allocates a file
* descriptor representing the door. If a door could not be created,
* returns an error.
*/
static int
{
dp->door_target = p;
#ifdef _SYSCALL32_IMPL
else
#endif
/* add to per-process door list */
/*
* If the file table is full, remove the door from the
* per-process list, free the door, and return NULL.
*/
return (EMFILE);
}
return (0);
}
static int
{
/* we allow unref upcalls through, despite any minimum */
return (ENOBUFS);
return (ENOBUFS);
return (ENOTSUP);
return (ENFILE);
return (0);
}
/*
* Door invocation.
*/
int
{
/* Locals */
int error = 0;
/* destructor for data returned by a kernel server */
void *destarg;
int gotresults = 0;
int needcleanup = 0;
int cancel_pending;
/*
* Get the arguments
*/
if (args) {
if (datamodel == DATAMODEL_NATIVE) {
} else {
}
} else {
/* No arguments, and no results allowed */
}
/*
* We don't want to hold the door FD over the entire operation;
* instead, we put a hold on the door vnode and release the FD
* immediately
*/
/*
* This should be done in shuttle_resume(), just before going to
* sleep, but we want to avoid overhead while holding door_knob.
* prstop() is just a no-op if we don't really go to sleep.
* We test not-kernel-address-space for the sake of clustering code.
*/
prstop(PR_REQUESTED, 0);
if (DOOR_INVALID(dp)) {
goto out;
}
/*
* before we do anything, check that we are not overflowing the
* required limits.
*/
if (error != 0) {
goto out;
}
/*
* Check for in-kernel door server.
*/
dp->door_active++;
/* translate file descriptors to vnodes */
error = door_translate_in();
if (error)
goto out;
}
/*
* Call kernel door server. Arguments are passed and
* returned as a door_arg pointer. When called, data_ptr
* points to user data and desc_ptr points to a kernel list
* of door descriptors that have been converted to file
* structure pointers. It's the server function's
* responsibility to copyin the data pointed to by data_ptr
* (this avoids extra copying in some cases). On return,
* data_ptr points to a user buffer of data, and desc_ptr
* points to a kernel list of door descriptors representing
* files. When a reference is passed to a kernel server,
* it is the server's responsibility to release the reference
* (by calling closef). When the server includes a
 * reference in its reply, it is released as part of the
 * call (the server must duplicate the reference if
* it wants to retain a copy). The destfn, if set to
* non-NULL, is a destructor to be called when the returned
* kernel data (if any) is no longer needed (has all been
* translated and copied to user level).
*/
/* not implemented yet */
if (error)
goto out;
/* translate vnodes to files */
error = door_translate_out();
if (error)
goto out;
}
/* handle overflow */
if (error)
goto out;
/* door_overflow sets d_args rbuf and rsize */
} else {
}
goto results;
}
/*
* Get a server thread from the target domain
*/
if (DOOR_INVALID(dp))
else
goto out;
}
/*
* Move data from client to server
*/
if (error) {
/*
* We're not going to resume this thread after all
*/
goto out;
}
}
dp->door_active++;
ct->d_args_done = 0;
/*
* Premature wakeup. Find out why (stop, forkall, sig, exit ...)
*/
cancel_pending = 0;
(cancel_pending = schedctl_cancel_pending()) != 0) {
/* Signal, forkall, ... */
lwp->lwp_sysabort = 0;
if (cancel_pending)
/*
* If the server has finished processing our call,
* or exited (calling door_slam()), then d_error
* will have changed. If the server hasn't finished
* yet, d_error will still be DOOR_WAIT, and we
* let it know we are not interested in any
* results by sending a SIGCANCEL, unless the door
* is marked with DOOR_NO_CANCEL.
*/
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
}
}
} else {
/*
* Return from stop(), server exit...
*
* Note that the server could have done a
* door_return while the client was in stop state
* (ISSIG), in which case the error condition
* is updated by the server.
*/
/* Still waiting for a reply */
lwp->lwp_asleep = 0;
goto shuttle_return;
/* Server exit */
} else {
/* Server did a door_return during ISSIG */
}
}
/*
* Can't exit if the server is currently copying
* results for me.
*/
while (DOOR_T_HELD(ct))
/*
* If the server has not processed our message, free the
* descriptors.
*/
if (!ct->d_args_done) {
needcleanup = 1;
}
/*
* Find out if results were successfully copied.
*/
gotresults = 1;
}
if (needcleanup)
/*
* Move the results to userland (if any)
*/
if (ct->d_noresults)
goto out;
if (error) {
/*
* If server returned results successfully, then we've
* been interrupted and may need to clean up.
*/
if (gotresults) {
}
goto out;
}
/*
* Copy back data if we haven't caused an overflow (already
* handled) and we are using a 2 copy transfer, or we are
* returning data from a kernel server.
*/
goto out;
}
}
}
/*
* stuff returned doors into our proc, copyout the descriptors
*/
dsize = n * sizeof (door_desc_t);
while (n--) {
/* Close remaining files */
goto out;
}
}
goto out;
}
}
/*
* Return the results
*/
if (datamodel == DATAMODEL_NATIVE) {
sizeof (door_arg_t)) != 0)
} else {
}
}
out:
ct->d_noresults = 0;
/* clean up the overflow buffer if an error occurred */
}
ct->d_overflow = 0;
/* call destructor */
if (destfn) {
}
if (dp)
}
/* clean up the descriptor copyout buffer */
if (error != 0)
}
ct->d_fpp_size = 0;
}
if (error)
return (0);
}
static int
{
int error = 0;
if (DOOR_INVALID(dp)) {
return (EBADF);
}
/*
* door_ki_setparam() can only affect kernel doors.
* door_setparam() can only affect doors attached to the current
* process.
*/
return (EPERM);
}
switch (type) {
case DOOR_PARAM_DESC_MAX:
else
break;
case DOOR_PARAM_DATA_MIN:
else
break;
case DOOR_PARAM_DATA_MAX:
else
break;
default:
break;
}
return (error);
}
static int
{
int error = 0;
switch (type) {
case DOOR_PARAM_DESC_MAX:
break;
case DOOR_PARAM_DATA_MIN:
break;
case DOOR_PARAM_DATA_MAX:
break;
default:
break;
}
return (error);
}
int
{
int error = 0;
if (error)
return (0);
}
int
{
int error = 0;
if (error)
if (get_udatamodel() == DATAMODEL_NATIVE) {
#ifdef _SYSCALL32_IMPL
} else {
#endif /* _SYSCALL32_IMPL */
}
return (0);
}
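/*
 * Illustrative user-level usage of the two parameter calls above (sketch;
 * see door_setparam(3C) and door_getparam(3C); dfd and the 64K limit are
 * arbitrary examples):
 *
 *	size_t cur;
 *
 *	if (door_setparam(dfd, DOOR_PARAM_DATA_MAX, 64 * 1024) != 0)
 *		... handle error ...
 *	if (door_getparam(dfd, DOOR_PARAM_DESC_MAX, &cur) != 0)
 *		... handle error ...
 */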
/*
* A copyout() which proceeds from high addresses to low addresses. This way,
* stack guard pages are effective.
*
* Note that we use copyout_nowatch(); this is called while the client is
* held.
*/
static int
{
while (count > 0) {
return (1);
}
return (0);
}
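/*
 * Minimal sketch of the descending copy described above (assuming a
 * PAGESIZE-chunked loop over copyout_nowatch(); src, dest and count are
 * placeholder names):
 *
 *	while (count > 0) {
 *		size_t len = MIN(count, PAGESIZE);
 *
 *		count -= len;
 *		if (copyout_nowatch((char *)src + count,
 *		    (char *)dest + count, len))
 *			return (1);
 *	}
 *	return (0);
 *
 * Writing the highest addresses first mimics normal downward stack growth.
 */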
/*
* Writes the stack layout for door_return() into the door_server_t of the
* server thread.
*/
static int
{
#ifndef _STACK_GROWS_DOWNWARD
#endif
#ifdef _SYSCALL32_IMPL
if (datamodel != DATAMODEL_NATIVE) {
results_sz = sizeof (struct door_results32);
}
#endif
/*
* To speed up the overflow checking, we do an initial check
* that the passed in data size won't cause us to wrap past
* base_sp. Since door_max_desc limits descsz, we can
* safely use it here. 65535 is an arbitrary 'bigger than
* we need, small enough to not cause trouble' constant;
* the only constraint is that it must be > than:
*
* 5 * STACK_ALIGN +
* sizeof (door_info_t) +
* sizeof (door_results_t) +
* (max adjustment from door_final_sp())
*
* After we compute the layout, we can safely do a "did we wrap
* around" check, followed by a check against the recorded
* stack size.
*/
return (E2BIG); /* overflow */
if (info_needed)
else
return (E2BIG); /* overflow */
return (E2BIG); /* doesn't fit in stack */
return (0);
}
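/*
 * For orientation, a hedged sketch of the structure that door_layout()
 * reserves space for at the final stack pointer (assuming the
 * <sys/door.h> definition); it is consumed by the user-level door return
 * trampoline:
 *
 *	struct door_results {
 *		void		*cookie;
 *		char		*data_ptr;
 *		size_t		data_size;
 *		door_desc_t	*desc_ptr;
 *		size_t		desc_num;
 *		void		(*pc)();	server procedure to invoke
 *		int		nservers;	zero if no more server threads
 *		door_info_t	*door_info;
 *	};
 *
 * with a door_results32 analogue for 32-bit servers (results_sz above).
 */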
static int
{
int error = 0;
}
/*
* Reset datap to NULL if we aren't passing any data. Be careful
* to let unref notifications through, though.
*/
if (datap == DOOR_UNREF_DATA) {
datasize = 0;
else
} else if (datasize == 0) {
}
/*
* Get the stack layout, if it hasn't already been done.
*/
if (!st->d_layout_done) {
(is_private && empty_pool));
if (error != 0)
goto fail;
}
/*
* fill out the stack, starting from the top. Layout was already
* filled in by door_args() or door_translate_out().
*/
while (ndesc > 0) {
goto fail;
}
ndesc--;
ncopied++;
fpp++;
}
goto fail;
}
}
goto fail;
}
}
}
if (is_private && empty_pool) {
goto fail;
}
}
if (get_udatamodel() == DATAMODEL_NATIVE) {
goto fail;
}
#ifdef _SYSCALL32_IMPL
} else {
sizeof (dr32))) {
goto fail;
}
#endif
}
fail:
if (error != 0)
}
return (error);
}
/*
* Return the results (if any) to the caller (if any) and wait for the
* next invocation on a door.
*/
int
{
int error = 0;
int cancel_pending;
/*
* If thread was bound to a door that no longer exists, return
* an error. This can happen if a thread is bound to a door
* before the process calls forkall(); in the child, the door
* doesn't exist and door_fork() sets the d_invbound flag.
*/
if (st->d_invbound)
/*
* This should be done in shuttle_resume(), just before going to
* sleep, but we want to avoid overhead while holding door_knob.
* prstop() is just a no-op if we don't really go to sleep.
* We test not-kernel-address-space for the sake of clustering code.
*/
prstop(PR_REQUESTED, 0);
/* Make sure the caller hasn't gone away */
if (desc_num != 0) {
/* close any DOOR_RELEASE descriptors */
if (error)
}
goto out;
}
/*
* Transfer results, if any, to the client
*/
/*
* Prevent the client from exiting until we have finished
* moving results.
*/
/*
* Pass EOVERFLOW errors back to the client
*/
}
}
out:
/* Put ourselves on the available server thread list */
/*
* Make sure the caller is still waiting to be resumed
*/
if (caller) {
/*
* Setting t_disp_queue prevents erroneous preemptions
* if this thread is still in execution on another
* processor
*/
/*
* We are calling thread_onproc() instead of
 * THREAD_ONPROC() because the compiler can reorder
* the two stores of t_state and t_lockp in
* THREAD_ONPROC().
*/
} else {
/* May have been setrun or in stop state */
}
} else {
}
/*
* We've sprung to life. Determine if we are part of a door
* invocation, or just interrupted
*/
/*
* Normal door invocation. Return any error condition
* encountered while trying to pass args to the server
* thread.
*/
lwp->lwp_asleep = 0;
/*
* Prevent the caller from leaving us while we
 * are copying out the arguments from its buffer.
*/
/* let the client know we have processed his message */
if (error) {
if (caller)
else
goto out;
}
return (0);
} else {
/*
* We are not involved in a door_invocation.
* Check for /proc related activity...
*/
cancel_pending = 0;
(cancel_pending = schedctl_cancel_pending()) != 0) {
if (cancel_pending)
lwp->lwp_asleep = 0;
lwp->lwp_sysabort = 0;
}
/* Go back and wait for another request */
lwp->lwp_asleep = 0;
goto out;
}
}
/*
* Revoke any future invocations on this door
*/
int
{
door_node_t *d;
int error;
if (d->door_target != curproc) {
}
d->door_flags |= DOOR_REVOKED;
if (d->door_flags & DOOR_PRIVATE)
else
/* Invalidate the descriptor */
return (0);
}
int
{
if (did == DOOR_QUERY) {
/* Get information on door current thread is bound to */
/* Thread isn't bound to a door */
/* Not a door */
}
if (did != DOOR_QUERY)
return (0);
}
/*
* Common code for getting information about a door either via the
* door_info system call or the door_ki_info kernel call.
*/
void
{
int unref_count;
else
/*
* If this door is in the middle of having an unreferenced
* notification delivered, don't count the VN_HOLD by
* door_deliver_unref in determining if it is unreferenced.
* This handles the case where door_info is called from the
* thread delivering the unref notification.
*/
unref_count = 2;
else
unref_count = 1;
/*
* If this thread is bound to the door, then we can just
* check the vnode; a ref count of 1 (or 2 if this is
* handling an unref notification) means that the hold
* from the door_bind is the only reference to the door
* (no file descriptor refers to it).
*/
} else {
/*
* If we're working from a file descriptor or door handle
* we need to look at the file structure count. We don't
* need to hold the vnode lock since this is just a snapshot.
*/
}
}
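/*
 * For reference, a sketch of the structure this information is returned
 * in (assuming the public <sys/door.h> definition):
 *
 *	typedef struct door_info {
 *		pid_t		di_target;	server process
 *		door_ptr_t	di_proc;	server procedure
 *		door_ptr_t	di_data;	data cookie
 *		door_attr_t	di_attributes;	DOOR_UNREF, DOOR_LOCAL, ...
 *		door_id_t	di_uniquifier;	unique id for this door
 *	} door_info_t;
 *
 * DOOR_IS_UNREF is set in di_attributes when the reference checks above
 * find the door unreferenced.
 */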
/*
* Return credentials of the door caller (if any) for this invocation
*/
int
{
struct proc *p;
int err;
}
/* Prevent caller from exiting while we examine the cred */
/*
* If the credentials are not specified by the client, get the one
* associated with the calling process.
*/
else
if (err != 0)
return (0);
}
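/*
 * Illustrative user-level usage (sketch; see door_ucred(3C)): a server
 * procedure typically authorizes its caller along these lines:
 *
 *	ucred_t *uc = NULL;
 *
 *	if (door_ucred(&uc) == 0) {
 *		uid_t uid = ucred_geteuid(uc);
 *		pid_t pid = ucred_getpid(uc);
 *		... authorize the request ...
 *		ucred_free(uc);
 *	}
 */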
/*
* Bind the current lwp to the server thread pool associated with 'did'
*/
int
{
/* Not a door */
}
/*
* Can't bind to a non-private door, and can't bind to a door
* served by another process.
*/
}
st->d_invbound = 0;
return (0);
}
/*
* Unbind the current lwp from it's server thread pool
*/
int
door_unbind(void)
{
if (st->d_invbound) {
st->d_invbound = 0;
return (0);
}
return (0);
}
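/*
 * Illustrative pattern for a private server pool (sketch; see
 * door_bind(3C)): a thread created to serve a DOOR_PRIVATE door binds
 * itself to that door's pool and then parks in door_return() waiting for
 * an invocation:
 *
 *	(void) door_bind(did);
 *	(void) door_return(NULL, 0, NULL, 0);
 *
 * A thread that wants to leave the pool calls door_unbind() first.
 */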
/*
* Create a descriptor for the associated file and fill in the
* attributes associated with it.
*
 * Return 0 for success, -1 otherwise.
*/
int
{
int fd;
return (-1);
/* Fill in the attributes */
attributes |= DOOR_LOCAL;
}
return (0);
}
/*
* Return an available thread for this server. A NULL return value indicates
* that either:
* The door has been revoked, or
* a signal was received.
* The two conditions can be differentiated using DOOR_INVALID(dp).
*/
static kthread_t *
{
int signalled;
else
for (;;) {
/*
* We search the thread pool, looking for a server thread
* ready to take an invocation (i.e. one which is still
* sleeping on a shuttle object). If none are available,
* we sleep on the pool's CV, and will be signaled when a
* thread is added to the pool.
*
* This relies on the fact that once a thread in the thread
* pool wakes up, it *must* remove and add itself to the pool
* before it can receive door calls.
*/
if (DOOR_INVALID(dp))
return (NULL); /* Target has become invalid */
break;
}
break; /* we've got a live one! */
&signalled)) {
/*
* If we were signaled and the door is still
* valid, pass the signal on to another waiter.
*/
return (NULL); /* Got a signal */
}
}
/*
* We've got a thread_lock()ed thread which is still on the
* shuttle. Take it off the list of available server threads
* and mark it as ONPROC. We are committed to resuming this
* thread now.
*/
/*
* Setting t_disp_queue prevents erroneous preemptions
* if this thread is still in execution on another processor
*/
/*
* We are calling thread_onproc() instead of
 * THREAD_ONPROC() because the compiler can reorder
* the two stores of t_state and t_lockp in
* THREAD_ONPROC().
*/
return (server_t);
}
/*
* Put a server thread back in the pool.
*/
static void
{
st->d_layout_done = 0;
} else {
}
pool->dp_threads = t;
/* If someone is waiting for a server thread, wake him up */
}
/*
* Remove a server thread from the pool if present.
*/
static void
{
} else {
pool = &p->p_server_threads;
}
if (*next == t) {
return;
}
}
}
/*
* Lookup the door descriptor. Caller must call releasef when finished
* with associated door.
*/
static door_node_t *
{
return (NULL);
/*
* Use the underlying vnode (we may be namefs mounted)
*/
return (NULL);
}
if (fpp)
}
/*
* The current thread is exiting, so clean up any pending
* invocation details
*/
void
door_slam(void)
{
/*
* If we are an active door server, notify our
* client that we are exiting and revoke our door.
*/
return;
for (;;) {
if (DOOR_T_HELD(ct))
else if (DOOR_T_HELD(st))
else
break; /* neither flag is set */
}
/* Revoke our door if the process is exiting */
else
}
if (t != NULL) {
/*
* Let the caller know we are gone
*/
thread_lock(t);
setrun_locked(t);
thread_unlock(t);
}
}
}
/*
* Set DOOR_REVOKED for all doors of the current process. This is called
* on exit before all lwp's are being terminated so that door calls will
* return with an error.
*/
void
{
}
}
/*
* The process is exiting, and all doors it created need to be revoked.
*/
void
door_exit(void)
{
/*
* Walk the list of active doors created by this process and
* revoke them all.
*/
}
/* Clear the list */
p->p_door_list = NULL;
/* Clean up the unref list */
}
}
/*
* The process is executing forkall(), and we need to flag threads that
* are bound to a door in the child. This will make the child threads
* return an error to door_return unless they call door_unbind first.
*/
void
{
/* parent thread is bound to a door */
}
}
/*
* Deliver queued unrefs to appropriate door server.
*/
static int
door_unref(void)
{
/* make sure there's only one unref thread per process */
if (p->p_unref_thread) {
}
p->p_unref_thread = 1;
for (;;) {
/* Grab a queued request */
/*
* Interrupted.
* Return so we can finish forkall() or exit().
*/
p->p_unref_thread = 0;
}
}
if (unref_args.rbuf != 0) {
unref_args.rsize = 0;
}
}
}
/*
* Deliver queued unrefs to kernel door server.
*/
/* ARGSUSED */
static void
{
/* should only be one of these */
if (p->p_unref_thread) {
return;
}
p->p_unref_thread = 1;
for (;;) {
/* Grab a queued request */
}
}
}
/*
* Queue an unref invocation for processing for the current process
* The door may or may not be revoked at this point.
*/
void
{
ASSERT(d->door_active == 0);
return;
/*
 * Create an lwp to deliver unref calls if one isn't already running.
 *
 * A separate thread is used to deliver unrefs since the current
 * thread may be holding resources (e.g. locks) in userland that
 * are needed by the unref processing; delivering the unref from
 * this thread could therefore deadlock.
*/
if (d->door_flags & DOOR_UNREF_MULTI) {
/* multiple unrefs */
d->door_flags &= ~DOOR_DELAY;
} else {
/* Only 1 unref per door */
}
/*
* Need to bump the vnode count before putting the door on the
* list so it doesn't get prematurely released by door_unref.
*/
/* is this door already on the unref list? */
if (d->door_flags & DOOR_UNREF_MULTI) {
if (d == dp) {
/* already there, don't need to add another */
return;
}
}
}
server->p_unref_list = d;
}
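/*
 * From the server's point of view (hedged user-level sketch), a queued
 * unref arrives as an ordinary invocation whose argument pointer is the
 * distinguished DOOR_UNREF_DATA value:
 *
 *	static void
 *	my_server(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		if (argp == DOOR_UNREF_DATA) {
 *			... last client reference has gone away ...
 *			(void) door_return(NULL, 0, NULL, 0);
 *		}
 *		...
 *	}
 */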
/*
 * The server's results do not fit in the client's buffer, so allocate
 * space in the caller's address space for the results and copy the data
 * there.
*
* For EOVERFLOW, we must clean up the server's door descriptors.
*/
static int
{
uint_t i;
/* Do initial overflow check */
return (EMFILE);
/*
 * Allocate space for the results in the caller's address space
*/
/* No virtual memory available, or anon mapping failed */
if (error)
return (error);
}
return (EOVERFLOW);
}
goto out;
if (data_size != 0) {
/* Copy any data */
while (len != 0) {
int amount;
int error;
return (error);
}
}
}
/* Copy any fd's */
if (desc_num != 0) {
int fpp_size;
return (EFAULT);
}
/* make more space */
if (ct->d_fpp_size)
}
for (i = 0; i < desc_num; i++) {
/* close translated references */
/* close untranslated references */
return (EINVAL);
}
/* release passed reference */
}
}
}
out:
return (0);
}
/*
* Transfer arguments from the client to the server.
*/
static int
{
int error;
if (ndid > door_max_desc)
return (E2BIG);
/*
* Get the stack layout, and fail now if it won't fit.
*/
if (error != 0)
return (error);
/*
* Use a 2 copy method for small amounts of data
*
* Allocate a little more than we need for the
* args, in the hope that the results will fit
* without having to reallocate a buffer
*/
return (EFAULT);
}
} else {
/*
* Use a 1 copy method
*/
/*
* Copy data directly into server. We proceed
* downward from the top of the stack, to mimic
* normal stack usage. This allows the guard page
* to stop us before we corrupt anything.
*/
while (len != 0) {
/*
* Locate the next part to copy.
*/
/*
* if we are on the final (first) page, fix
* up the start position.
*/
if (error != 0)
return (error);
}
}
}
/*
* Copyin the door args and translate them into files
*/
if (ndid != 0) {
return (EFAULT);
}
while (ndid--) {
/* We only understand file descriptors as passed objs */
/* close translated references */
/* close untranslated references */
ct->d_fpp_size = 0;
return (EINVAL);
}
/* Hold the fp */
/* release passed reference */
}
}
}
return (0);
}
/*
* Transfer arguments from a user client to a kernel server. This copies in
* descriptors and translates them into door handles. It doesn't touch the
* other data, letting the kernel server deal with that (to avoid needing
* to copy the data twice).
*/
static int
door_translate_in(void)
{
if (ndid > door_max_desc)
return (E2BIG);
/*
* Copyin the door args and translate them into door handles.
*/
if (ndid != 0) {
return (EFAULT);
}
while (ndid--) {
/*
* We only understand file descriptors as passed objs
*/
/* Hold the door */
/* release passed reference */
}
/* Set attributes */
} else {
/* close translated references */
/* close untranslated references */
return (EINVAL);
}
didpp++;
}
}
return (0);
}
/*
* Translate door arguments from kernel to user. This copies the passed
* door handles. It doesn't touch other data. It is used by door_upcall,
* and for data returned by a door_call to a kernel server.
*/
static int
door_translate_out(void)
{
if (ndid > door_max_desc) {
return (E2BIG);
}
/*
* Translate the door args into files
*/
if (ndid != 0) {
while (ndid--) {
/*
* We understand file descriptors and door
* handles as passed objs.
*/
/* Hold the fp */
/* release passed reference */
if (fd >= 0)
else
}
} else {
/* close translated references */
/* close untranslated references */
ct->d_fpp_size = 0;
return (EINVAL);
}
}
}
return (0);
}
/*
* Move the results from the server to the client
*/
static int
{
if (ct->d_noresults)
return (E2BIG); /* No results expected */
if (desc_num > door_max_desc)
return (E2BIG); /* Too many descriptors */
/*
 * Check if the results are bigger than the client's buffer
*/
if (dsize)
else
return (0);
return (EMFILE);
return (E2BIG);
/*
* Handle upcalls
*/
/*
* If there's no return buffer or the buffer is too
* small, allocate a new one. The old buffer (if it
* exists) will be freed by the upcall client.
*/
if (result_size > door_max_upcall_reply)
return (E2BIG);
}
if (data_size != 0 &&
data_size) != 0)
return (EFAULT);
} else if (data_size != 0) {
if (data_size <= door_max_arg) {
/*
* Use a 2 copy method for small amounts of data
*/
}
return (EFAULT);
} else {
/* Copy data directly into client */
while (len != 0) {
int error;
if (off)
else
if (error != 0)
return (error);
}
}
}
/*
* Copyin the returned door ids and translate them into door_node_t
*/
if (desc_num != 0) {
uint_t i;
/* First, check if we would overflow client */
return (EMFILE);
return (EFAULT);
}
/* make more space */
if (ct->d_fpp_size)
}
for (i = 0; i < desc_num; i++) {
/* Only understand file descriptor results */
/* close translated references */
/* close untranslated references */
return (EINVAL);
}
/* release passed reference */
}
}
}
return (0);
}
/*
* Close all the descriptors.
*/
static void
{
uint_t i;
for (i = 0; i < n; i++) {
if (d->d_attributes & DOOR_DESCRIPTOR) {
(void) closeandsetf(
} else if (d->d_attributes & DOOR_HANDLE) {
}
d++;
}
}
/*
* Close descriptors that have the DOOR_RELEASE attribute set.
*/
void
{
uint_t i;
for (i = 0; i < n; i++) {
if (d->d_attributes & DOOR_RELEASE) {
if (d->d_attributes & DOOR_DESCRIPTOR) {
(void) closeandsetf(
} else if (from_kernel &&
(d->d_attributes & DOOR_HANDLE)) {
}
}
d++;
}
}
/*
* Copy descriptors into the kernel so we can release any marked
* DOOR_RELEASE.
*/
int
{
while (ndesc > 0) {
count * sizeof (door_desc_t))) {
return (EFAULT);
}
}
return (0);
}
/*
* Decrement ref count on all the files passed
*/
static void
{
uint_t i;
for (i = 0; i < n; i++)
}
/*
* Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
* bytes.
*
* Performs this using 1 mapin and 1 copy operation.
*
* We really should do more than 1 page at a time to improve
* performance, but for now this is treated as an anomalous condition.
*/
static int
{
int error = 0;
/*
* Lock down destination page.
*/
return (E2BIG);
/*
* Check if we have a shadow page list from as_pagelock. If not,
* we took the slow path and have to find our page struct the hard
* way.
*/
/* MMU mapping is already locked down */
/*
* TODO: The pfn step should not be necessary - need
* a hat_getpp() function.
*/
if (pf_is_memory(pfnum)) {
} else
return (E2BIG);
}
} else {
}
/*
* Map destination page into kernel address
*/
if (kpm_enable)
else
(caddr_t)-1);
/*
* Copy from src to dest
*/
/*
* Unmap destination page from kernel
*/
if (kpm_enable)
else
/*
* Unlock destination page
*/
return (error);
}
/*
* General kernel upcall using doors
* Returns 0 on success, errno for failures.
 * Caller must have a hold on the door-based vnode, and on any
* references passed in desc_ptr. The references are released
* in the event of an error, and passed without duplication
* otherwise. Note that param->rbuf must be 64-bit aligned in
* a 64-bit kernel, since it may be used to store door descriptors
* if they are returned by the server. The caller is responsible
* for holding a reference to the cred passed in.
*/
int
{
/* Locals */
int error = 0;
int gotresults = 0;
int cancel_pending;
return (EINVAL);
}
/*
* This should be done in shuttle_resume(), just before going to
* sleep, but we want to avoid overhead while holding door_knob.
* prstop() is just a no-op if we don't really go to sleep.
* We test not-kernel-address-space for the sake of clustering code.
*/
prstop(PR_REQUESTED, 0);
if (DOOR_INVALID(dp)) {
goto out;
}
/* Can't do an upcall to a kernel server */
goto out;
}
if (error != 0) {
goto out;
}
/*
* Get a server thread from the target domain
*/
if (DOOR_INVALID(dp))
else
goto out;
}
/*
* Move data from client to server
*/
error = door_translate_out();
if (error) {
/*
* We're not going to resume this thread after all
*/
goto out;
}
}
else
ct->d_noresults = 0;
dp->door_active++;
/*
* Premature wakeup. Find out why (stop, forkall, sig, exit ...)
*/
cancel_pending = 0;
(cancel_pending = schedctl_cancel_pending()) != 0)) {
/* Signal, forkall, ... */
if (cancel_pending)
lwp->lwp_sysabort = 0;
/*
* If the server has finished processing our call,
* or exited (calling door_slam()), then d_error
* will have changed. If the server hasn't finished
* yet, d_error will still be DOOR_WAIT, and we
* let it know we are not interested in any
* results by sending a SIGCANCEL, unless the door
* is marked with DOOR_NO_CANCEL.
*/
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
}
}
} else {
/*
* Return from stop(), server exit...
*
* Note that the server could have done a
* door_return while the client was in stop state
* (ISSIG), in which case the error condition
* is updated by the server.
*/
/* Still waiting for a reply */
if (lwp)
lwp->lwp_asleep = 0;
goto shuttle_return;
/* Server exit */
} else {
/* Server did a door_return during ISSIG */
}
}
/*
* Can't exit if the server is currently copying
* results for me
*/
while (DOOR_T_HELD(ct))
/*
* Find out if results were successfully copied.
*/
gotresults = 1;
}
if (lwp) {
}
/*
* Translate returned doors (if any)
*/
if (ct->d_noresults)
goto out;
if (error) {
/*
* If server returned results successfully, then we've
* been interrupted and may need to clean up.
*/
if (gotresults) {
}
goto out;
}
while (n--) {
}
}
/* on return data is in rbuf */
out:
ct->d_fpp_size = 0;
}
ct->d_noresults = 0;
return (error);
}
/*
* Add a door to the per-process list of active doors for which the
* process is a server.
*/
static void
{
p->p_door_list = dp;
}
/*
* Remove a door from the per-process list of active doors.
*/
void
{
/*
* Find the door in the list. If the door belongs to another process,
* it's OK to use p_door_list since that process can't exit until all
* doors have been taken off the list (see door_exit).
*/
/* found it, take it off the list */
}
/*
* External kernel interfaces for doors. These functions are available
* outside the doorfs module for use in creating and using doors from
* within the kernel.
*/
/*
* door_ki_upcall invokes a user-level door server from the kernel, with
* the credentials associated with curthread.
*/
int
{
}
/*
* door_ki_upcall_limited invokes a user-level door server from the
* kernel with the given credentials and reply limits. If the "cred"
* argument is NULL, uses the credentials associated with current
* thread. max_data limits the maximum length of the returned data (the
* client will get E2BIG if they go over), and max_desc limits the
* number of returned descriptors (the client will get EMFILE if they
* go over).
*/
int
{
}
/*
* Function call to create a "kernel" door server. A kernel door
* server provides a way for a user-level process to invoke a function
* in the kernel through a door_call. From the caller's point of
* view, a kernel door server looks the same as a user-level one
* (except the server pid is 0). Unlike normal door calls, the
* kernel door function is invoked via a normal function call in the
* same thread and context as the caller.
*/
int
{
int err;
/* no DOOR_PRIVATE */
if ((attributes & ~DOOR_KI_CREATE_MASK) ||
return (EINVAL);
p0.p_unref_thread == 0) {
/* need to create unref thread for process 0 */
}
if (err == 0) {
}
return (err);
}
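/*
 * Hedged sketch of a kernel door server procedure, assuming the
 * five-argument calling convention described in the comment in
 * door_call() above (args, a destructor function/argument pair, and an
 * error return); the names below are illustrative:
 *
 *	static void
 *	my_kdoor_server(void *cookie, door_arg_t *args,
 *	    void (**destfnp)(void *, void *), void **destargp, int *errp)
 *	{
 *		... copyin args->data_ptr, do the work, set up results ...
 *		*destfnp = NULL;	no destructor needed
 *		*errp = 0;
 *	}
 *
 *	door_handle_t dh;
 *	int err = door_ki_create(my_kdoor_server, my_cookie, 0, &dh);
 */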
void
{
}
void
{
}
int
{
int err;
return (err);
return (err);
}
return (EINVAL);
}
return (err);
}
/* falloc returns with f_tlock held on success */
return (0);
}
int
{
return (EINVAL);
return (0);
}
{
/* is the descriptor really a door? */
return (NULL);
/* got the door, put a hold on it and release the fd */
return (dh);
}
int
{
return (EINVAL);
}
int
{
return (EINVAL);
}