/* scalls.c revision b6233ca500dcfb36bc38a5dbd2c7e3584f2c2485 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
/*
* fork_lock does double-duty. Not only does it (and atfork_lock)
* serialize calls to fork() and forkall(), but it also serializes calls
* to thr_suspend() and thr_continue() (because fork() and forkall() also
* suspend and continue other threads and they want no competition).
*
* atfork_lock also does double-duty. Not only does it protect the
* pthread_atfork() data structures, but it also serializes I18N calls
* to functions in dlopen()ed L10N objects. These functions can do
* anything, including call malloc() and free(). Such calls are not
* fork-safe when protected by an ordinary mutex because, with an
* interposed malloc library present, there would be a lock ordering
* violation due to the pthread_atfork() prefork function in the
* interposition library acquiring its malloc lock(s) before the
* ordinary mutex in libc being acquired by libc's prefork functions.
*
* Within libc, calls to malloc() and free() are fork-safe only if the
* calls are made while holding no other libc locks. This covers almost
* all of libc's malloc() and free() calls. For those libc code paths,
* such as the above-mentioned I18N calls, that require serialization and
* that may call malloc() or free(), libc uses atfork_lock_enter() to perform
* the serialization. This works because atfork_lock is acquired by fork()
* before any of the pthread_atfork() prefork functions are called.
*/
/*
 * Serialize fork()/forkall() against thr_suspend()/thr_continue()
 * (see the block comment above).
 * NOTE(review): the body is empty in this copy; upstream versions
 * acquire fork_lock here.  This file appears to be a truncated
 * extraction -- confirm against the original revision before use.
 */
void
fork_lock_enter(void)
{
}
/*
 * Release side of fork_lock_enter().
 * NOTE(review): body is empty here; presumably it releases fork_lock
 * upstream -- verify against the original revision.
 */
void
fork_lock_exit(void)
{
}
/*
 * Serialize access to the pthread_atfork() data structures and to
 * fork-unsafe I18N code paths (see the block comment above).
 * NOTE(review): body is empty in this copy; upstream acquires
 * atfork_lock here -- confirm against the original revision.
 */
void
atfork_lock_enter(void)
{
}
/*
 * Release side of atfork_lock_enter().
 * NOTE(review): body is empty in this copy; presumably releases
 * atfork_lock upstream -- verify against the original revision.
 */
void
atfork_lock_exit(void)
{
}
/*
 * Common implementation behind fork()/forkx(): suspend all other
 * threads, perform the fork system call, then fix up child or parent
 * state.  Returns the child's pid in the parent, 0 in the child, or
 * -1 on error.
 *
 * NOTE(review): this block is badly truncated in this copy -- the
 * return type, the vfork-child and fork-handler checks, the __forkx()
 * call, the signal blocking, and the child-side cleanup statements
 * are all missing, leaving unbalanced braces and an undeclared `pid`.
 * Do not attempt to compile as-is; restore from the original revision.
 */
_private_forkx(int flags)
{
/*
* We are a child of vfork(); omit all of the fork
* logic and go straight to the system call trap.
* A vfork() child of a multithreaded parent
* must never call fork().
*/
return (-1);
}
if (pid == 0) { /* child */
}
return (pid);
}
/*
* Cannot call fork() from a fork handler.
*/
return (-1);
}
/*
* The functions registered by pthread_atfork() are defined by
* the application and its libraries and we must not hold any
* internal lmutex_lock()-acquired locks while invoking them.
* We hold only udp->atfork_lock to protect the atfork linkages.
* If one of these pthread_atfork() functions attempts to fork
* or to call pthread_atfork(), libc will detect the error and
* fail the call with EDEADLK. Otherwise, the pthread_atfork()
* functions are free to do anything they please (except they
* will not receive any signals).
*/
/*
* Block every other thread attempting thr_suspend() or thr_continue().
*/
/*
* Block all signals.
* Just deferring them via sigoff() is not enough.
* We have to avoid taking a deferred signal in the child
* that was actually sent to the parent before __forkx().
*/
/*
* This suspends all threads but this one, leaving them
* suspended outside of any critical regions in the library.
* Thus, we are assured that no lmutex_lock()-acquired library
* locks are held while we invoke fork() from the current thread.
*/
suspend_fork();
if (pid == 0) { /* child */
/*
* Clear our schedctl pointer.
* Discard any deferred signal that was sent to the parent.
* Because we blocked all signals before __forkx(), a
* deferred signal cannot have been taken by the child.
*/
/* reset the library's data structures to reflect one thread */
} else {
/* restart all threads that were suspended for fork() */
continue_fork(0);
}
return (pid);
}
/*
* fork() is fork1() for both Posix threads and Solaris threads.
* The forkall() interface exists for applications that require
* the semantics of replicating all threads.
*/
/*
 * fork() is fork1() for both Posix threads and Solaris threads:
 * only the calling thread is replicated in the child.  This entry
 * point simply invokes the common forkx implementation with no flags.
 *
 * Returns the child's pid in the parent, 0 in the child, -1 on error.
 *
 * NOTE(review): the original declaration relied on implicit int
 * (removed in C99); the return type is declared explicitly as pid_t
 * to match the fork(2) contract.  pid_t is assignment-compatible
 * with the int the original implicitly returned.
 */
pid_t
_fork(void)
{
	return (_private_forkx(0));
}
/*
* Much of the logic here is the same as in forkx().
* See the comments in forkx(), above.
*/
/*
 * Common implementation behind forkall()/forkallx(): like
 * _private_forkx() but replicates all threads in the child
 * (note continue_fork(1) on the child side below).
 *
 * NOTE(review): this block is badly truncated in this copy -- the
 * return type, the guard checks, the __forkallx() call, and the
 * declaration of `pid` are missing, leaving unbalanced braces.
 * Restore from the original revision before use.
 */
_private_forkallx(int flags)
{
return (-1);
}
if (pid == 0) { /* child */
}
return (pid);
}
return (-1);
}
suspend_fork();
if (pid == 0) {
/* child: restart ALL threads, replicating the parent's set */
continue_fork(1);
} else {
/* parent: restart the threads suspended for the fork */
continue_fork(0);
}
return (pid);
}
/*
 * forkall() replicates all of the parent's threads in the child,
 * for applications that require those semantics (see the comment
 * above).  Delegates to the common forkall implementation with no
 * flags.
 *
 * Returns the child's pid in the parent, 0 in the child, -1 on error.
 *
 * NOTE(review): the original declaration relied on implicit int
 * (removed in C99); the return type is declared explicitly as pid_t
 * to match the fork(2) family contract.
 */
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}
/*
* Hacks for system calls to provide cancellation
* and improve java garbage collection.
*/
/*
 * Cancellation prologue wrapped around cancelable system calls:
 * checks for a pending, enabled cancellation request before the call.
 * NOTE(review): this macro is truncated in this copy -- the
 * declarations of `self`/`nocancel` and the do_sigcancel() arm are
 * missing and the braces are unbalanced.  Restore from the original
 * revision; do not use as-is.
 */
#define PROLOGUE \
{ \
if (nocancel == 0) { \
if (!self->ul_cancel_disabled) { \
if (self->ul_cancel_pending) \
} \
}
/*
 * Cancellation epilogue matching PROLOGUE: re-checks cancellation
 * state after the system call returns.
 * NOTE(review): truncated in this copy -- the interior statements
 * are missing.  Restore from the original revision.
 */
#define EPILOGUE \
if (nocancel == 0) { \
} \
}
/*
* Perform the body of the action required by most of the cancelable
* function calls. The return(function_call) part is to allow the
* compiler to make the call be executed with tail recursion, which
* saves a register window on sparc and slightly (not much) improves
*/
/*
 * Body of most cancelable wrappers: run the cancellation prologue,
 * then either tail-call the underlying function (fast path when
 * cancellation checks are unnecessary) or capture its result in `rv`
 * and run the epilogue.  The tail call lets the compiler save a
 * register window on sparc (see the comment above).
 * NOTE(review): depends on the truncated PROLOGUE/EPILOGUE macros
 * above; `rv` and `nocancel` come from those expansions.
 */
#define PERFORM(function_call) \
PROLOGUE \
if (nocancel) \
return (function_call); \
rv = function_call; \
EPILOGUE \
return (rv);
/*
* Specialized prologue for sigsuspend() and pollsys().
* These system calls pass a signal mask to the kernel.
* The kernel replaces the thread's signal mask with the
* temporary mask before the thread goes to sleep. If
* a signal is received, the signal handler will execute
* with the temporary mask, as modified by the sigaction
* for the particular signal.
*
* We block all signals until we reach the kernel with the
* temporary mask. This eliminates race conditions with
* setting the signal mask while signals are being posted.
*/
/*
 * Specialized prologue for sigsuspend()/pollsys(), which hand a
 * temporary signal mask to the kernel: blocks all signals until the
 * kernel installs the temporary mask (see the comment above).
 * NOTE(review): truncated in this copy -- the declarations, the
 * block_all_signals() call, and the do_sigcancel() arm are missing.
 * Restore from the original revision; do not use as-is.
 */
#define PROLOGUE_MASK(sigmask) \
{ \
if (sigmask) { \
} \
if (nocancel == 0) { \
if (!self->ul_cancel_disabled) { \
if (self->ul_cancel_pending) { \
if (self->ul_sigsuspend) { \
self->ul_sigsuspend = 0;\
restore_signals(self); \
} \
} \
} \
} \
}
/*
* If a signal is taken, we return from the system call wrapper with
* our original signal mask restored (see code in call_user_handler()).
* If not (self->ul_sigsuspend is still non-zero), we must restore our
* original signal mask ourself.
*/
/*
 * Epilogue matching PROLOGUE_MASK: if no signal was taken (so
 * call_user_handler() did not already restore the mask --
 * ul_sigsuspend still set), restore the original signal mask here.
 * NOTE(review): truncated in this copy -- interior statements of
 * the nocancel arm are missing.  Restore from the original revision.
 */
#define EPILOGUE_MASK \
if (nocancel == 0) { \
} \
if (self->ul_sigsuspend) { \
self->ul_sigsuspend = 0; \
restore_signals(self); \
} \
}
/*
* Cancellation prologue and epilogue functions,
* for cancellation points too complex to include here.
*/
/*
 * Out-of-line cancellation prologue for cancellation points too
 * complex for the PROLOGUE macro (see the comment above).
 * NOTE(review): truncated in this copy -- `self` is never declared
 * (presumably `ulwp_t *self = curthread;` was dropped) and the
 * innermost if-arm is empty.  Restore from the original revision.
 */
void
_cancel_prologue(void)
{
if (self->ul_cancel_prologue == 0) {
if (!self->ul_cancel_disabled) {
if (self->ul_cancel_pending)
}
}
}
/*
 * Out-of-line cancellation epilogue matching _cancel_prologue().
 * NOTE(review): truncated in this copy -- `self` is never declared
 * and the if-body is empty.  Restore from the original revision.
 */
void
_cancel_epilogue(void)
{
if (self->ul_cancel_prologue == 0) {
}
}
/*
* Called from _thrp_join() (thr_join() is a cancellation point)
*/
/*
 * NOTE(review): everything from here down to the clock/sleep code is
 * a set of shredded remnants of cancelable system-call wrappers.
 * The function names and most of each signature/body were lost in
 * extraction; what survives are parameter-list tails (the
 * `struct strbuf` fragments suggest the getmsg/getpmsg/putmsg/putpmsg
 * family) and `int rv;` declarations feeding the PERFORM() macro.
 * None of this compiles as-is; restore from the original revision.
 */
int
{
int error;
;
return (error);
}
{
}
{
}
int
int *flagsp)
{
int rv;
}
int
{
int *, int *);
int rv;
}
int
{
const struct strbuf *, int);
int rv;
}
int
{
const struct strbuf *, int);
int rv;
}
int
{
const struct strbuf *, int, int);
int rv;
}
int
{
const struct strbuf *, int, int);
int rv;
}
/*
 * NOTE(review): remnants of nanosleep() and clock_nanosleep().  The
 * signatures, the timerspec arithmetic, the __nanosleep() calls, and
 * the `restart:` label targeted by the gotos below were all lost in
 * extraction.  The surviving switch shows the supported clock-id
 * policy: CPU-time clocks are rejected with ENOTSUP, only
 * CLOCK_REALTIME/CLOCK_HIGHRES are accepted, anything else is EINVAL.
 * Restore from the original revision before use.
 */
int
{
int error;
if (error) {
return (-1);
}
return (0);
}
int
{
int error;
switch (clock_id) {
case CLOCK_VIRTUAL:
case CLOCK_PROCESS_CPUTIME_ID:
case CLOCK_THREAD_CPUTIME_ID:
return (ENOTSUP);
case CLOCK_REALTIME:
case CLOCK_HIGHRES:
break;
default:
return (EINVAL);
}
if (flags & TIMER_ABSTIME) {
} else {
if (clock_id == CLOCK_HIGHRES)
}
/*
* Don't return yet if we didn't really get a timeout.
* This can happen if we return because someone resets
* the system clock.
*/
if (flags & TIMER_ABSTIME) {
goto restart;
}
} else {
goto restart;
}
}
}
(flags & TIMER_ABSTIME)) {
/*
* Don't return yet just because someone reset the
* system clock. Recompute the new relative time
* and reissue the nanosleep() call if necessary.
*
* Resetting the system clock causes all sorts of
* problems and the SUSV3 standards body should
* have made the behavior of clock_nanosleep() be
* implementation-defined in such a case rather than
* being specific about honoring the new system time.
* Standards bodies are filled with fools and idiots.
*/
goto restart;
}
return (error);
}
/*
 * NOTE(review): remnants of sleep() (returns the number of unslept
 * seconds accumulated in `rem`) and, presumably, usleep() (the
 * bare `return (0)` wrapper).  Signatures and the nanosleep-based
 * bodies were lost in extraction; restore from the original revision.
 */
unsigned int
{
unsigned int rem = 0;
int error;
rem++;
}
return (rem);
}
int
{
return (0);
}
/*
 * NOTE(review): remnants of the cancelable file-descriptor wrappers.
 * The surviving `extern` declarations identify close() (which also
 * notifies libaio via _aio_close()), fcntl(), fdatasync(), fsync(),
 * open() and open64(); the #if !defined(_LP64) pairs are the 32-bit
 * large-file variants (creat64/open64 etc.).  Each body originally
 * fed PERFORM() through `rv`; the calls themselves were lost in
 * extraction.  Restore from the original revision before use.
 */
int
{
extern void _aio_close(int);
extern int _close(int);
int rv;
}
int
{
int rv;
}
#if !defined(_LP64)
int
{
int rv;
}
#endif /* !_LP64 */
int
{
extern int _fcntl(int, int, ...);
int rv;
}
int
{
extern int _fdatasync(int);
int rv;
}
int
{
extern int _fsync(int);
int rv;
}
int
{
int rv;
}
#if !defined(_LP64)
int
{
int rv;
}
#endif /* !_LP64 */
{
}
int
{
int rv;
}
int
{
int rv;
}
int
{
extern int _open(const char *, int, ...);
int rv;
}
#if !defined(_LP64)
int
{
extern int _open64(const char *, int, ...);
int rv;
}
#endif /* !_LP64 */
/*
 * Cancelable wrapper for pause(2).
 * NOTE(review): the body is truncated -- presumably it originally
 * ended with PERFORM(_pause()); as written it falls off the end of a
 * non-void function.  Restore from the original revision.
 */
int
pause(void)
{
extern int _pause(void);
int rv;
}
/*
 * NOTE(review): remnants of the remaining cancelable wrappers.
 * Recognizable from the surviving externs/locals: the pread/pwrite
 * (and 64-bit large-file) pairs under #if !defined(_LP64), sigpause(),
 * sigsuspend() (PROLOGUE_MASK/EPILOGUE_MASK user), sigtimedwait()
 * (returns a signal number, -1 after do_sigcancel()), tcdrain(),
 * wait-family wrappers, and the libc-private waitpid_cancel()
 * documented below.  Signatures and call statements were lost in
 * extraction; restore from the original revision before use.
 */
{
}
#if !defined(_LP64)
{
}
#endif /* !_LP64 */
{
}
#if !defined(_LP64)
{
}
#endif /* !_LP64 */
{
}
int
{
extern int _sigpause(int);
int rv;
}
int
{
extern int __sigsuspend(const sigset_t *);
int rv;
return (rv);
}
int
{
const sigset_t *);
int rv;
return (rv);
}
int
{
const timespec_t *);
int sig;
do_sigcancel();
sig = -1;
}
return (sig);
}
int
{
}
int
{
}
int
{
}
int
{
extern int _tcdrain(int);
int rv;
}
{
}
{
}
int
{
int rv;
}
/*
* waitpid_cancel() is a libc-private symbol for internal use
* where cancellation semantics is desired (see system()).
*/
{
}
{
}