/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright (c) 2015, Joyent, Inc. All rights reserved.
*/
/* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <wait.h>
/*
* These leading-underbar symbols exist because mistakes were made
* in the past that put them into non-SUNWprivate versions of
* the libc mapfiles. They should be eliminated, but oh well...
*/
#if !defined(_LP64)
#endif
/*
* These are SUNWprivate, but they are being used by Sun Studio libcollector.
*/
/*
* atfork_lock protects the pthread_atfork() data structures.
*
* fork_lock does double-duty. Not only does it (and atfork_lock)
* serialize calls to fork() and forkall(), but it also serializes calls
* to thr_suspend() and thr_continue() (because fork() and forkall() also
* suspend and continue other threads and they want no competition).
*
* Functions called in dlopen()ed L10N objects can do anything, including
* call malloc() and free(). Such calls are not fork-safe when protected
* by an ordinary mutex that is acquired in libc's prefork processing
* because, with an interposed malloc library present, there would be a
* lock ordering violation due to the pthread_atfork() prefork function
* in the interposition library acquiring its malloc lock(s) before the
* ordinary mutex in libc being acquired by libc's prefork functions.
*
* Within libc, calls to malloc() and free() are fork-safe if the calls
* are made while holding no other libc locks. This covers almost all
* of libc's malloc() and free() calls. For those libc code paths, such
* as the above-mentioned L10N calls, that require serialization and that
* may call malloc() or free(), libc uses callout_lock_enter() to perform
* the serialization. This works because callout_lock is not acquired as
* part of running the pthread_atfork() prefork handlers (to avoid the
* lock ordering violation described above). Rather, it is simply
* reinitialized in postfork1_child() to cover the case that some
* now-defunct thread might have been suspended while holding it.
*/
void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}
/*
* Use cancel_safe_mutex_lock() to protect against being cancelled while
* holding callout_lock and calling outside of libc (via L10N plugins).
* We will honor a pending cancellation request when callout_lock_exit()
* is called, by calling cancel_safe_mutex_unlock().
*/
void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}
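
/*
 * Illustrative sketch only (hypothetical code, not part of libc): the
 * pthread_atfork() pattern used by a malloc interposition library,
 * which is what creates the lock-ordering constraint described in the
 * block comment above.  The interposer's prefork handler runs before
 * libc's own prefork processing, so its malloc lock is always taken
 * first; libc must therefore never acquire, at fork time, a lock that
 * a malloc()/free() path may also hold.  Assumes <pthread.h>.
 */
#if 0	/* example only; not compiled */
static pthread_mutex_t interposer_malloc_lock = PTHREAD_MUTEX_INITIALIZER;

static void
interposer_prefork(void)
{
	/* taken before any ordinary libc mutex at fork time */
	(void) pthread_mutex_lock(&interposer_malloc_lock);
}

static void
interposer_postfork(void)
{
	(void) pthread_mutex_unlock(&interposer_malloc_lock);
}

static void
interposer_init(void)
{
	(void) pthread_atfork(interposer_prefork,
	    interposer_postfork, interposer_postfork);
}
#endif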
pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);

	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();
	}
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
/*
* fork() is fork1() for both Posix threads and Solaris threads.
* The forkall() interface exists for applications that require
* the semantics of replicating all threads.
*/
pid_t
fork(void)
{
return (forkx(0));
}
/*
* Much of the logic here is the same as in forkx().
* See the comments in forkx(), above.
*/
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {		/* child */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
pid_t
forkall(void)
{
return (forkallx(0));
}
/*
* For the implementation of cancellation at cancellation points.
*/
#define	PROLOGUE						\
{								\
	ulwp_t *self = curthread;				\
	int nocancel =						\
	    (self->ul_vfork | self->ul_nocancel |		\
	    self->ul_libc_locks | self->ul_critical |		\
	    self->ul_sigdefer);					\
	int abort = 0;						\
	if (nocancel == 0) {					\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {		\
			self->ul_cancel_async = 1;		\
			if (self->ul_cancel_pending)		\
				pthread_exit(PTHREAD_CANCELED);	\
		}						\
		self->ul_sp = stkptr();				\
	} else if (self->ul_cancel_pending &&			\
	    !self->ul_cancel_disabled) {			\
		set_cancel_eintr_flag(self);			\
		abort = 1;					\
	}
#define	EPILOGUE						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
}
/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)					\
	PROLOGUE						\
	if (abort) {						\
		*self->ul_errnop = EINTR;			\
		return (-1);					\
	}							\
	if (nocancel)						\
		return (function_call);				\
	rv = function_call;					\
	EPILOGUE						\
	return (rv);
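
/*
 * Illustrative sketch only (hypothetical, not one of the wrappers
 * below): the shape of a cancellation-point wrapper built from these
 * macros.  PROLOGUE declares 'self', 'nocancel' and 'abort' in a new
 * scope that EPILOGUE closes, and PERFORM() supplies the entire body,
 * so a wrapper needs only an 'rv' of the right type.
 */
#if 0	/* example only; not compiled */
ssize_t
example_wrapper(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}
#endif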
/*
* Specialized prologue for sigsuspend() and pollsys().
* These system calls pass a signal mask to the kernel.
* The kernel replaces the thread's signal mask with the
* temporary mask before the thread goes to sleep. If
* a signal is received, the signal handler will execute
* with the temporary mask, as modified by the sigaction
* for the particular signal.
*
* We block all signals until we reach the kernel with the
* temporary mask. This eliminates race conditions with
* setting the signal mask while signals are being posted.
*/
#define	PROLOGUE_MASK(sigmask)					\
{								\
	ulwp_t *self = curthread;				\
	int nocancel =						\
	    (self->ul_vfork | self->ul_nocancel |		\
	    self->ul_libc_locks | self->ul_critical |		\
	    self->ul_sigdefer);					\
	if (sigmask) {						\
		block_all_signals(self);			\
		self->ul_tmpmask = *sigmask;			\
		delete_reserved_signals(&self->ul_tmpmask);	\
		self->ul_sigsuspend = 1;			\
	}							\
	if (nocancel == 0) {					\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {		\
			self->ul_cancel_async = 1;		\
			if (self->ul_cancel_pending) {		\
				if (self->ul_sigsuspend) {	\
					self->ul_sigsuspend = 0;\
					restore_signals(self);	\
				}				\
				pthread_exit(PTHREAD_CANCELED);	\
			}					\
		}						\
		self->ul_sp = stkptr();				\
	}							\
}
/*
* If a signal is taken, we return from the system call wrapper with
* our original signal mask restored (see code in call_user_handler()).
* If not (self->ul_sigsuspend is still non-zero), we must restore our
* original signal mask ourself.
*/
#define	EPILOGUE_MASK						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
	if (self->ul_sigsuspend) {				\
		self->ul_sigsuspend = 0;			\
		restore_signals(self);				\
	}							\
}
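
/*
 * Illustrative sketch only (application-side, hypothetical): the
 * classic wait-for-signal pattern whose atomicity the mask handling
 * above preserves.  If the unblock and the sleep were not a single
 * atomic operation inside the kernel, a signal arriving between them
 * would be lost.  Assumes <signal.h> and a SIGUSR1 handler that sets
 * got_sigusr1.
 */
#if 0	/* example only; not compiled */
static volatile sig_atomic_t got_sigusr1;

static void
wait_for_sigusr1(void)
{
	sigset_t blocked, waitmask;

	(void) sigemptyset(&blocked);
	(void) sigaddset(&blocked, SIGUSR1);
	/* block SIGUSR1 and remember the previous mask */
	(void) sigprocmask(SIG_BLOCK, &blocked, &waitmask);
	(void) sigdelset(&waitmask, SIGUSR1);
	while (!got_sigusr1)
		(void) sigsuspend(&waitmask);	/* atomic unblock + sleep */
	(void) sigprocmask(SIG_UNBLOCK, &blocked, NULL);
}
#endif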
/*
* Cancellation prologue and epilogue functions,
* for cancellation points too complex to include here.
*/
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}
void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
/*
* Called from _thrp_join() (thr_join() is a cancellation point)
*/
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}
ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}
int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}
int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}
int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}
int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
int
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
unsigned int
sleep(unsigned int sec)
{
	unsigned int rem = 0;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}
int
usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
	return (0);
}
int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}
int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}
int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}
int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FDSYNC))
}
int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, FSYNC))
}
int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}
#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */
ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}
int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}
int
msync(caddr_t addr, size_t len, int flags)
{
	extern int __msync(caddr_t, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}
int
openat(int fd, const char *path, int oflag, ...)
{
	extern int __openat(int, const char *, int, mode_t);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}
int
open(const char *path, int oflag, ...)
{
	extern int __open(const char *, int, mode_t);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}
int
creat(const char *path, mode_t mode)
{
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}
#if !defined(_LP64)
int
openat64(int fd, const char *path, int oflag, ...)
{
	extern int __openat64(int, const char *, int, mode_t);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}

int
open64(const char *path, int oflag, ...)
{
	extern int __open64(const char *, int, mode_t);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

int
creat64(const char *path, mode_t mode)
{
	return (open64(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}
#endif	/* !_LP64 */
int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}
ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}
#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}

ssize_t
preadv64(int fildes, const struct iovec *iov, int iovcnt, off64_t offset)
{
	extern ssize_t __preadv64(int, const struct iovec *, int,
	    off_t, off_t);
	ssize_t rv;

	PERFORM(__preadv64(fildes, iov, iovcnt, offset & 0xffffffffULL,
	    offset>>32))
}
#endif	/* !_LP64 */
ssize_t
preadv(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
{
	extern ssize_t __preadv(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__preadv(fildes, iov, iovcnt, offset, 0))
}
ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}
#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}

ssize_t
pwritev64(int fildes, const struct iovec *iov, int iovcnt, off64_t offset)
{
	extern ssize_t __pwritev64(int, const struct iovec *, int,
	    off_t, off_t);
	ssize_t rv;

	PERFORM(__pwritev64(fildes, iov, iovcnt, offset & 0xffffffffULL,
	    offset>>32))
}
#endif	/* !_LP64 */
ssize_t
pwritev(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
{
	extern ssize_t __pwritev(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__pwritev(fildes, iov, iovcnt, offset, 0))
}
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}
int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}
int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}
int
sigwait(sigset_t *set)
{
	return (sigtimedwait(set, NULL, NULL));
}
int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}
int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t, int, const union sigval, int, int);

	return (__sigqueue(pid, signo, value, SI_QUEUE, 0));
}
int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version,
	int flags)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version, flags))
}
int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}
int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}
int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
	struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}
int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}
int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}
int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}
int
_so_sendto(int sock, const void *buf, size_t len, int flags,
	const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}
int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}
int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(__waitid(idtype, id, infop, options))
}
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}