/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "mutex_solaris.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "thread_solaris.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// put OS-includes here
# include <pthread.h>
# include <signal.h>
# include <setjmp.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <thread.h>
# include <sys/systeminfo.h>
# include <pwd.h>
# include <poll.h>
#ifndef AMD64
// QQQ seems useless at this point
#endif // AMD64
// Minimum stack size for the VM. It's easier to document a constant value
// but it's different for x86 and sparc because the page sizes are different.
#ifdef AMD64
#else
// 4900493 counter to prevent runaway LDTR refresh attempt
static volatile int ldtr_refresh = 0;
// the libthread instruction that faults because of the stale LDTR
};
#endif // AMD64
// Must never look like an address returned by reserve_memory,
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
return (char*) -1;
}
//
// Validate a ucontext retrieved from walking a uc_link of a ucontext.
// There are issues with libthread giving out uc_links for different threads
// on the same uc_link chain and bad or circular links.
//
return false;
}
if (thread->is_Java_thread()) {
return false;
}
return false;
}
}
return true;
}
// We will only follow one level of uc_link since there are libthread
// issues with ucontext linking and it is better to be safe and just
// let caller retry later.
ucontext_t *uc) {
// cannot validate without uc_link so accept current ucontext
// first ucontext is valid so try the next one
// cannot validate without uc_link so accept current ucontext
// the ucontext one level down is also valid so return it
}
}
}
return retuc;
}
// Assumes ucontext is valid
}
// Assumes ucontext is valid
}
// Assumes ucontext is valid
}
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
// The difference between this and os::fetch_frame_from_context() is that
// here we try to skip nested signal frames.
}
} else {
// construct empty ExtendedPC for return value checking
}
return epc;
}
}
}
return (address)_get_current_sp();
}
// stack is not walkable
return ret;
} else {
}
}
// Fetch per-thread state (flags, lwp id, stack, registers, lwpstatus) for
// the given thread id, apparently via the /proc lwpstatus interface (the
// perror tags reference "thr_mutator_status").
// Returns 0 on success, EINVAL on an open/read failure of the lwpstatus
// record, or a propagated error code.
// NOTE(review): several interior lines (the call that sets 'err', the
// open() of the lwpstatus file, the read() whose size is compared against
// sizeof(lwpstatus_t)) are not visible in this extract - confirm against
// the full source; comments below describe only what the visible lines show.
static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
// Presumably propagates a non-zero error from the (not visible) state call.
return (err);
*lwp);
// Could not open the per-lwp status file - report and fail.
perror("thr_mutator_status: open lwpstatus");
return (EINVAL);
}
// Short read: the lwpstatus record was not read in full.
sizeof (lwpstatus_t)) {
perror("thr_mutator_status: read lwpstatus");
return (EINVAL);
}
}
// Success: all requested state was retrieved.
return (0);
}
#ifndef AMD64
// Detecting SSE support by OS
// From solaris_i486.s
extern "C" bool sse_check();
extern "C" bool sse_unavailable();
// Determine whether the OS (as opposed to the CPU) supports SSE, by probing
// libc for the _sse_hw flag; the outcome is evidently recorded in a
// file-scope 'sse_status' variable (see the SSE_UNKNOWN check below).
// NOTE(review): the dlopen/dlsym lines and the sse_status assignments are
// not visible in this extract - confirm against the full source.
static void check_for_sse_support() {
if (!VM_Version::supports_sse()) {
// CPU itself has no SSE; no need to ask the OS.
return;
}
// looking for _sse_hw in libc.so, if it does not exist or
// the value (int) is 0, OS has no support for SSE
int *sse_hwp;
void *h;
// open failed, presume no support for SSE
return;
}
} else if (*sse_hwp == 0) {
}
// Done probing libc; release the handle obtained by the (elided) dlopen.
dlclose(h);
if (sse_status == SSE_UNKNOWN) {
}
}
#endif // AMD64
#ifdef AMD64
return true;
#else
if (sse_status == SSE_UNKNOWN)
return sse_status == SSE_SUPPORTED;
#endif // AMD64
}
#ifdef AMD64
return true;
#else
if (bytes < 2 * G) {
return true;
}
}
#endif // AMD64
}
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64
extern "C" JNIEXPORT int
int abort_if_unrecognized) {
#ifndef AMD64
// the SSE instruction faulted. supports_sse() need return false.
return true;
}
#endif // !AMD64
SignalHandlerMark shm(t);
return true;
} else {
warning("Ignoring %s - see 4229104 or 6499219",
}
return true;
}
}
if (t != NULL ){
if(t->is_Java_thread()) {
thread = (JavaThread*)t;
}
else if(t->is_VM_thread()){
}
}
}
guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
return true;
return true;
} else {
// If os::Solaris::SIGasync not chained, and this is a non-vm and
// non-java thread
return true;
}
}
// can't decode this kind of signal
} else {
}
// decide if this trap can be handled by a stub
//%note os_trap_1
// factor me: getPCfromContext
// SafeFetch32() support
return true ;
}
#ifdef AMD64
return true ;
}
#endif // AMD64
// Handle ALL stack overflow variations here
// Throw a stack overflow exception. Guard pages will be reenabled
// while unwinding the stack.
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
} else {
// Thread was in the vm or native code. Return and try to finish.
return true;
}
// Fatal red zone violation. Disable the guard pages and fall through
// to handle_unexpected_exception way down below.
}
}
}
}
// Support Safepoint Polling
}
// BugId 4454115: A read from a MappedByteBuffer can fault
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
}
}
else
// integer divide by zero
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
}
#ifndef AMD64
// floating-point divide by zero
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
}
// The encoding of D2I in i486.ad can cause an exception prior
// to the fist instruction if there was an invalid operation
// pending. We want to dismiss that exception. From the win_32
// side it also seems that if it really was the fist causing
// the exception that we do the d2i by hand with different
// rounding. Seems kind of weird. QQQ TODO
// Note that we take the exception at the NEXT floating point instruction.
if (pc[0] == 0xDB) {
return true;
} else {
}
}
}
#endif // !AMD64
// QQQ It doesn't seem that we need to do this on x86 because we should be able
// to return properly from the handler without this extra stuff on the back side.
else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
// Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
}
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
// and the heap gets shrunk before the field access.
}
}
// Check to see if we caught the safepoint code in the
// process of write protecting the memory serialization page.
// It write enables the page immediately after protecting it
// so we can just return to retry the write.
// Block current thread until the memory serialize page permission restored.
return true;
}
}
// Execution protection violation
//
// Preventative code for future versions of Solaris which may
// enable execution protection when running the 32-bit VM on AMD64.
//
// This should be kept as the last step in the triage. We don't
// have a dedicated trap number for a no-execute fault, so be
// conservative and allow other handlers the first shot.
//
// Note: We don't test that info->si_code == SEGV_ACCERR here.
// this si_code is so generic that it is almost meaningless; and
// the si_code for this condition may change in the future.
// Furthermore, a false-positive should be harmless.
if (UnguardOnExecutionViolation > 0 &&
// Make sure the pc and the faulting address are sane.
//
// If an instruction spans a page boundary, and the page containing
// the beginning of the instruction is executable but the following
// page is not, the pc and the faulting address might be slightly
// different - we still want to unguard the 2nd page in this case.
//
// 15 bytes seems to be a (very) safe value for max instruction size.
bool pc_is_near_addr =
bool instr_spans_page_boundary =
// In conservative mode, don't unguard unless the address is in the VM
// Make memory rwx and retry
os::MEM_PROT_RWX);
if (PrintMiscellaneous && Verbose) {
"at " INTPTR_FORMAT
}
// Set last_addr so if we fault again at the same address, we don't end
// up in an endless loop.
//
// There are two potential complications here. Two threads trapping at
// the same address at the same time could cause one of the threads to
// think it already unguarded, and abort the VM. Likely very rare.
//
// The other race involves two threads alternately trapping at
// different addresses and failing to unguard the page, resulting in
// an endless loop. This condition is probably even more unlikely than
// the first.
//
// Although both cases could be avoided by using locks or thread local
// last_addr, these solutions are unnecessary complication: this
// handler is a best-effort safety net, not a complete solution. It is
// disabled by default and should only be used as a workaround in case
// we missed any no-execute-unsafe VM code.
}
}
}
// save all thread context in case we need to restore it
// 12/02/99: On Sparc it appears that the full context is also saved
// but as yet, no one looks at or restores that saved context
// factor me: setPC
return true;
}
// signal-chaining
return true;
}
#ifndef AMD64
// Workaround (bug 4900493) for Solaris kernel bug 4966651.
// Handle an undefined selector caused by an attempt to assign
// fs in libthread getipriptr(). With the current libthread design every 512
// thread creations the LDT for a private thread data structure is extended
// and there is a hazard that another thread attempting a thread creation
// will use a stale LDTR that doesn't reflect the structure's growth,
// causing a GP fault.
// Enforce the probable limit of passes through here to guard against an
// infinite loop if some other move to fs caused the GP fault. Note that
// this loop counter is ultimately a heuristic as it is possible for
// more than one thread to generate this fault at a time in an MP system.
// In the case of the loop count being exceeded or if the poll fails
// just fall through to a fatal error.
// If there is some other source of T_GPFLT traps and the text at EIP is
// unreadable this code will loop infinitely until the stack is exhausted.
// The key to diagnosis in this case is to look for the bottom signal handler
// frame.
if(! IgnoreLibthreadGPFault) {
const unsigned char *p =
// Expected instruction?
// Infinite loop?
// No, force scheduling to get a fresh view of the LDTR
// Retry the move
return false;
}
}
}
}
}
#endif // !AMD64
if (!abort_if_unrecognized) {
// caller wants another chance, so give it to him
return false;
}
warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand);
}
}
}
// unmask current signal
// Determine which sort of error to throw. Out of swap may signal
// on the thread stack, which could get a mapping error when touched.
vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
}
}
#ifdef AMD64
#else
#endif // AMD64
// Note: it may be unsafe to inspect memory near pc. For example, pc may
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
}
// this is horrendously verbose but the layout of the registers in the
// context does not match how we defined our abstract Register set, so
// we can't just iterate through the gregs area
// this is only for the "general purpose" registers
#ifdef AMD64
#else
#endif
}
#ifdef AMD64
// Nothing to do
}
#else
// From solaris_i486.s
extern "C" void fixcw();
// Set fpu to 53 bit precision. This happens too early to use a stub.
fixcw();
}
// These routines are the initial value of atomic_xchg_entry(),
// atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
// until initialization is complete.
// TODO - replace with .il implementation when compiler supports it.
// try to use the stub:
}
*dest = exchange_value;
return old_value;
}
// try to use the stub:
}
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
}
// Bootstrap implementation of 64-bit compare-and-exchange: per the comment
// block above these routines, it serves as the initial value of the atomic
// entry point until stub initialization is complete.
// NOTE(review): lines appear elided here - the branch that installs and
// invokes 'func' when non-NULL, and the load of 'old_value' from *dest,
// are not visible in this extract.
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
// try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
}
// Fallback: plain, non-atomic CAS - presumably safe only while the VM is
// still single-threaded during bootstrap (confirm against full source).
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
}
// try to use the stub:
}
}
}
#endif // AMD64
#ifndef PRODUCT
#ifdef AMD64
assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif