/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(klassOop k) {
  Klass::cast(k)->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
private:
bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
}
bool allow_nested_vm_operations() const { return false; }
};
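
// Note: when constructed with is_cheap_allocated == true the operation is
// C-heap allocated and evaluated as an asynchronous safepoint: the
// requesting thread does not wait, and the VMThread deallocates the
// operation afterwards. That is what lets the Watcher thread's periodic
// task below fire it without blocking.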

// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourself
    this->disenroll();
    delete this;
}
};

void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
// seconds into the run which turns on biased locking for all
// currently loaded classes as well as future ones. This is a
// workaround for startup time regressions due to a large number of
// safepoints being taken during VM startup for bias revocation.
// Ideally we would have a lower cost for individual bias revocation
// and not need a mechanism like this.
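  // (BiasedLockingStartupDelay defaults to 4000 ms.)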
if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
}
}
}

bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
}

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*>* monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->owner_is_scalar_replaced()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
}
}
}
}
}

  thread->set_cached_monitor_info(info);
  return info;
}
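
// The monitor info cached above is only valid for the duration of one
// revocation operation; it is cleared again through
// clean_up_cached_monitor_info() before control returns to Java.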

static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    Klass::cast(obj->klass())->external_name());
    }
return BiasedLocking::NOT_BIASED;
}
tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
(intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
}

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
// Object is anonymously biased. We can get here if, for
// example, we revoke the bias due to an identity hash code
// being computed for an object.
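    // (An anonymously biased mark has the bias pattern set but a NULL
    // thread pointer: the object is biasable but no thread has claimed it.)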
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
}
return BiasedLocking::BIAS_REVOKED;
}
// Handle case where the thread toward which the object was biased has exited
bool thread_is_alive = false;
if (requesting_thread == biased_thread) {
thread_is_alive = true;
} else {
for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
if (cur_thread == biased_thread) {
thread_is_alive = true;
break;
}
}
}
if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
}
return BiasedLocking::BIAS_REVOKED;
}
// Thread owning bias is alive.
// Check to see whether it currently owns the lock and, if so,
// write down the needed displaced headers to the thread's stack.
// Otherwise, restore the object's header either to the unlocked
// or unbiased state.
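  // (Only the oldest lock on the stack receives the real displaced header;
  // inner recursive locks get a NULL displaced header, mirroring the
  // stack-locking convention for recursive lightweight locks.)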
for (int i = 0; i < cached_monitor_info->length(); i++) {
if (TraceBiasedLocking && Verbose) {
}
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
} else {
if (TraceBiasedLocking && Verbose) {
}
}
}
if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark
    obj->set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
}
} else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
}
}
return BiasedLocking::BIAS_REVOKED;
}

enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};
if (!mark->has_bias_pattern()) {
return HR_NOT_BIASED;
}
// Heuristics to attempt to throttle the number of revocations.
// Stages:
// 1. Revoke the biases of all objects in the heap of this type,
// but allow rebiasing of those objects if unlocked.
// 2. Revoke the biases of all objects in the heap of this type
// and don't allow rebiasing of these objects. Disable
// allocation of objects of that type with the bias bit set.
if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
(last_bulk_revocation_time != 0) &&
// This is the first revocation we've seen in a while of an
// object of this type since the last time we performed a bulk
// rebiasing operation. The application is allocating objects in
// bulk which are biased toward a thread and then handing them
// off to another thread. We can cope with this allocation
// pattern via the bulk rebiasing mechanism so we reset the
// klass's revocation count rather than allow it to increase
// monotonically. If we see the need to perform another bulk
// rebias operation later, we will, and if subsequently we see
// many more revocation operations in a short period of time we
// will completely disable biasing for this type.
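    // (Defaults: BiasedLockingBulkRebiasThreshold == 20,
    // BiasedLockingBulkRevokeThreshold == 40,
    // BiasedLockingDecayTime == 25000 ms.)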
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
}
  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
}

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
}

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
}
return HR_SINGLE_REVOKE;
}

static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  Klass::cast(o->klass())->set_last_biased_lock_bulk_revocation_time(cur_time);

  klassOop k_o = o->klass();
  Klass* klass = Klass::cast(k_o);
if (bulk_rebias) {
// Use the epoch in the klass of the object to implicitly revoke
// all biases of objects of this data type and force them to be
// reacquired. However, we also need to walk the stacks of all
// threads and update the headers of lightweight locked objects
// with biases to have the current epoch.
// If the prototype header doesn't have the bias pattern, don't
// try to update the epoch -- assume another VM operation came in
// and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

// Now walk all threads' stacks and adjust epochs of any biased
// and locked objects of this data type we encounter
for (int i = 0; i < cached_monitor_info->length(); i++) {
// We might have encountered this object already in the case of recursive locking
assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
}
}
}
}
// At this point we're done. All we have to do is potentially
// adjust the header of the given object to revoke its bias.
revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
} else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
}
// Disable biased locking for this data type. Not only will this
// cause future instances to not be biased, but existing biased
// instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

// Now walk all threads' stacks and forcibly revoke the biases of
// any locked and biased objects of this data type we encounter.
for (int i = 0; i < cached_monitor_info->length(); i++) {
}
}
}
    // The bias of the passed object must be revoked as well, to
    // ensure our guarantees to callers hold
revoke_bias(o, false, true, requesting_thread);
}

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

if (attempt_rebias_of_object &&
o->mark()->has_bias_pattern() &&
if (TraceBiasedLocking) {
}
}
"bug in bulk bias revocation");
return status_code;
}
static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
    thr->set_cached_monitor_info(NULL);
}
}

class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

virtual bool doit_prologue() {
// Verify that there is actual work to do since the callers just
// give us locked object(s). If we don't find any biased objects
// there is nothing to do and we avoid a safepoint.
if (mark->has_bias_pattern()) {
return true;
}
} else {
if (mark->has_bias_pattern()) {
return true;
}
}
}
return false;
}
virtual void doit() {
    if (_obj != NULL) {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with potentially per-thread safepoint:");
}
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
      clean_up_cached_monitor_info();
      return;
} else {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with global safepoint:");
}
      BiasedLocking::revoke_at_safepoint(_objs);
    }
}

  BiasedLocking::Condition status_code() const {
    return _status_code;
}
};

class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;
 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
virtual bool doit_prologue() { return true; }
virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
}
};

BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
// efficiently enough that we should not cause these revocations to
// update the heuristics because doing so may cause unwanted bulk
// revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
// an identity hash code computation. Try to revoke the bias
// without a safepoint. This is possible if we can successfully
// compare-and-exchange an unbiased header into the mark word of
// the object, meaning that no other thread has raced to acquire
// the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
return BIAS_REVOKED;
}
} else if (mark->has_bias_pattern()) {
if (!prototype_header->has_bias_pattern()) {
// This object has a stale bias from before the bulk revocation
// for this data type occurred. It's pointless to update the
// heuristics at this point so simply update the header with a
// CAS. If we fail this race, the object's bias has been revoked
// by another thread so we simply return and let the caller deal
// with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
// The epoch of this biasing has expired indicating that the
// object is effectively unbiased. Depending on whether we need
// to rebias or revoke the bias of this object we can do it
// efficiently enough with a CAS that we shouldn't update the
// heuristics. This is normally done in the assembly code but we
// can reach this point due to various points in the runtime
// needing to revoke biases.
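      // (Epochs are only a few bits wide -- markOopDesc::epoch_bits -- so
      // bias validity means equality with the klass's current epoch, not
      // any monotonic ordering.)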
if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
if (res_mark == biased_value) {
return BIAS_REVOKED_AND_REBIASED;
}
} else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
return BIAS_REVOKED;
}
}
}
}

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
return NOT_BIASED;
} else if (heuristics == HR_SINGLE_REVOKE) {
    Klass* k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
// toward it, again likely due to an identity hash code
// computation. We can again avoid a safepoint in this case
// since we are only going to walk our own stack. There are no
// races with revocations occurring in other threads because we
// reach no safepoints in the revocation path.
// Also check the epoch because even if threads match, another thread
// can come in with a CAS to steal the bias of an object that has a
// stale epoch.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
}
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
} else {
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
}
}

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}

void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}

void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL);
} else if ((heuristics == HR_BULK_REBIAS) ||
(heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}
for (int i = 0; i < len; i++) {
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL);
} else if ((heuristics == HR_BULK_REBIAS) ||
(heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
}
  clean_up_cached_monitor_info();
}

void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
return;
// In order to reduce the number of mark words preserved during GC
// due to the presence of biased locking, we reinitialize most mark
// words to the class's prototype during GC -- even those which have
// a currently valid bias owner. One important situation where we
// must not clobber a bias is when a biased object is currently
// locked. To handle this case we iterate over the currently-locked
// monitors in a prepass and, if they are biased, preserve their
// mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*>* monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
}
}
}
}
}
}
}
}

void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
return;
for (int i = 0; i < len; i++) {
}

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}

int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }

// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
return _slow_path_entry_count;
}
  int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  tty->print_cr("# total entries: %d", _total_entry_count);
  tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}