/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
// NOTE(review): fragment — the enclosing class declaration is missing from
// this view; the friends and members below look like the interior of a
// cache-entry class (presumably OopMapCacheEntry — confirm against the
// full source).
friend class InterpreterOopMap;
friend class OopMapForCacheEntry;
friend class OopMapCache;
friend class VerifyClosure;
protected:
// Initialization
// fills the bit mask for native calls
// Deallocate bit masks and initialize fields
void flush();
private:
void allocate_bit_mask(); // allocates the bit mask on C heap if necessary
void deallocate_bit_mask(); // deallocates the bit mask on C heap if necessary
public:
// NOTE(review): the member-function header these statements belong to is
// missing; under ASSERT the flag presumably records where the bit mask was
// allocated (a matching ctor elsewhere sets it to true) — confirm.
#ifdef ASSERT
_resource_allocate_bit_mask = false;
#endif
}
};
// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)
// NOTE(review): fragment — the class header line and several member
// declarations are missing from this view.
int _bci;       // bytecode index the oop map is computed for
int _stack_top; // expression stack depth at _bci; starts at -1 (set in the ctor)
// Callbacks invoked during oop map generation (presumably GenerateOopMap
// virtuals — confirm). report_results() returning false means results are
// delivered directly via result_for_basicblock instead.
virtual bool report_results() const { return false; }
virtual void fill_stackmap_prolog (int nof_gc_points);
virtual void fill_stackmap_epilog ();
// NOTE(review): dangling parameter list — the first line(s) of this
// declaration are missing.
int stack_top);
public:
// Computes stack map for (method,bci) and initialize entry
void compute_map(TRAPS);
int size();
};
// Constructs an oop-map generator for a single (method, bci) pair.
// BUG FIX: the previous body ignored the 'bci' and 'entry' parameters,
// leaving _bci (declared above) and _entry (dereferenced later, e.g.
// _entry->set_mask_size(0)) uninitialized; record them here.
OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;    // bytecode index the map is computed for
  _entry     = entry;  // cache entry to be filled in by compute_map()
  _stack_top = -1;     // sentinel: stack depth not yet known
}
// NOTE(review): the function bodies below are fragments — the enclosing
// signatures and most statements are missing from this view.
// Fragment (presumably OopMapForCacheEntry::compute_map): short-circuits
// methods whose stackmap is always empty, else falls into the general path.
// First check if it is a method where the stackmap is always empty
_entry->set_mask_size(0);
} else {
}
}
// Fragment of report_results():
return false; // We are not reporting any result. We call result_for_basicblock directly
}
// Fragments of the empty fill_stackmap_* callbacks:
// Do nothing
}
// Do nothing
}
// Do nothing
}
// Dangling tail of a signature (likely the per-basicblock result callback,
// which filters on the single bci of interest — confirm):
int stack_top) {
// Only interested in one specific bci
}
}
}
// Implementation of InterpreterOopMap and OopMapCacheEntry
// NOTE(review): fragment — class header missing; the _failed member
// suggests this is the VerifyClosure helper named in the friend list above.
private:
bool _failed;
public:
};
// Fragment of a constructor (presumably InterpreterOopMap's): initializes
// the map and, under ASSERT, records that any bit mask comes from a
// resource area (the counterpart flag is set to false elsewhere).
initialize();
#ifdef ASSERT
_resource_allocate_bit_mask = true;
#endif
}
// Fragment of the matching destructor:
// The expectation is that the bit mask was allocated
// last in this resource area. That would make the free of the
// bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
// If it was not allocated last, there is not a correctness problem
// but the space for the bit_mask is not freed.
if (mask_size() > small_mask_limit) {
}
}
// Fragment of an emptiness check: asserts the inline mask word is zero and
// returns the precomputed result (the assert's expression head is missing).
_bit_mask[0] == 0), "Should be completely empty");
return result;
}
// Fragment of an initialize/clear routine: zeroes the bci and the inline
// mask words. NOTE(review): 'N' is declared on a missing line — confirm.
_bci = 0;
for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}
// Orphaned closing braces from functions whose bodies are missing:
}
}
}
}
// Fragment of an oop-iteration routine (presumably InterpreterOopMap's):
// walks the packed bit mask entry by entry. NOTE(review): 'mask' is
// declared on a missing line, and the statements that load the current
// word and invoke the closure on each oop offset are missing from this view.
int n = number_of_entries();
int word_index = 0;
// iterate over entries
for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
// get current word
if (mask == 0) {
mask = 1;
}
// test for oop
}
}
// Fragment of a verify routine — the assertion message survives but the
// asserted expression's head is on a missing line.
// If we are doing mark sweep _method may not have a valid header
// $$$ This used to happen only for m/s collections; we might want to
// think of an appropriate generalization of this distinction.
"invalid oop in oopMapCache");
}
#ifdef ENABLE_ZAP_DEAD_LOCALS
// Visits every entry of the packed mask and dispatches each offset to the
// dead-value closure or the live-oop closure.
// NOTE(review): fragment — the declarations of 'mask'/'value' and the load
// of the current word are missing, and no call to value_closure survives
// in this view (the name suggests live non-oop values get it — confirm).
void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
int n = number_of_entries();
int word_index = 0;
// iterate over entries
for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
// get current word
if (mask == 0) {
mask = 1;
}
// test for dead values & oops, and for live values
if ((value & (mask << dead_bit_number)) != 0) dead_closure->offset_do(i); // call this for all dead values or oops
else if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i); // call this for all live oops
}
}
#endif
// Fragment of a print routine: prints the method's value and then iterates
// the entries; the per-entry printing statements are missing from this view.
int n = number_of_entries();
method()->print_value();
for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
else
#endif
}
}
private:
// Marks parameter slot 'slot' as containing an oop: sets the oop bit of
// that slot's bits_per_entry-wide entry inside the packed bit mask.
void set_one(int slot) {
  const int bit_index = slot * InterpreterOopMap::bits_per_entry;
  const uintptr_t oop_bit = ((uintptr_t) 1) << InterpreterOopMap::oop_bit_number;
  _mask[bit_index / BitsPerWord] |= oop_bit << (bit_index % BitsPerWord);
}
public:
// Fragment of the MaskFillerForNative constructor: zeroes the mask words.
// NOTE(review): the lines declaring 'i' (the word count) and storing the
// 'mask'/'size' arguments into members are missing from this view.
MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
// initialize with 0
while (i-- > 0) _mask[i] = 0;
}
// Fragment: the body of generate() is missing from this view.
void generate() {
}
};
// Verifies that this entry's mask agrees with the given local/stack cell
// states. NOTE(review): fragment — the per-slot comparison statements
// inside both loops are missing from this view; only the closure-based
// oop check and the loop skeletons survive.
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
// Check mask includes map
VerifyClosure blk(this);
iterate_oop(&blk);
// Check if map is generated correctly
// (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
for(int i = 0; i < max_locals; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
#endif
}
for(int j = 0; j < stack_top; j++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
#endif
}
return true;
}
// Fragment of a deallocation routine: frees the bit mask only when it
// exceeds the inline (small) representation limit.
if (mask_size() > small_mask_limit) {
}
}
// Dangling assertion message — the asserted expression is on a missing
// line — followed by a debug-only scrub of the inline mask word.
"This bit mask should not be in the resource area");
debug_only(_bit_mask[0] = 0;)
}
}
// fill mask for parameters
}
// Fragment (presumably OopMapCacheEntry::fill): flushes any existing mask,
// records the method, then branches on native vs. bytecoded methods.
// Flush entry to deallocate an existing entry
flush();
set_method(method());
// Native method activations have oops only among the parameters and one
// extra oop following the parameters (the mirror for static native methods).
} else {
}
#ifdef ASSERT
verify();
#endif
}
// Fragment of a mask-building routine (presumably set_mask): packs the
// cell-type states of locals and expression stack into the bit mask.
// NOTE(review): the declarations of 'value'/'mask'/'cell'/'n_entries' and
// the word-store statements are on missing lines.
// compute bit mask size
// compute bits
int word_index = 0;
for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
// store last word
if (mask == 0) {
value = 0;
mask = 1;
}
// switch to stack when done with locals
if (entry_index == max_locals) {
}
// set oop bit
if ( cell->is_reference()) {
}
#ifdef ENABLE_ZAP_DEAD_LOCALS
// set dead bit
}
#endif
}
// make sure last word is stored
// verify bit mask
}
// Orphaned tail of another function — re-initializes state (presumably
// after a flush — confirm against the full source).
initialize();
}
// Implementation of OopMapCache
#ifndef PRODUCT
// Running total of memory used by cache entries (non-product diagnostic).
static long _total_memory_usage = 0;
// Fragment of an accessor returning the total above.
return _total_memory_usage;
}
#endif
// Fragment of a copy routine (presumably copying a cached entry into an
// InterpreterOopMap); the heads of the assertion and of both copy calls
// are on missing lines — only the trailing size arguments survive.
"Should not resource allocate the _bit_mask");
// Is the bit mask contained in the entry?
mask_word_size() * BytesPerWord);
} else {
// The expectation is that this InterpreterOopMap is a recently created
// and empty. It is used to get a copy of a cached entry.
// If the bit mask has a value, it should be in the
// resource area.
"The bit mask should have been allocated from a resource area");
// Allocate the bit_mask from a Resource area for performance. Allocating
// from the C heap as is done for OopMapCache has a significant
// performance impact.
mask_word_size() * BytesPerWord);
}
}
// Fragment of the hash function over (method, bci); the mixing of the
// method component is on a missing line.
// We use method->code_size() rather than method->identity_hash() below since
// the mark may not be present if a pointer to the method is already reversed.
return ((unsigned int) bci)
}
// Fragment of the OopMapCache constructor — its signature line is missing.
{
// Cannot call flush for initialization, since flush
// will check if memory should be deallocated
}
// Destructor: releases out-of-line bit masks, then the entry array (the
// array-deallocation statement is on a missing line).
OopMapCache::~OopMapCache() {
// Deallocate oop maps that are allocated out-of-line
flush();
// Deallocate array
}
// Orphaned closing braces from functions whose bodies are missing:
}
}
// Fragment (presumably flush_obsolete_entries): scans all slots and
// flushes entries for old redefined methods so they are not pinned.
for (int i = 0; i < _size; i++)
// Cache entry is occupied by an old redefined method and we don't want
// to pin it down so flush the entry.
}
}
}
}
}
// Fragment of a lookup routine (presumably OopMapCache::lookup) — the
// signature's first line is missing; 'int bci,' below is a later parameter.
// Probes the hashtable under _mut, computes and caches an entry on a miss.
int bci,
MutexLocker x(&_mut);
// Search hashtable for match
int i;
for(i = 0; i < _probe_depth; i++) {
// Hit path: the statements copying the entry to the caller are missing.
return;
}
}
if (TraceOopMapGeneration) {
static int count = 0;
}
// Entry is not in hashtable.
// Compute entry and return it
if (method->should_not_be_cached()) {
// It is either not safe or not a good idea to cache this methodOop
// at this time. We give the caller of lookup() a copy of the
// interesting info via parameter entry_for, but we don't add it to
// the cache. See the gory details in methodOop.cpp.
return;
}
// First search for an empty slot
for(i = 0; i < _probe_depth; i++) {
return;
}
}
if (TraceOopMapGeneration) {
}
// No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
//entry_at(probe + _probe_depth - 1)->flush();
//for(i = _probe_depth - 1; i > 0; i--) {
// // Copying entry[i] = entry[i-1];
// OopMapCacheEntry *to = entry_at(probe + i);
// OopMapCacheEntry *from = entry_at(probe + i - 1);
// to->copy(from);
// }
// Copy the newly cached entry to input parameter
if (TraceOopMapGeneration) {
}
return;
}
// Fragment of an uncached one-shot computation (a temporary entry is
// initialized; its allocation and use are on missing lines).
// Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
tmp->initialize();
}