/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"
// Snapshot pointer array iterator
// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // we do see multiple commit/uncommit on the same memory, it is ok
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }

  // insert at current position; neighbors must not be duplicates
  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      assert(!is_dup_pointer(ptr, _array->at(_pos - 1)), "duplicated pointer");
    }
    if (_pos < _array->length() - 1) {
      assert(!is_dup_pointer(ptr, _array->at(_pos + 1)), "duplicated pointer");
    }
    return _array->insert_at(ptr, _pos);
  }

  // insert after current position, with the same duplicate checks
  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      assert(!is_dup_pointer(ptr, _array->at(_pos - 1)), "duplicated pointer");
    }
    if (_pos < _array->length() - 1) {
      assert(!is_dup_pointer(ptr, _array->at(_pos + 1)), "duplicated pointer");
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos ++;
      return true;
    }
    return false;
  }
#endif

  // advance to the first record whose address is at or above 'addr';
  // the array is sorted by address
  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
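
// Usage sketch (illustrative, not from the original header): the underlying
// array is sorted by address, so a caller typically seeks with locate() and
// then inserts relative to the found position. 'malloc_ptr_array' and 'rec'
// are hypothetical.
//
//   MemPointerIterator itr(malloc_ptr_array);
//   MemPointer* p = itr.locate(rec->addr());   // first record >= rec->addr()
//   if (p == NULL || p->addr() != rec->addr()) {
//     itr.insert(rec);  // debug builds assert neighbors are not duplicates
//   }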
class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
    MemPointerIterator(arr) {
  }

  // locate an existing reserved memory region that contains the specified
  // address, or the reserved region just above this address, where the
  // incoming reserved region should be inserted.
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }

  // the following methods update virtual memory in the context
  // of the 'current' position, which is properly positioned by
  // callers via the locate method.
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // split a reserved region to create a new memory region with specified base and size
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);

 private:
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);
  bool insert_reserved_region(MemPointerRecord* rec);

  // reset current position
  inline void seek(address addr);

#ifdef ASSERT
  // check integrity of records on current reserved memory region.
  bool check_reserved_region() {
    VMMemRegion* reserved_region = (VMMemRegion*)current();
    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
           "Sanity check");
    // all committed regions that follow current reserved region, should all
    // belong to the reserved region.
    VMMemRegion* next_region = (VMMemRegion*)next();
    for (; next_region != NULL && next_region->is_committed_region();
         next_region = (VMMemRegion*)next()) {
      if (!reserved_region->contains_region(next_region)) {
        return false;
      }
    }
    return true;
  }

  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // we do see multiple commit/uncommit on the same memory, it is ok
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
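
// Usage sketch (illustrative, not from the original header): callers position
// the iterator via locate() before applying a virtual memory update, so the
// update methods operate relative to 'current'. 'vm_ptr_array' and 'rec' are
// hypothetical.
//
//   VMMemPointerIterator itr(vm_ptr_array);
//   VMMemRegion* rgn = (VMMemRegion*)itr.locate(rec->addr());
//   if (rgn != NULL && rgn->is_reserved_region() &&
//       rgn->contains_address(rec->addr())) {
//     itr.add_committed_region(rec);  // commit inside the enclosing reservation
//   }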
class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur = _itr.current();
    assert(cur == NULL || !((MemPointerRecord*)cur)->is_vm_pointer(), "seek error");
    return cur;
#else
    return _itr.current();
#endif
  }

  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // arena memory record is a special case, which we have to compare
    // sequence number against its associated arena record.
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // if there is an associated arena record, it has to be the previous
      // record because of sorting order (by address) - NMT generates a pseudo address
      // for an arena's size record by offsetting the arena's address, which guarantees
      // the order of the arena record and its size record.
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
          next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          //   - if the arena record is an allocation record, this early
          //     size record must be left over from a previous arena,
          //     and the last size record should have size = 0.
          //   - if the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, an arena always resets
          //     its size before it is gone (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
            "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
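
// Illustrative note (not from the original header): because an arena's size
// record uses a pseudo address offset from the arena's own address, sorting
// by address always places it directly after its arena record, e.g.
//
//   [arena record:      addr = A,          seq = 10]  <- allocation record
//   [arena size record: addr = A + offset, seq = 12]  <- reported with arena
//
// If the size record's sequence number were lower than the arena record's,
// it is a leftover or cleanup record with size = 0, and next() skips it.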
// Collapse duplicated records. Eliminating duplicated records here is much
// cheaper than during the promotion phase. However, it has a limitation: it
// can only eliminate duplicated records within a generation; there is still
// a chance of seeing duplicated records during promotion.
// We want to use the record with the higher sequence number, because it has
// a more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");
      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get next record, but skip the duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");
      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  // duplicated records have the same address, size and flags
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() &&
                p1->flags() == p2->flags());
    return ret;
  }
};
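
// Illustrative note (not from the original header): duplicates carry the same
// address, size and flags; the iterator keeps the later record (higher
// sequence number) because its callsite pc is more accurate, e.g.
//
//   [reserve: addr = A, size = S, seq = 3]  <- collapsed
//   [reserve: addr = A, size = S, seq = 7]  <- returned by current()/next()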
class StagingArea : public _ValueObj {
 private:
  MemPointerArray*   _malloc_data;
  MemPointerArray*   _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data()     { return _vm_data; }
};
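
// Usage sketch (illustrative, not from the original header): the staging area
// hands out the filtering walkers above, which the promotion code consumes.
// 'staging_area' is hypothetical.
//
//   MallocRecordIterator malloc_itr = staging_area.malloc_record_walker();
//   VMRecordIterator     vm_itr     = staging_area.virtual_memory_record_walker();
//   // iterate malloc_itr / vm_itr to promote records into the snapshot, then
//   // call staging_area.clear() to recycle the buffers for the next pass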
class MemBaseline;
class MemRecorder;

class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // the following two arrays contain records of all known lived memory blocks
  // live malloc-ed memory pointers
  MemPointerArray*      _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray*      _vm_ptrs;

  // staging area for per-thread recorders before promotion
  StagingArea           _staging_area;

  // the lock to protect this snapshot
  Monitor*              _lock;

  // the number of instance classes
  int                   _number_of_classes;

  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // if we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
      _staging_area.malloc_data() == NULL ||
      _staging_area.vm_data() == NULL ||
      _vm_ptrs == NULL || _lock == NULL ||
      _alloc_ptrs->out_of_memory() ||
      _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into staging area
  bool merge(MemRecorder* rec);
  // promote staged data to snapshot
  bool promote(int number_of_classes);

  int  number_of_classes() const { return _number_of_classes; }

  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  // dump all virtual memory pointers in snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
};
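
// Usage sketch (illustrative, not from the original header): the NMT worker
// thread merges per-thread recorders into the staging area, then promotes the
// staged records into the snapshot's live pointer arrays. 'snapshot',
// 'recorder' and 'num_classes' are hypothetical.
//
//   MemSnapshot* snapshot = ...;  // owned by MemTracker
//   if (!snapshot->merge(recorder) || !snapshot->promote(num_classes)) {
//     // merge/promotion failure typically means native out-of-memory
//     assert(snapshot->out_of_memory(), "unexpected failure");
//   }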
#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP