/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"
3863N/A// Snapshot pointer array iterator
3863N/A
3863N/A// The pointer array contains malloc-ed pointers
3863N/Aclass MemPointerIterator : public MemPointerArrayIteratorImpl {
3863N/A public:
3863N/A MemPointerIterator(MemPointerArray* arr):
3863N/A MemPointerArrayIteratorImpl(arr) {
3863N/A assert(arr != NULL, "null array");
3863N/A }
3863N/A
3863N/A#ifdef ASSERT
3863N/A virtual bool is_dup_pointer(const MemPointer* ptr1,
3863N/A const MemPointer* ptr2) const {
3863N/A MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
3863N/A MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
3863N/A
3863N/A if (p1->addr() != p2->addr()) return false;
3863N/A if ((p1->flags() & MemPointerRecord::tag_masks) !=
3863N/A (p2->flags() & MemPointerRecord::tag_masks)) {
3863N/A return false;
3863N/A }
3863N/A // we do see multiple commit/uncommit on the same memory, it is ok
3863N/A return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
3863N/A (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
3863N/A }
3863N/A
3863N/A virtual bool insert(MemPointer* ptr) {
3863N/A if (_pos > 0) {
3863N/A MemPointer* p1 = (MemPointer*)ptr;
3863N/A MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
3863N/A assert(!is_dup_pointer(p1, p2),
3949N/A err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
3863N/A }
3863N/A if (_pos < _array->length() -1) {
3863N/A MemPointer* p1 = (MemPointer*)ptr;
3863N/A MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
3863N/A assert(!is_dup_pointer(p1, p2),
3949N/A err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
3863N/A }
3863N/A return _array->insert_at(ptr, _pos);
3863N/A }
3863N/A
3863N/A virtual bool insert_after(MemPointer* ptr) {
3863N/A if (_pos > 0) {
3863N/A MemPointer* p1 = (MemPointer*)ptr;
3863N/A MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
3863N/A assert(!is_dup_pointer(p1, p2),
3949N/A err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
3863N/A }
3863N/A if (_pos < _array->length() - 1) {
3863N/A MemPointer* p1 = (MemPointer*)ptr;
3863N/A MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
3863N/A
3863N/A assert(!is_dup_pointer(p1, p2),
3949N/A err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
3863N/A }
3863N/A if (_array->insert_at(ptr, _pos + 1)) {
3863N/A _pos ++;
3863N/A return true;
3863N/A }
3863N/A return false;
3863N/A }
3863N/A#endif
3863N/A
3863N/A virtual MemPointer* locate(address addr) {
3863N/A MemPointer* cur = current();
3863N/A while (cur != NULL && cur->addr() < addr) {
3863N/A cur = next();
3863N/A }
3863N/A return cur;
3863N/A }
3863N/A};
3863N/A
3863N/Aclass VMMemPointerIterator : public MemPointerIterator {
3863N/A public:
3863N/A VMMemPointerIterator(MemPointerArray* arr):
3863N/A MemPointerIterator(arr) {
3863N/A }
3863N/A
4064N/A // locate an existing reserved memory region that contains specified address,
4064N/A // or the reserved region just above this address, where the incoming
4064N/A // reserved region should be inserted.
3863N/A virtual MemPointer* locate(address addr) {
4064N/A reset();
4064N/A VMMemRegion* reg = (VMMemRegion*)current();
4064N/A while (reg != NULL) {
4064N/A if (reg->is_reserved_region()) {
4064N/A if (reg->contains_address(addr) || addr < reg->base()) {
4064N/A return reg;
3863N/A }
3863N/A }
4064N/A reg = (VMMemRegion*)next();
4064N/A }
4058N/A return NULL;
4058N/A }
4064N/A
4064N/A // following methods update virtual memory in the context
4064N/A // of 'current' position, which is properly positioned by
4064N/A // callers via locate method.
4064N/A bool add_reserved_region(MemPointerRecord* rec);
4064N/A bool add_committed_region(MemPointerRecord* rec);
4064N/A bool remove_uncommitted_region(MemPointerRecord* rec);
4064N/A bool remove_released_region(MemPointerRecord* rec);
3863N/A
4064N/A // split a reserved region to create a new memory region with specified base and size
4064N/A bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
4064N/A private:
4064N/A bool insert_record(MemPointerRecord* rec);
4064N/A bool insert_record_after(MemPointerRecord* rec);
4064N/A
4064N/A bool insert_reserved_region(MemPointerRecord* rec);
4064N/A
4064N/A // reset current position
4064N/A inline void reset() { _pos = 0; }
3863N/A#ifdef ASSERT
4068N/A // check integrity of records on current reserved memory region.
4068N/A bool check_reserved_region() {
4068N/A VMMemRegion* reserved_region = (VMMemRegion*)current();
4068N/A assert(reserved_region != NULL && reserved_region->is_reserved_region(),
4068N/A "Sanity check");
4068N/A // all committed regions that follow current reserved region, should all
4068N/A // belong to the reserved region.
4068N/A VMMemRegion* next_region = (VMMemRegion*)next();
4068N/A for (; next_region != NULL && next_region->is_committed_region();
4068N/A next_region = (VMMemRegion*)next() ) {
4068N/A if(!reserved_region->contains_region(next_region)) {
4068N/A return false;
4068N/A }
4068N/A }
4068N/A return true;
4068N/A }
4068N/A
3863N/A virtual bool is_dup_pointer(const MemPointer* ptr1,
3863N/A const MemPointer* ptr2) const {
3863N/A VMMemRegion* p1 = (VMMemRegion*)ptr1;
3863N/A VMMemRegion* p2 = (VMMemRegion*)ptr2;
3863N/A
3863N/A if (p1->addr() != p2->addr()) return false;
3863N/A if ((p1->flags() & MemPointerRecord::tag_masks) !=
3863N/A (p2->flags() & MemPointerRecord::tag_masks)) {
3863N/A return false;
3863N/A }
3863N/A // we do see multiple commit/uncommit on the same memory, it is ok
3863N/A return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
3863N/A (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
3863N/A }
3863N/A#endif
3863N/A};
3863N/A
4058N/Aclass MallocRecordIterator : public MemPointerArrayIterator {
4066N/A private:
3863N/A MemPointerArrayIteratorImpl _itr;
3863N/A
4066N/A
4066N/A
3863N/A public:
4058N/A MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
3863N/A }
3863N/A
4064N/A virtual MemPointer* current() const {
4066N/A#ifdef ASSERT
4066N/A MemPointer* cur_rec = _itr.current();
4066N/A if (cur_rec != NULL) {
4066N/A MemPointer* prev_rec = _itr.peek_prev();
4066N/A MemPointer* next_rec = _itr.peek_next();
4066N/A assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
4066N/A assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
3863N/A }
4066N/A#endif
4066N/A return _itr.current();
3863N/A }
4064N/A virtual MemPointer* next() {
4066N/A MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
4066N/A // arena memory record is a special case, which we have to compare
4066N/A // sequence number against its associated arena record.
4066N/A if (next_rec != NULL && next_rec->is_arena_memory_record()) {
4066N/A MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
4066N/A // if there is an associated arena record, it has to be previous
4066N/A // record because of sorting order (by address) - NMT generates a pseudo address
4066N/A // for arena's size record by offsetting arena's address, that guarantees
4066N/A // the order of arena record and it's size record.
4066N/A if (prev_rec != NULL && prev_rec->is_arena_record() &&
4066N/A next_rec->is_memory_record_of_arena(prev_rec)) {
4066N/A if (prev_rec->seq() > next_rec->seq()) {
4066N/A // Skip this arena memory record
4066N/A // Two scenarios:
4066N/A // - if the arena record is an allocation record, this early
4066N/A // size record must be leftover by previous arena,
4066N/A // and the last size record should have size = 0.
4066N/A // - if the arena record is a deallocation record, this
4066N/A // size record should be its cleanup record, which should
4066N/A // also have size = 0. In other world, arena alway reset
4066N/A // its size before gone (see Arena's destructor)
4066N/A assert(next_rec->size() == 0, "size not reset");
4066N/A return _itr.next();
4066N/A } else {
4066N/A assert(prev_rec->is_allocation_record(),
4066N/A "Arena size record ahead of allocation record");
4066N/A }
4066N/A }
4058N/A }
4066N/A return next_rec;
3863N/A }
3863N/A
4058N/A MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
4058N/A MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
4058N/A void remove() { ShouldNotReachHere(); }
4058N/A bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
4058N/A bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
4058N/A};
// collapse duplicated records. Eliminating duplicated records here is much
// cheaper than during the promotion phase. However, it does have a limitation:
// it can only eliminate duplicated records within a generation, so there is
// still a chance of seeing duplicated records during promotion.
// We want to use the record with the higher sequence number, because it has
// a more accurate callsite pc.
4066N/Aclass VMRecordIterator : public MemPointerArrayIterator {
4066N/A private:
4066N/A MemPointerArrayIteratorImpl _itr;
4066N/A
4064N/A public:
4066N/A VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
4064N/A MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
4064N/A MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
4064N/A while (next != NULL) {
4064N/A assert(cur != NULL, "Sanity check");
4064N/A assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
4064N/A "pre-sort order");
4064N/A
4064N/A if (is_duplicated_record(cur, next)) {
4064N/A _itr.next();
4064N/A next = (MemPointerRecord*)_itr.peek_next();
4064N/A } else {
4064N/A break;
4064N/A }
4064N/A }
4064N/A }
4064N/A
4064N/A virtual MemPointer* current() const {
4064N/A return _itr.current();
4064N/A }
4064N/A
4064N/A // get next record, but skip the duplicated records
4064N/A virtual MemPointer* next() {
4064N/A MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
4064N/A MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
4064N/A while (next != NULL) {
4064N/A assert(cur != NULL, "Sanity check");
4064N/A assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
4064N/A "pre-sort order");
4064N/A
4064N/A if (is_duplicated_record(cur, next)) {
4064N/A _itr.next();
4064N/A cur = next;
4064N/A next = (MemPointerRecord*)_itr.peek_next();
4064N/A } else {
4064N/A break;
4064N/A }
4064N/A }
4064N/A return cur;
4064N/A }
4064N/A
4066N/A MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
4066N/A MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
4066N/A void remove() { ShouldNotReachHere(); }
4066N/A bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
4066N/A bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
4066N/A
4064N/A private:
4064N/A bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
4064N/A bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
4064N/A assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
4064N/A return ret;
4064N/A }
4064N/A};
4064N/A
4058N/Aclass StagingArea : public _ValueObj {
4058N/A private:
4058N/A MemPointerArray* _malloc_data;
4058N/A MemPointerArray* _vm_data;
4058N/A
4058N/A public:
4058N/A StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
4058N/A init();
3863N/A }
3863N/A
4058N/A ~StagingArea() {
4058N/A if (_malloc_data != NULL) delete _malloc_data;
4058N/A if (_vm_data != NULL) delete _vm_data;
4058N/A }
4058N/A
4058N/A MallocRecordIterator malloc_record_walker() {
4058N/A return MallocRecordIterator(malloc_data());
3863N/A }
3863N/A
4064N/A VMRecordIterator virtual_memory_record_walker();
4064N/A
4058N/A bool init();
4058N/A void clear() {
4058N/A assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
4058N/A _malloc_data->shrink();
4058N/A _malloc_data->clear();
4058N/A _vm_data->clear();
3863N/A }
3863N/A
4058N/A inline MemPointerArray* malloc_data() { return _malloc_data; }
4058N/A inline MemPointerArray* vm_data() { return _vm_data; }
3863N/A};
3863N/A
3863N/Aclass MemBaseline;
3863N/Aclass MemSnapshot : public CHeapObj<mtNMT> {
3863N/A private:
3863N/A // the following two arrays contain records of all known lived memory blocks
3863N/A // live malloc-ed memory pointers
3863N/A MemPointerArray* _alloc_ptrs;
3863N/A // live virtual memory pointers
3863N/A MemPointerArray* _vm_ptrs;
3863N/A
4058N/A StagingArea _staging_area;
3863N/A
3863N/A // the lock to protect this snapshot
3863N/A Monitor* _lock;
3863N/A
4168N/A // the number of instance classes
4168N/A int _number_of_classes;
4168N/A
3863N/A NOT_PRODUCT(size_t _untracked_count;)
3863N/A friend class MemBaseline;
3863N/A
3863N/A public:
3863N/A MemSnapshot();
3863N/A virtual ~MemSnapshot();
3863N/A
3863N/A // if we are running out of native memory
4058N/A bool out_of_memory() {
4058N/A return (_alloc_ptrs == NULL ||
4058N/A _staging_area.malloc_data() == NULL ||
4058N/A _staging_area.vm_data() == NULL ||
3863N/A _vm_ptrs == NULL || _lock == NULL ||
3863N/A _alloc_ptrs->out_of_memory() ||
3863N/A _vm_ptrs->out_of_memory());
3863N/A }
3863N/A
3863N/A // merge a per-thread memory recorder into staging area
3863N/A bool merge(MemRecorder* rec);
3863N/A // promote staged data to snapshot
4168N/A bool promote(int number_of_classes);
3863N/A
4168N/A int number_of_classes() const { return _number_of_classes; }
3863N/A
3863N/A void wait(long timeout) {
3863N/A assert(_lock != NULL, "Just check");
3863N/A MonitorLockerEx locker(_lock);
3863N/A locker.wait(true, timeout);
3863N/A }
3863N/A
3863N/A NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
3863N/A NOT_PRODUCT(void check_staging_data();)
3863N/A NOT_PRODUCT(void check_malloc_pointers();)
3863N/A NOT_PRODUCT(bool has_allocation_record(address addr);)
4064N/A // dump all virtual memory pointers in snapshot
4064N/A DEBUG_ONLY( void dump_all_vm_pointers();)
3863N/A
3863N/A private:
4066N/A // copy sequenced pointer from src to dest
4066N/A void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
4066N/A // assign a sequenced pointer to non-sequenced pointer
4066N/A void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src);
4058N/A
4058N/A bool promote_malloc_records(MemPointerArrayIterator* itr);
4058N/A bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
3863N/A};
#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP