/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtTracing,    "Tracing"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // This can happen when type-tagging records lag
                             // behind
};
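
// Note: the summary arrays (_malloc_data, _vm_data and _arena_data) are
// indexed in the same order as the table above; flag2index() performs the
// MEMFLAGS -> index lookup.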

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}


void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}

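// reset() empties the baselined data in place; unlike clear(), it keeps the
// callsite and virtual memory map arrays allocated for reuse.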
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generating an overall summary and
// summaries by memory type
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
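  // Records arrive in memory address order. An arena record may be
  // immediately followed by its arena memory (size) record; the loop below
  // consumes such a pair together so the arena's size is attributed to
  // _arena_data.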
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // Anonymous arenas exist: they are either used as value objects,
      // embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // check whether the next record is this arena's memory (size) record
        MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecord*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks on the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we don't know how many chunks are on the free list, so just report 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}
3863N/A
4477N/A// check if there is a safepoint in progress, if so, block the thread
4477N/A// for the safepoint
4477N/Avoid MemBaseline::check_safepoint(JavaThread* thr) {
4477N/A if (SafepointSynchronize::is_synchronizing()) {
4478N/A // grab and drop the SR_lock to honor the safepoint protocol
4478N/A MutexLocker ml(thr->SR_lock());
4477N/A }
4477N/A}

// baseline mmap'd memory records, generating an overall summary and
// summaries by memory type
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
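  // The loop below relies on the snapshot ordering: records arrive in
  // increasing base address order, and a committed region record follows the
  // reserved region that contains it, so 'index', computed when a reserved
  // region is seen, is still valid for the committed regions in the
  // else-branch.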
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsite; only callsites whose total
// allocation exceeds 1KB are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order; details are aggregated by callsite
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // only baseline callsites whose total exceeds 1KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore address order; snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // flush the last callsite record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsite
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx* reserved_rec = NULL;
  VMMemRegionEx* committed_rec = NULL;

  // vm_ptr comes in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // Two regions are consolidated when:
      //   1. they are adjacent
      //   2. they belong to the same memory type
      //   3. they were reserved from the same callsite
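      // For example, a reserved region ending at address X is merged with a
      // reserved region starting at X if both carry the same memory type and
      // callsite pc; the merge happens via expand_region() below.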
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // after inserting the reserved region, grab the pointer to the
        // element now in the virtual memory map array.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // Two regions are consolidated when:
      //   1. they are adjacent
      //   2. they were committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // flush the last callsite record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort into callsite pc order; details are aggregated by callsite
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usage aggregated by
// callsite is also baselined.
// The call can be lengthy, especially when detail tracking info is
// requested, so the method checks for safepoints explicitly.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  Thread* THREAD = Thread::current();
  assert(THREAD->is_Java_thread(), "must be a JavaThread");
  MutexLocker snapshot_locker(snapshot._lock);
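  // the snapshot lock is held for the entire baseline operation, so the
  // snapshot cannot change while it is being baselined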
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  if (_baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  }
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
    if (_baselined) {
      check_safepoint((JavaThread*)THREAD);
      _baselined = baseline_vm_details(snapshot._vm_ptrs);
    }
  }
  return _baselined;
}
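
// A minimal usage sketch (hypothetical caller, for illustration only; the
// snapshot variable and the exact call pattern are assumptions, not code
// from this file):
//
//   MemSnapshot* snapshot = ...;  // a populated NMT snapshot
//   MemBaseline  baseline;
//   if (baseline.baseline(*snapshot, false /* include callsite details */)) {
//     // the baseline now holds summary and per-callsite data for reporting
//   }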

int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */
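// Note: the *_sort_by_size comparators compare (p2, p1), so sorting yields
// descending size order; the *_by_pc and *_by_addr comparators sort ascending.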

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in descending size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in descending reserved-size order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "duplicate pointer");
  return delta;
}