/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
void* StackObj::operator new(size_t size)   { ShouldNotCallThis(); return 0; };
void  StackObj::operator delete(void* p)    { ShouldNotCallThis(); };
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };
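
// Usage sketch (illustrative, not part of this file): ResourceObj subclasses
// pick their backing storage at the allocation site; MyObj stands in for any
// ResourceObj subclass:
//
//   MyObj* c = new (ResourceObj::C_HEAP, mtInternal) MyObj(); // C heap; must be deleted
//   MyObj* r = new MyObj();   // resource area; reclaimed by the enclosing ResourceMark
//   MyObj  s;                 // stack/embedded; operator new is never involved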
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}
#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such type). Keep it since it is valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}
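
// Worked example of the encoding above (64-bit address chosen purely for
// illustration): for an object at this == 0x7f0000001000 allocated with
// type == RESOURCE_AREA (= 1), operator new stores
//   _allocation_t[0] = ~(0x7f0000001000 + 1)
// so get_allocation_type() recovers the type from the low bits, and
// ~(_allocation_t[0] | allocation_mask) == (uintptr_t)this verifies that the
// field really belongs to this object and is not leftover garbage.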
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this,
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
}
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep current _allocation_t value;
    return *this;
}
ResourceObj::~ResourceObj() {
// allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT
void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}
void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p);
}
//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;
    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      ThreadCritical tc;
      // if we have more than n chunks, free all of them
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;
          _num_chunks = n;
        }
      }
    }
    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
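
// Note (summary, not from the original source): only the three standard chunk
// sizes are pooled; Chunk::operator new below routes any other length straight
// to os::malloc, and the periodic ChunkPoolCleaner below trims each pool back
// to a handful of cached chunks.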
void chunkpool_init() {
ChunkPool::initialize();
}
void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}
//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };   // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};
//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, AllocFailType alloc_failmode, size_t length) {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to the expected arena alignment.
  // expect requested_size but if sizeof(Chunk) isn't the proper aligned size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;   // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}
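
// Lifecycle sketch (illustrative): a ResourceMark snapshots the thread's
// resource arena and, when it goes out of scope, releases everything allocated
// after it; surplus chunks flow back through Chunk::operator delete to the
// pools above:
//
//   { ResourceMark rm;
//     char* buf = NEW_RESOURCE_ARRAY(char, 1024);  // bump-pointer allocation
//     ...
//   } // rm's destructor rolls the arena back; freed chunks return to the pools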
void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}
//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;
  // workaround rare racing condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;                  // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}

void* Arena::operator new(size_t size) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = os::malloc(size, flags);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}
// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare racing condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}
// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max - _hwm); // Size leftover in this Chunk
  Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size. Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);
  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);
  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
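
// For context (sketch; the real inline lives in allocation.hpp): grow() is the
// slow path behind Amalloc's bump-pointer fast path, roughly
//
//   if (_hwm + x > _max) return grow(x, alloc_failmode);
//   else { char* old = _hwm; _hwm += x; return old; }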
// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr, old_size);   // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&               // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {    // Still fits where it sits
    _hwm = c_old+corrected_new_size;            // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old, old_size);       // Mostly done to keep stats accurate
  return new_ptr;
}
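
// Example (illustrative): growing the most recent allocation is free as long
// as it still fits in the current chunk. If p was the last Amalloc result,
//   Arealloc(p, 16, 32, AllocFailStrategy::EXIT_OOM)
// just advances _hwm to p + ARENA_ALIGN(32); only when p is not at the
// high-water mark (or the chunk is full) does the data get copied.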
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue; // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}
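
// Note (summary): under UseMallocOnly the chunks hold only saved malloc
// pointers, so ptr cannot be range-checked against the chunks; the debug
// branch above has to scan every stored pointer, which is why it is
// "really slow".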
#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x & (sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
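
// Layout note (summary): with UseMallocOnly every arena "allocation" is really
// an os::malloc block, and the chunk itself degenerates into a flat table of
// saved char* entries; free_malloced_objects() below walks that table to
// release the blocks when a ResourceMark unwinds.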
//--------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}
// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    free_all((char**)hwm, (char**)max);
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      if (c->next() != NULL) {
        free_all((char**)c->bottom(), (char**)c->top());
      } else {
        free_all((char**)c->bottom(), (char**)hwm2); // last chunk: only up to current hwm
      }
    }
  } else {
    // this chunk was partially used
    free_all((char**)hwm, (char**)hwm2);
  }
}
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
#endif // Non-product