// defNewGeneration.cpp revision 196
/*
* Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"
//
// DefNewGeneration functions.
// Methods of protected closure types.
}
assert(false, "Do not call.");
}
}
}
// Virtual entry point for full-width oops; forwards to the shared
// (non-virtual) worker so both oop widths use the same keep-alive logic.
void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
  DefNewGeneration::KeepAliveClosure::do_oop_work(p);
}
// Virtual entry point for compressed oops; delegates to the shared
// worker, mirroring the full-width overload above.
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) {
  DefNewGeneration::KeepAliveClosure::do_oop_work(p);
}
}
// Fast-path variant, full-width oops: forward to the fast closure's
// shared worker body.
void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
  DefNewGeneration::FastKeepAliveClosure::do_oop_work(p);
}
// Fast-path variant, compressed oops: forward to the fast closure's
// shared worker body.
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) {
  DefNewGeneration::FastKeepAliveClosure::do_oop_work(p);
}
{}
do {
}
{}
do {
"Failed to finish scan");
}
{
}
{
}
{
}
int level,
const char* policy)
_promo_failure_drain_in_progress(false),
_should_allocate_from_space(false)
{
_eden_space = new ConcEdenSpace(this);
} else {
_eden_space = new EdenSpace(this);
}
_from_space = new ContiguousSpace();
_to_space = new ContiguousSpace();
vm_exit_during_initialization("Could not allocate a new gen space");
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
// allocate the performance counters
// Generation counters -- generation 0, 3 subspaces
}
// Compute sizes
if (eden_size < minimum_eden_size) {
// May happen due to 64Kb rounding, if so adjust eden size back up
}
// If minumum_eden_size != 0, we will not have cleared any
// portion of eden above its top. This can cause newly
// expanded space not to be mangled if using ZapUnusedHeapArea.
// We explicitly do such mangling here.
if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
eden()->mangle_unused_area();
}
// The to-space is normally empty before a compaction so need
// not be considered. The exception is during promotion
// failure handling when to-space can contain live objects.
}
// Exchange the roles of from-space and to-space after a scavenge.
// The associated performance counters are swapped as well so that each
// counter keeps tracking the space that now plays its role.
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  if (UsePerfData) {
    // BUG FIX: the previous code assigned _to_counters = _from_counters
    // without updating _from_counters, leaving both fields pointing at
    // the same CSpaceCounters object and losing the old to-counters.
    // Perform a real swap instead.
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters   = c;
  }
}
// Do not attempt an expand-to-the reserve size. The
// request should properly observe the maximum size of
// the generation so an expand-to-reserve should be
// unnecessary. Also a second call to expand-to-reserve
// value potentially can cause an undue expansion.
// For example if the first expand fail for unknown reasons,
// but the second succeeds and expands the heap to its maximum
// value.
}
}
return success;
}
void DefNewGeneration::compute_new_size() {
// This is called after a gc that includes the following generation
// (which is required to exist.) So from-space will normally be empty.
// Note that we check both spaces, since if scavenge failed they revert roles.
// If not we bail out (otherwise we would have to relocate the objects)
return;
}
"DefNewGeneration cannot be an oldest gen");
"just checking");
// All space sizes must be multiples of Generation::GenGrain.
// Compute desired new generation size based on NewRatio and
// NewSizeThreadIncrease
// Adjust new generation size
bool changed = false;
if (desired_new_size > new_size_before) {
changed = true;
}
// If the heap failed to expand to the desired size,
// "changed" will be false. If the expansion failed
// (and at this point it was expected to succeed),
// ignore the failure (leaving "changed" as false).
}
// bail out of shrinking if objects in eden
changed = true;
}
if (changed) {
if (WizardMode) {
}
gclog_or_tty->cr();
}
}
}
// $$$ This may be wrong in case of "scavenge failure"?
}
assert(false, "NYI -- are you sure you want to call this?");
}
}
}
}
}
}
}
}
}
bool usedOnly) {
}
// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
" will_fail: %s"
" heap_lock: %s"
" free: " SIZE_FORMAT,
size,
}
if (Heap_lock->owned_by_self() ||
// If the Heap_lock is not locked by this thread, this will be called
// again later with the Heap_lock held.
}
}
}
return result;
}
bool is_tlab,
bool parallel) {
// We don't attempt to expand the young generation (but perhaps we should.)
}
bool clear_all_soft_refs,
bool is_tlab) {
"This must be the youngest gen, and not the only gen");
// If the next generation is too full to accomodate promotion
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
return;
}
// Capture heap used before collection (for printing).
// These can be shared for all code paths
IsAliveClosure is_alive(this);
ScanWeakRefClosure scan_weak_ref(this);
"save marks have not been newly set.");
// Weak refs.
// FIXME: Are these storage leaks, or are they resource objects?
#ifdef COMPILER2
#else
#endif // COMPILER2
// Not very pretty.
FastScanClosure fsc_with_no_gc_barrier(this, false);
FastScanClosure fsc_with_gc_barrier(this, true);
"save marks have not been newly set.");
true, // Process younger gens, if any, as
// strong roots.
false,// not collecting permanent generation.
// "evacuate followers".
if (!promotion_failed()) {
// Swap the survivor spaces.
swap_spaces();
// Set the desired survivor size to half the real survivor space
if (PrintGC && !PrintGCDetails) {
}
} else {
"Should not be here unless promotion failure handling is on");
// deallocate stack and it's elements
delete _promo_failure_scan_stack;
if (PrintGCDetails) {
}
// Add to-space to the list of space to compact
// when a promotion failure has occurred. In that
// case there can be live objects in to-space
// as a result of a partial evacuation of eden
// and from-space.
swap_spaces(); // For the sake of uniformity wrt ParNewGeneration::collect().
// Reset the PromotionFailureALot counters.
}
// set new iteration safe limit for the survivor spaces
}
class RemoveForwardPointerClosure: public ObjectClosure {
public:
}
};
_promotion_failed = false;
}
// Undo the effects of an incomplete scavenge after a promotion failure:
// release the side tables of preserved mark words, restoring marks first
// where any were saved.
void DefNewGeneration::remove_forwarding_pointers() {
// Now restore saved marks, if any.
if (_objs_with_preserved_marks != NULL) {
for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
// NOTE(review): loop body is empty -- the per-object restoration
// (reinstalling the i-th saved mark word from _preserved_marks_of_objs
// into the i-th saved oop) appears to be missing; confirm against the
// original source before relying on this path.
}
delete _objs_with_preserved_marks;
delete _preserved_marks_of_objs;
// NOTE(review): the two list pointers are deleted but not reset to NULL
// here; verify the fields are cleared elsewhere so a later call does not
// see dangling pointers.
}
}
if (m->must_be_preserved_for_promotion_failure(obj)) {
if (_objs_with_preserved_marks == NULL) {
}
}
}
// forward to self
_promotion_failed = true;
if (!_promo_failure_drain_in_progress) {
// prevent recursion in copy_to_survivor_space()
_promo_failure_drain_in_progress = true;
_promo_failure_drain_in_progress = false;
}
}
"shouldn't be scavenging this oop");
// Try allocating obj in to-space (unless too old)
}
// Otherwise try allocating obj tenured
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
// is incorrectly set. In any case, its seriously wrong to be here!
}
return old;
}
} else {
// Prefetch beyond obj
// Copy obj
// Increment age if obj still in new generation
}
// Done, insert forward pointer to obj in this header
return obj;
}
if (_promo_failure_scan_stack == NULL) {
}
}
while (_promo_failure_scan_stack->length() > 0) {
}
}
// Snapshot the current allocation point ("top") of each young subspace so
// later traversal can tell which objects were allocated after this call.
void DefNewGeneration::save_marks() {
  // The three subspaces are independent; record each one's mark.
  from()->set_saved_mark();
  eden()->set_saved_mark();
  to()->set_saved_mark();
}
// Discard the saved allocation marks in every young subspace, undoing a
// prior save_marks().
void DefNewGeneration::reset_saved_marks() {
  // The three subspaces are independent; clear each one's mark.
  from()->reset_saved_mark();
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
}
// Returns true if to-space's "top" still equals the mark recorded by the
// last save_marks() call, i.e. no allocation has happened there since.
// NOTE(review): only to-space is checked here; eden and from-space are
// presumably expected to be unchanged as well (perhaps asserted in code
// not visible in this chunk) -- confirm against the original source.
bool DefNewGeneration::no_allocs_since_save_marks() {
return to()->saved_mark_at_top();
}
\
void DefNewGeneration:: \
cl->set_generation(this); \
cl->reset_generation(); \
save_marks(); \
}
if (requestor == this || _promotion_failed) return;
/* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
if (to_space->top() > to_space->bottom()) {
trace("to_space not empty when contribute_scratch called");
}
*/
if (free_words >= MinFreeScratchWords) {
}
}
bool DefNewGeneration::collection_attempt_is_safe() {
return false;
}
"This must be the youngest gen, and not the only gen");
}
// Decide if there's enough room for a full promotion
// When using extremely large edens, we effectively lose a
// large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
// flag to reduce the minimum evacuation space requirements. If
// there is not enough space to evacuate eden during a scavenge,
// the VM will immediately exit with an out of memory error.
// This flag has not been tested
// with collectors other than simple mark & sweep.
//
// Note that with the addition of promotion failure handling, the
// VM will not immediately exit but will undo the young generation
// collection. The parameter is left here for compatibility.
// worst_case_evacuation is based on "used()". For the case where this
// method is called after a collection, this is still appropriate because
// the case that needs to be detected is one in which a full collection
// has been done and has overflowed into the young generation. In that
// case a minor collection will fail (the overflow of the full collection
// means there is no space in the old generation for any promotion).
}
// Check if the heap is approaching full after a collection has
// been done. Generally the young generation is empty at
// a minimum at the end of a collection. If it is not, then
// the heap is approaching full.
if (collection_attempt_is_safe()) {
} else {
if (full) { // we seem to be running out of space
}
}
// update the generation and space performance counters
}
// Refresh the performance counters for this generation; work is done only
// when perf-data collection is enabled.
void DefNewGeneration::update_counters() {
if (UsePerfData) {
// NOTE(review): this guard is empty -- the counter-update calls (eden,
// from, to space counters and the generation counter) appear to be
// missing from this chunk; confirm against the original source.
}
}
}
}
// Short human-readable identifier for this generation, used in GC logging
// and diagnostic output.
const char* DefNewGeneration::name() const {
  static const char* const generation_name = "def new generation";
  return generation_name;
}
// Moved from inline file as they are not called inline
return eden();
}
bool is_tlab) {
// This is the slow-path allocation for the DefNewGeneration.
// Most allocations are fast-path in compiled code.
// We try to allocate from the eden. If that works, we are happy.
// Note that since DefNewGeneration supports lock-free allocation, we
// have to use it here, as well.
return result;
}
do {
// Tell the next generation we reached a limit.
} else {
"invalid state after allocation_limit_reached returned null");
}
} else {
// The allocation failed and the soft limit is equal to the hard limit,
// there are no reasons to do an attempt to allocate
break;
}
// Try to allocate until succeeded or the soft limit can't be adjusted
// If the eden is full and the last collection bailed out, we are running
// out of heap space, and we try to allocate the from-space, too.
// allocate_from_space can't be inlined because that would introduce a
// circular dependency at compile time.
}
return result;
}
bool is_tlab) {
}
// Ensure that _end and _soft_end are the same in eden space.
}
}
return unsafe_max_alloc_nogc();
}