/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
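// In outline, MutableNUMASpace splits the underlying space into one chunk per
// locality group (lgrp), biases each chunk towards the node that first touches
// it and, when UseAdaptiveNUMAChunkSizing is on, resizes the chunks according
// to the per-lgrp allocation rates (see update_layout(), bias_region() and
// adaptive_chunk_size() below).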
_adaptation_cycles = 0;
_samples_count = 0;
update_layout(true);
}
for (int i = 0; i < lgrp_spaces()->length(); i++) {
delete lgrp_spaces()->at(i);
}
delete lgrp_spaces();
}
#ifndef PRODUCT
// This method should do nothing.
// It can be called on a numa space during a full compaction.
}
// This method should do nothing.
// It can be called on a numa space during a full compaction.
}
// This method should do nothing because numa spaces are not mangled.
}
assert(false, "Do not mangle MutableNUMASpace's");
}
// This method should do nothing.
}
// This method should do nothing.
}
// This method should do nothing.
}
#endif // NOT_PRODUCT
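// The mangling hooks above are intentionally empty: writing a mangling
// pattern would touch the pages and could bind them to an unwanted lgroup
// (see the "Never mangle NUMA spaces" comment further down).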
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
for (int i = 0; i < lgrp_spaces()->length(); i++) {
if (s->free_in_words() > 0) {
while (words_left_to_fill > 0) {
err_msg("Remaining size ("SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
if (!os::numa_has_static_binding()) {
#ifndef ASSERT
if (!ZapUnusedHeapArea) {
}
#endif
if (crossing_start != crossing_end) {
// If the object header crossed a small page boundary we mark the area
// as invalid, rounding it to the page_size() boundary.
}
}
}
}
} else {
if (!os::numa_has_static_binding()) {
#ifdef ASSERT
#else
if (ZapUnusedHeapArea) {
} else {
return;
}
#endif
} else {
return;
}
}
}
}
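// The filler objects above keep the middle chunks walkable even though their
// tops lie below the top() of the space as a whole.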
size_t s = 0;
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
return s;
}
size_t s = 0;
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
return s;
}
if (lgrp_id == -1) {
// This case can occur after the topology of the system has
// changed. Threads can change their location; the new home
// group will be determined during the first allocation
// attempt. For now we can safely assume that all spaces
// have equal size because the whole space will be reinitialized.
if (lgrp_spaces()->length() > 0) {
} else {
assert(false, "There should be at least one locality group");
return 0;
}
}
// That's the normal case, where we know the locality group of the thread.
if (i == -1) {
return 0;
}
}
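// With an unknown lgroup the estimate above is simply an equal 1/N share of
// the space, N being the number of locality groups.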
// Please see the comments for tlab_capacity().
if (lgrp_id == -1) {
if (lgrp_spaces()->length() > 0) {
} else {
assert(false, "There should be at least one locality group");
return 0;
}
}
if (i == -1) {
return 0;
}
}
if (lgrp_id == -1) {
if (lgrp_spaces()->length() > 0) {
} else {
assert(false, "There should be at least one locality group");
return 0;
}
}
if (i == -1) {
return 0;
}
}
// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
// Check if the topology has changed.
// Add new spaces for the new nodes
for (int i = 0; i < lgrp_num; i++) {
bool found = false;
for (int j = 0; j < lgrp_spaces()->length(); j++) {
found = true;
break;
}
}
if (!found) {
}
}
// Remove spaces for the removed nodes.
for (int i = 0; i < lgrp_spaces()->length();) {
bool found = false;
for (int j = 0; j < lgrp_num; j++) {
found = true;
break;
}
}
if (!found) {
delete lgrp_spaces()->at(i);
lgrp_spaces()->remove_at(i);
} else {
i++;
}
}
if (changed) {
}
}
return true;
}
return false;
}
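// In short: a space is added for every lgroup id that appeared in the
// topology and removed for every id that disappeared; update_layout()
// reports whether anything changed so the caller can rebuild the chunks.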
// Bias region towards the first-touching lgrp. Set the right page sizes.
// First we tell the OS which page size we want in the given range. The underlying
// large page can be broken down if we require small pages.
// Then we uncommit the pages in the range.
// And make them local/first-touch biased.
}
}
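// free_region() below simply returns the pages to the OS; on a first-touch
// system the next access will place them on the touching thread's node.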
// Free all pages in the region.
}
}
// Update space layout. Perform adaptation.
if (update_layout(false)) {
// If the topology has changed, make all chunks zero-sized.
// And clear the alloc-rate statistics.
// In the future we may want to handle this more gracefully in order
// to avoid reallocating the pages as much as possible.
for (int i = 0; i < lgrp_spaces()->length(); i++) {
ls->clear_alloc_rate();
}
// A NUMA space is never mangled
initialize(region(),
} else {
bool should_initialize = false;
if (!os::numa_has_static_binding()) {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
should_initialize = true;
break;
}
}
}
if (should_initialize ||
// A NUMA space is never mangled
initialize(region(),
}
}
if (NUMAStats) {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
}
}
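// update() therefore rebuilds every chunk after a topology change, and
// otherwise only reinitializes when needed (plus optional NUMAStats sampling).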
// Scan pages. Free pages that have smaller size or wrong placement.
{
if (pages_per_chunk > 0) {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
}
}
// Accumulate statistics about the allocation rate of each lgrp.
if (UseAdaptiveNUMAChunkSizing) {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
}
if (NUMAStats) {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
}
}
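// The allocation-rate samples gathered here feed adaptive_chunk_size(),
// which uses them to decide how much of the space each lgroup gets.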
// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
if (i == 0) {
} else {
}
} else {
}
}
return 0;
}
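// I.e. chunk i spans from the end of chunk i-1 (or from the bottom for
// i == 0) to its own end, so the sizes stay consistent while the chunk ends
// are being moved.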
// Return the default chunk size by equally dividing the space.
// page_size() aligned.
}
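// Roughly capacity / number-of-lgroups, rounded down to a page_size()
// multiple.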
// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
for (int j = 0; j < i; j++) {
}
float alloc_rate = 0;
for (int j = i; j < lgrp_spaces()->length(); j++) {
}
if (alloc_rate > 0) {
}
if (limit > 0) {
if (chunk_size > current_chunk_size(i)) {
if (upper_bound > limit &&
// The resulting upper bound should not exceed the available
// amount of memory (pages_available * page_size()).
}
} else {
}
}
}
return chunk_size;
}
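// Roughly: chunk i grows towards
//   space-to-distribute * alloc_rate(i) / sum of alloc rates of the remaining groups,
// but, when NUMASpaceResizeRate is set, by no more than the per-collection
// limit derived from it, and never beyond the pages actually available.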
// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
// Is there bottom?
// Try to coalesce small pages into a large one.
if (new_region.contains(p)
if (intersection.contains(p)) {
} else {
intersection = MemRegion(p, p);
}
}
}
} else {
*bottom_region = MemRegion();
}
// Is there top?
// Try to coalesce small pages into a large one.
if (new_region.contains(p)
if (intersection.contains(p)) {
} else {
intersection = MemRegion(p, p);
}
}
}
} else {
*top_region = MemRegion();
}
}
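// Both tails are aligned to page_size() (or to the large-page boundary when
// the neighbouring small pages can be coalesced into one large page), so they
// can be freed and re-biased independently of the intersection.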
// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. If the invalid region lies strictly inside the
// intersection, return it non-empty and aligned to the page_size() boundary;
// otherwise it is merged away and returned empty.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
*invalid_region = MemRegion();
} else
if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
*invalid_region = MemRegion();
} else
*invalid_region = MemRegion();
} else
// That's the only case in which we have to make an additional bias_region() call.
if (new_region.contains(p)) {
start = p;
}
end = p;
}
}
}
}
}
}
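// Summary of the cases above: if the invalid region sticks out past either
// end of the intersection, the intersection is trimmed and the invalid region
// dropped; only an invalid region lying strictly inside the intersection
// survives (page-aligned) and requires the extra bias_region() call.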
bool clear_space,
bool mangle_space,
bool setup_pages) {
// Must always clear the space
// Compute chunk sizes
size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
// Try small pages if the chunk size is too small
}
// Handle space resize
// If the page size got smaller we have to change
// the page size preference for the whole space.
}
}
// Check whether the space layout has changed significantly.
// This happens when the space has been resized so that either the head or
// the tail chunk has become smaller than a page.
current_chunk_size(0) > page_size() &&
for (int i = 0; i < lgrp_spaces()->length(); i++) {
old_region = s->region();
if (!UseAdaptiveNUMAChunkSizing ||
(UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
// No adaptation. Divide the space equally.
} else
if (!layout_valid || NUMASpaceResizeRate == 0) {
// Fast adaptation. If no space resize rate is set, resize
// the chunks instantly.
chunk_byte_size = adaptive_chunk_size(i, 0);
} else {
// Slow adaptation. Resize the chunks moving no more than
// NUMASpaceResizeRate bytes per collection.
}
}
if (i == 0) { // Bottom chunk
} else {
}
} else
} else { // Top chunk
}
// The general case:
// |---------------------|--invalid---|--------------------------|
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
// |----old_region----|
// The intersection part has all pages in place; we don't need to migrate them.
// Pages for the top and bottom part should be freed and then reallocated.
}
if (!os::numa_has_static_binding()) {
// The invalid region is a range of memory that could possibly have
// been allocated on another node. That's relevant only on Solaris, where
// there is no static memory binding.
if (!invalid_region.is_empty()) {
}
}
if (!os::numa_has_static_binding()) {
// If that's a system with the first-touch policy then it's enough
// to free the pages.
} else {
// In a system with static binding we have to change the bias whenever
// we reshape the heap.
}
// Clear space (set top = bottom) but never mangle.
s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
}
}
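// To summarize initialize(): the space is cut into per-lgrp chunks (equally,
// or adaptively when UseAdaptiveNUMAChunkSizing allows it), each chunk is
// split into the part it keeps and the tails it gives up, the tails and any
// invalid range are freed or re-biased, and finally each chunk is cleared
// without mangling.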
// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
bool found_top = false;
for (int i = 0; i < lgrp_spaces()->length();) {
// Check if setting the chunk's top to the given value would create a hole
// smaller than a minimal object; this doesn't matter for the last chunk.
// Add a minimum size filler object; it will cross the chunk boundary.
value += min_fill_size;
// Restart the loop from the same chunk, since the value has moved
// to the next one.
continue;
}
}
}
found_top = true;
} else {
if (found_top) {
} else {
}
}
}
i++;
}
}
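// If the requested top would leave a gap smaller than a minimal object at a
// chunk boundary, a filler object is inserted and the top is bumped past it,
// so every chunk below the new top stays parsable.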
for (int i = 0; i < lgrp_spaces()->length(); i++) {
// Never mangle NUMA spaces because the mangling will
// bind the memory to a possibly unwanted lgroup.
}
}
/*
Linux supports static memory binding, therefore most of the logic dealing
with possible invalid page allocation is effectively disabled. Linux also
has no notion of a home node: a thread may migrate freely, although the
scheduler is rather reluctant to move threads between nodes. We check for
the current node on every allocation, and with high probability a thread
stays on the same node long enough to allow local access to recently
allocated objects.
*/
}
// It is possible that a new CPU has been hotplugged and
// we haven't reshaped the space accordingly.
if (i == -1) {
}
if (p != NULL) {
p = NULL;
}
}
if (p != NULL) {
}
}
// Make the page allocation happen here if there is no static binding.
*(int*)i = 0;
}
}
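// Without static binding the pages are placed on first touch, so writing one
// word per page here is what actually allocates them on this thread's node.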
if (p == NULL) {
}
return p;
}
// This version is lock-free.
}
// It is possible that a new CPU has been hotplugged and
// we haven't reshaped the space accordingly.
if (i == -1) {
}
if (p != NULL) {
if (s->cas_deallocate(p, size)) {
// We were the last to allocate and created a fragment less than
// a minimal object.
p = NULL;
} else {
guarantee(false, "Deallocation should always succeed");
}
}
}
if (p != NULL) {
break;
}
}
}
// Make the page allocation happen here if there is no static binding.
*(int*)i = 0;
}
}
if (p == NULL) {
}
return p;
}
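// cas_allocate() mirrors allocate() but is lock-free: an allocation that
// would leave a tail smaller than a minimal object is rolled back with
// cas_deallocate(), and the same explicit touch forces first-touch placement.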
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
}
}
for (int i = 0; i < lgrp_spaces()->length(); i++) {
if (NUMAStats) {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
}
}
}
}
// This can be called after setting an arbitrary value to the space's top,
// so an object can cross the chunk boundary. We ensure the parsability
// of the space and just walk the objects in a linear fashion.
MutableSpace::verify();
}
// Scan pages and gather stats about page placement and size.
space_stats()->_large_pages++;
} else {
space_stats()->_small_pages++;
}
} else {
}
} else {
p += os::vm_page_size();
}
} else {
return;
}
}
}
}
// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found they are freed in the hope that a subsequent
// reallocation will be more successful.
{
}
char *s = scan_start;
while (s < scan_end) {
if (e == NULL) {
break;
}
if (e != scan_end) {
&& page_expected.size != 0) {
}
}
s = e;
}
}