Lines Matching defs:region

53 // The SplitInfo class holds the information needed to 'split' a source region
55 // all the live data in a region is copied to a single destination space (e.g.,
56 // everything live in a region in eden is copied entirely into the old gen).
59 // requires finding a region that does not contain a partial object (i.e., no
60 // live object crosses the region boundary) somewhere near the last object that
62 // region, splitting is necessary for predictable behavior.
64 // A region is always split at the end of the partial object. This avoids
71 // Split points are identified during the summary phase, when region
80 // the partial object on the split region will be copied across a destination
81 // region boundary. This test is made once each time a region is filled, and is
87 // Only regions with partial objects are split; a region without a partial
90 // At most one region is split per space, so the amount of data required is
93 // A region is split only when the destination space would overflow. Once that
103 // source spaces to be "joined" in a single destination region. At the very
105 // join and skip to an out-of-order source region. If the join region was also
106 // the last destination region to which a split region was copied (the most
108 // stop iteration and switch to a new source region at the right point. Basic
113 // destination region with a dummy object and continue filling the next
114 // destination region.
120 // recorded). The very first region cannot have a partial object and thus is
124 // Return true if this split holds data for the specified source region.
127 // The index of the split region, the size of the partial object on that
128 // region and the destination of the partial object.
135 // remainder of the source region.
139 // destination region, this is the address of the destination region;
144 // destination region, this is the address of that word within the partial
148 // Record the data necessary to split the region src_region_idx.
185 // Where the dense prefix ends, or the compacted region begins.
219 // Mask for the bits in a size_t to get an offset within a region.
221 // Mask for the bits in a pointer to get an offset within a region.
223 // Mask for the bits in a pointer to get the address of the start of a region.
240 // Destination address of the region.
243 // The first region containing data destined for this region.
246 // The object (if any) starting in this region and ending in a different
247 // region that could not be updated during the main (parallel) compaction
249 // extends onto a source region. However, the two uses do not overlap in
253 // The starting address of the partial object extending onto the region.
256 // Size of the partial object extending onto the region (words).
259 // Size of live data that lies within this region due to objects that start
260 // in this region (words). This does not include the partial object
261 // extending onto the region (if any), or the part of an object that extends
262 // onto the next region (if any).
265 // Total live data that lies within the region (words).
269 // this region will be copied. At the end of the summary phase, the valid
272 // 0 - data from the region will be compacted completely into itself, or the
273 // region is empty. The region can be claimed and then filled.
274 // 1 - data from the region will be compacted into 1 other region; some
275 // data from the region may also be compacted into the region itself.
276 // 2 - data from the region will be copied to 2 other regions.
282 // A region is claimed for processing by atomically changing the
283 // destination_count to the claimed value (dc_claimed). After a region has
289 // Whether the block table for this region has been filled.
295 // The location of the java heap data that corresponds to this region.
298 // The highest address referenced by objects in this region.
301 // Whether this region is available to be claimed, has been claimed, or has
304 // Minor subtlety: claimed() returns true if the region is marked
305 // completed(), which is desirable since a region must be claimed before it
313 void set_source_region(size_t region) { _source_region = region; }
334 // The type used to represent object sizes within a region.
365 uint _pushed; // 0 until region is pushed onto a stack
392 // Convert region indices to/from RegionData pointers.
393 inline RegionData* region(size_t region_idx) const;
394 inline size_t region(const RegionData* const region_ptr) const;
404 // destination of region n is simply the start of region n. The argument beg
405 // must be region-aligned; end need not be.
423 // Return the number of words between addr and the start of the region
427 // Convert addresses to/from a region index or region pointer.
430 inline HeapWord* region_to_addr(size_t region) const;
431 inline HeapWord* region_to_addr(size_t region, size_t offset) const;
432 inline HeapWord* region_to_addr(const RegionData* region) const;
446 inline size_t region_to_block_idx(size_t region) const;
569 // MT-unsafe claiming of a region. Should only be used during single threaded
605 ParallelCompactData::region(size_t region_idx) const
612 ParallelCompactData::region(const RegionData* const region_ptr) const
644 return region(addr_to_region_idx(addr));
648 ParallelCompactData::region_to_addr(size_t region) const
650 assert(region <= _region_count, "region out of range");
651 return _region_start + (region << Log2RegionSize);
655 ParallelCompactData::region_to_addr(const RegionData* region) const
657 return region_to_addr(pointer_delta(region, _region_data,
662 ParallelCompactData::region_to_addr(size_t region, size_t offset) const
664 assert(region <= _region_count, "region out of range");
666 return region_to_addr(region) + offset;
721 ParallelCompactData::region_to_block_idx(size_t region) const
723 return region << Log2BlocksPerRegion;
862 // A space that is being collected is divided into regions and with each region
863 // is associated an object of type ParallelCompactData. Each region is of a
865 // of objects at the front and back of the region.
867 // region -----+---------------------+----------
872 // by the region. This size includes the part of any live object spanning onto
873 // the region (part of AAA if it is live) from the front, all live objects
874 // contained in the region (BBB and/or CCC if they are live), and the part of
875 // any live objects covered by the region that extends off the region (part of
879 // covered by a region.
881 // The summary phase calculates the total live data to the left of each region
884 // each region XXX quantities such as
886 // - the amount of live data at the beginning of a region from an object
887 // entering the region.
888 // - the location of the first live data on the region
902 // A current exception is that objects that cross a region boundary are moved
905 // has been updated. KKK likely resides in a region to the left of the region
910 // Compaction is done on a region basis. A region that is ready to be filled is
911 // put on a ready list and GC threads take regions off the list and fill them. A
912 // region is ready to be filled if it is empty of live objects. Such a region may
914 // its live objects copied out already. A region that compacts into itself is
917 // region that can be put on the ready list. The regions are atomically added
1065 // Find the first (left-most) region in the range [beg, end) that has at least
1067 // region in the space that is not completely live.
1072 // Return a pointer to the first region in the range [beg, end) that is not
1077 // Return a value indicating the benefit or 'yield' if the compacted region
1079 // candidate region. Higher values are better.
1083 // updating references in the compacted region.
1095 static inline bool dead_space_crosses_boundary(const RegionData* region,
1109 // Fill the region [start, start + words) with live object(s). Only usable
1148 // Add region stealing tasks to the task queue.
1252 // Process the end of the given region range in the dense prefix.
1261 // Update a region in the dense prefix. For each live object
1262 // in the region, update its interior references. For each
1264 // of a region range will be filled to the start of the next
1278 // aligned to a region boundary.
1283 // Determine the next source region, set closure.source() to the start of the
1284 // new region and return the region index. Parameter end_addr is the address one
1293 // Decrement the destination count for each non-empty source region in the
1294 // range [beg_region, region(region_align_up(end_addr))). If the destination
1295 // count for a region goes to 0 and it needs to be filled, enqueue it.
1301 // Fill a region, copying objects from one or more source regions.
1303 static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
1304 fill_region(cm, region);
1307 // Fill in the block table for the specified region.
1465 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
1468 assert(bit > 0, "cannot call this for the first bit/region");
1469 assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
1473 // onto the region, (2) an object does not start at the beginning of the
1474 // region, and (3) an object does not end at the end of the prior region.
1475 return region->partial_obj_size() == 0 &&