Lines Matching refs:size (lines in CompactibleFreeListSpace referencing "size")

127 // a chunk of any size.
129 _smallLinearAllocBlock.set(addr, fc->size() ,
130 1024*SmallForLinearAlloc, fc->size());
168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
174 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
176 size_t adjusted_size = adjustObjectSize(size);
181 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
197 // (i.e., cp->space may no longer be "this" so adjust the size again.
200 adjusted_size = cp->space->adjust_object_size_v(size);
247 // to map directly from the object size to the array element.
259 assert(_indexedFreeList[i].size() == (size_t) i,
278 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
285 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
307 assert(fc->size() == mr.word_size(),
311 _smallLinearAllocBlock.set(addr, fc->size() ,
312 1024*SmallForLinearAlloc, fc->size());
457 FreeList<FreeChunk>::print_labels_on(st, "size");
503 assert(sz != 0, "Should always be able to compute a size");
506 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
516 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
824 obj_addr += fc->size();
908 // return a correct size so that the next addr + size below gives us a
916 size_t size;
918 addr < last; addr += size) {
924 size = fc->size();
930 size = cl->do_object_careful(oop(addr));
931 if (size == 0) {
942 // return a correct size so that the next addr + size below gives us a
958 size_t size;
960 addr < end; addr += size) {
966 size = fc->size();
972 size = cl->do_object_careful_m(oop(addr), mr);
973 if (size == 0) {
1001 size_t res = fc->size();
1002 // If the object is still a free chunk, return the size, else it
1005 assert(res != 0, "Block size should not be 0");
1018 assert(res != 0, "Block size should not be 0");
1029 // this variant may return a zero size for a block that is
1030 // under mutation and for which a consistent size cannot be
1044 size_t res = fc->size();
1046 assert(res != 0, "Block size should not be 0");
1053 // We trust the size of any object that has a non-NULL
1062 assert(res != 0, "Block size should not be 0");
1079 return fc->size();
1085 return adjustObjectSize(oop(p)->size());
1182 size_t size = 0;
1197 size += i * _indexedFreeList[i].count();
1199 return size;
1202 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1204 return allocate(size);
1208 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1209 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1212 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1215 assert(size == adjustObjectSize(size),
1219 res = allocate_adaptive_freelists(size);
1221 res = allocate_non_adaptive_freelists(size);
1235 _bt.verify_single_block(res, size);
1236 _bt.verify_not_unallocated(res, size);
1238 debug_only(fc->mangleAllocated(size));
1244 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1247 if (size < _smallLinearAllocBlock._allocation_size_limit) {
1249 res = getChunkFromSmallLinearAllocBlock(size);
1253 if (size < SmallForDictionary) {
1254 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1258 res = (HeapWord*)getChunkFromDictionaryExact(size);
1265 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1268 assert(size == adjustObjectSize(size),
1273 // exact size from small object indexed list if small
1279 // Try allocating exact size from indexTable first
1280 if (size < IndexSetSize) {
1281 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1283 assert(res != (HeapWord*)_indexedFreeList[size].head(),
1289 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1290 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1295 // Don't record chunk off a LinAB? smallSplitBirth(size);
1297 // Raid the exact free lists larger than size, even if they are not
1299 res = (HeapWord*) getChunkFromGreater(size);
1303 res = (HeapWord*) getChunkFromDictionaryExact(size);
1308 res = getChunkFromSmallLinearAllocBlockRemainder(size);
1318 // Depending on the object size, expansion may require refilling either a
1356 assert(ret->size() - numWords >= MinChunkSize,
1358 _bt.allocated((HeapWord*)ret, ret->size());
1372 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1373 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1378 (_smallLinearAllocBlock._word_size == fc->size()),
1379 "Linear allocation block shows incorrect size");
1381 (_smallLinearAllocBlock._word_size == fc->size()));
1385 // allocation block, the size-indexed table of (smaller) free blocks,
1390 } else if (fc->size() < IndexSetSize) {
1407 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1417 fc = getChunkFromDictionary(size);
1424 _bt.verify_single_block((HeapWord*)fc, fc->size());
1425 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1431 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1459 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1461 assert(size >= MinChunkSize, "minimum chunk size");
1462 assert(size < _smallLinearAllocBlock._allocation_size_limit,
1464 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1469 size_t size) {
1471 assert(size >= MinChunkSize, "too small");
1482 res = getChunkFromLinearAllocBlockRemainder(blk, size);
1486 if (blk->_word_size == size) { // exactly satisfied
1489 } else if (size + MinChunkSize <= blk->_refillSize) {
1491 // Update _unallocated_block if the size is such that chunk would be
1510 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1513 split_birth(size);
1518 blk->_word_size -= size;
1519 blk->_ptr += size;
1520 split_birth(size);
1526 _bt.split_block(res, blk_size, size); // adjust block offset table
1533 size_t size) {
1535 assert(size >= MinChunkSize, "too small");
1539 if (blk->_word_size >= size + MinChunkSize) {
1547 blk->_word_size -= size;
1548 blk->_ptr += size;
1549 split_birth(size);
1555 _bt.split_block(res, blk_size, size); // adjust block offset table
1556 _bt.allocated(res, size);
1562 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1564 assert(size < SmallForDictionary, "just checking");
1566 res = _indexedFreeList[size].get_chunk_at_head();
1568 res = getChunkFromIndexedFreeListHelper(size);
1570 _bt.verify_not_unallocated((HeapWord*) res, size);
1571 assert(res == NULL || res->size() == size, "Incorrect block size");
1576 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1580 if (size < SmallForDictionary) {
1581 assert(_indexedFreeList[size].head() == NULL ||
1582 _indexedFreeList[size].surplus() <= 0,
1583 "List for this size should be empty or under populated");
1585 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1591 // Tried small linAB of size 256 (size in indexed list)
1595 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1597 // Do not replenish from an underpopulated size.
1605 if (newFc == NULL && replenish_size > size) {
1617 size_t num_blk = newFc->size() / size;
1619 assert(newFc->size() % size == 0, "Should be integral multiple of request");
1628 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1631 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1633 curFc->set_size(size);
1636 _bt.verify_not_unallocated((HeapWord*) fc, size);
1637 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1638 _bt.mark_block((HeapWord*)curFc, size);
1639 split_birth(size);
1645 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1647 curFc->set_size(size);
1648 _bt.mark_block((HeapWord*)curFc, size);
1649 split_birth(size);
1660 fc = getChunkFromDictionaryExact(size);
1667 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1669 FreeChunk* fc = _dictionary->get_chunk(size);
1673 _bt.allocated((HeapWord*)fc, fc->size());
1674 if (fc->size() >= size + MinChunkSize) {
1675 fc = splitChunkAndReturnRemainder(fc, size);
1677 assert(fc->size() >= size, "chunk too small");
1678 assert(fc->size() < size + MinChunkSize, "chunk too big");
1679 _bt.verify_single_block((HeapWord*)fc, fc->size());
1684 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1686 FreeChunk* fc = _dictionary->get_chunk(size);
1690 _bt.allocated((HeapWord*)fc, fc->size());
1691 if (fc->size() == size) {
1692 _bt.verify_single_block((HeapWord*)fc, size);
1695 assert(fc->size() > size, "get_chunk() guarantee");
1696 if (fc->size() < size + MinChunkSize) {
1699 fc = _dictionary->get_chunk(size + MinChunkSize);
1703 _bt.allocated((HeapWord*)fc, fc->size());
1705 assert(fc->size() >= size + MinChunkSize, "tautology");
1706 fc = splitChunkAndReturnRemainder(fc, size);
1707 assert(fc->size() == size, "chunk is wrong size");
1708 _bt.verify_single_block((HeapWord*)fc, size);
1716 size_t size = chunk->size();
1717 _bt.verify_single_block((HeapWord*)chunk, size);
1719 _bt.freed((HeapWord*)chunk, size);
1731 size_t size = fc->size();
1732 _bt.verify_single_block((HeapWord*) fc, size);
1733 _bt.verify_not_unallocated((HeapWord*) fc, size);
1735 _indexedFreeList[size].return_chunk_at_tail(fc);
1737 _indexedFreeList[size].return_chunk_at_head(fc);
1741 _indexedFreeList[size].verify_stats();
1753 HeapWord* chunk, size_t size) {
1768 size_t old_size = ec->size();
1771 size += old_size;
1776 ec->set_size(size);
1777 debug_only(ec->mangleFreed(size));
1778 if (size < SmallForDictionary) {
1779 lock = _indexedFreeListParLocks[size];
1782 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1787 coalBirth(size);
1792 size_t size) {
1796 _bt.verify_single_block(chunk, size);
1799 fc->set_size(size);
1800 debug_only(fc->mangleFreed(size));
1801 if (size < SmallForDictionary) {
1810 size_t size, bool coalesced) {
1815 _bt.single_block(chunk, size);
1817 addChunkToFreeLists(chunk, size);
1824 size_t size = fc->size();
1827 if (size < SmallForDictionary) {
1832 _bt.verify_single_block((HeapWord*)fc, size);
1838 size_t size = fc->size();
1841 _bt.verify_single_block((HeapWord*)fc, size);
1844 _bt.allocated((HeapWord*)fc, size);
1850 size_t size = fc->size();
1851 _bt.verify_single_block((HeapWord*)fc, size);
1854 verifyIndexedFreeList(size);
1857 _indexedFreeList[size].remove_chunk(fc);
1860 verifyIndexedFreeList(size);
1866 /* A hint is the next larger size that has a surplus.
1867 Start search at a size large enough to guarantee that
1893 /* Requires fl->size >= numWords + MinChunkSize */
1897 size_t oldNumWords = curr->size();
1898 assert(numWords >= MinChunkSize, "Word size is too small");
1918 size_t size = chunk->size();
1919 assert(size > new_size, "Split from a smaller block?");
1921 assert(size == adjustObjectSize(size), "alignment problem");
1922 size_t rem_size = size - new_size;
1934 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1942 split(size, rem_size);
1946 split(size ,rem_size);
2053 "Minimum block size requirement");
2068 // Reset the linAB refill and allocation size limit.
2106 blk->_word_size = fc->size();
2127 #define obj_size(q) adjustObjectSize(oop(q)->size())
2178 gclog_or_tty->print("size[%d] : ", i);
2243 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2244 if (size < SmallForDictionary) {
2245 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2249 return dictionary()->coal_dict_over_populated(size);
2253 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2254 assert(size < SmallForDictionary, "Size too large for indexed list");
2255 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2260 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2261 assert(size < SmallForDictionary, "Size too large for indexed list");
2262 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2267 void CompactibleFreeListSpace::coalBirth(size_t size) {
2268 if (size < SmallForDictionary) {
2269 smallCoalBirth(size);
2271 dictionary()->dict_census_udpate(size,
2277 void CompactibleFreeListSpace::coalDeath(size_t size) {
2278 if(size < SmallForDictionary) {
2279 smallCoalDeath(size);
2281 dictionary()->dict_census_udpate(size,
2287 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2288 assert(size < SmallForDictionary, "Size too large for indexed list");
2289 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2294 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2295 assert(size < SmallForDictionary, "Size too large for indexed list");
2296 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2301 void CompactibleFreeListSpace::split_birth(size_t size) {
2302 if (size < SmallForDictionary) {
2303 smallSplitBirth(size);
2305 dictionary()->dict_census_udpate(size,
2311 void CompactibleFreeListSpace::splitDeath(size_t size) {
2312 if (size < SmallForDictionary) {
2313 smallSplitDeath(size);
2315 dictionary()->dict_census_udpate(size,
2362 res = _sp->adjustObjectSize(p->size());
2369 res = fc->size();
2378 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2379 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2518 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2519 FreeChunk* fc = _indexedFreeList[size].head();
2520 FreeChunk* tail = _indexedFreeList[size].tail();
2521 size_t num = _indexedFreeList[size].count();
2523 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2526 guarantee(fc->size() == size, "Size inconsistency");
2550 FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2554 total_free += fl->count() * fl->size();
2556 FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2657 // Get a chunk of blocks of the right size and update related
2685 // Update stats table entry for this block size
2778 // nn is the number of chunks of size cur_sz that
2780 // "n" chunks of size word_sz each.
2785 // Update split death stats for the cur_sz-size blocks list:
2787 // we just took from the cur_sz-size blocks list and which
2806 size_t fc_size = fc->size();
2812 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2833 // Update birth stats for this block size.
2855 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
2856 dictionary()->dict_census_udpate(fc->size(),
2868 _bt.verify_single_block((HeapWord*)fc, fc->size());
2869 const size_t nn = fc->size() / word_sz;
2872 rem = fc->size() - n * word_sz;
2904 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2918 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2939 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2940 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2946 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2951 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2952 _bt.verify_single_block((HeapWord*)fc, fc->size());
2957 // Update the stats for this block size.
2977 // The "size" of each task is fixed according to rescan_task_size.
3000 // The "size" of each task is fixed according to rescan_task_size.