Lines Matching defs:mblock

48  * tie together cpu nodes and mblock nodes, and contain mask and match
49 * properties that identify the portion of an mblock that belongs to the
51 * but an mblock defines Real Addresses (RA). To translate, the mblock
55 * (ra + mblock.ra_to_pa) & lgroup.mask == lgroup.match
69 * has exactly 1 mem_node, and plat_pfn_to_mem_node() must find the mblock
70 * containing a pfn, apply the mblock's ra_to_pa adjustment, and extract the
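
The RA-to-lgroup relationship described above can be modeled in a few lines of plain C. This is a minimal user-space sketch, not the kernel code: the struct and function names (ra_mblock, ra_lgroup, ra_in_lgroup) and the example values are hypothetical, and only the mask, match, and ra_to_pa fields named in the comment are assumed.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified shapes of the MD nodes described above. */
    struct ra_mblock {
            uint64_t base;          /* start of the mblock in RA space */
            uint64_t size;          /* size in bytes */
            uint64_t ra_to_pa;      /* address-congruence-offset from the MD */
    };

    struct ra_lgroup {
            uint64_t mask;          /* mask property of the mem-latency-group */
            uint64_t match;         /* match property of the mem-latency-group */
    };

    /*
     * A real address ra is a member of the lgroup if, after translating
     * RA to PA with the mblock's ra_to_pa offset, the masked PA equals
     * the lgroup's match value:
     *
     *      (ra + mblock.ra_to_pa) & lgroup.mask == lgroup.match
     */
    int
    ra_in_lgroup(uint64_t ra, const struct ra_mblock *mb,
        const struct ra_lgroup *lg)
    {
            return (((ra + mb->ra_to_pa) & lg->mask) == lg->match);
    }

    int
    main(void)
    {
            struct ra_mblock mb = { 0x0, 0x80000000ULL, 0x10000000ULL };
            struct ra_lgroup lg = { 0x400000000ULL, 0x0 };

            (void) printf("ra 0x1000 in lgroup: %d\n",
                ra_in_lgroup(0x1000, &mb, &lg));
            return (0);
    }

plat_pfn_to_mem_node() then does essentially the reverse: find the mblock containing the pfn, apply its ra_to_pa adjustment, and extract the home bits selected by the mask.
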
112 * Build mblocks based on mblock nodes read from the MD.
173 static pfn_t base_ra_to_pa_pfn = 0; /* ra_to_pa for single mblock memory */
560 * For each mblock, retrieve its data and store it.
582 MPO_STATUS("lgrp_traverse: No mblock nodes detected in Machine "
886 * it is striped across an mblock in a repeating pattern of contiguous memory
891 * The stripe of an mnode that falls within an mblock is described by the type
892 * mem_stripe_t, and there is one mem_stripe_t per mnode per mblock. The
904 * physbase: First valid page in mem_node in the corresponding mblock
905 * physmax: Last valid page in mem_node in mblock
912 * exists: Set to 1 if the mblock has memory in this mem_node stripe.
922 * mblock 0 mblock 1
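
A rough model of the stripe bookkeeping, limited to the three fields spelled out above (physbase, physmax, exists); the real mem_stripe_t may carry additional fields, and the mblock-major array layout and the MAX_* bounds here are assumptions made only for the sketch.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_MNODES      8       /* assumed bound, for the sketch only */
    #define MAX_MBLOCKS     16      /* assumed bound, for the sketch only */

    /* One stripe per (mnode, mblock) pair, as described above. */
    typedef struct mem_stripe_sketch {
            uint64_t physbase;      /* first valid pfn of the mem_node in this mblock */
            uint64_t physmax;       /* last valid pfn of the mem_node in this mblock */
            int exists;             /* 1 if the mblock has memory in this stripe */
    } mem_stripe_sketch_t;

    /* Assumed mblock-major layout; the kernel's indexing may differ. */
    static mem_stripe_sketch_t stripes[MAX_MBLOCKS * MAX_MNODES];

    static mem_stripe_sketch_t *
    stripe_lookup(int mblock, int mnode)
    {
            return (&stripes[mblock * MAX_MNODES + mnode]);
    }

    int
    main(void)
    {
            mem_stripe_sketch_t *ms = stripe_lookup(1, 3);

            ms->physbase = 0x80000;
            ms->physmax = 0x9ffff;
            ms->exists = 1;
            (void) printf("stripe: %lx-%lx\n",
                (unsigned long)ms->physbase, (unsigned long)ms->physmax);
            return (0);
    }
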
1008 * is taken from the home bits. Find the mblock in which
1026 panic("plat_pfn_to_mem_node() failed to find mblock: pfn=%lx\n", pfn);
1049 * Find the mblock in which the pfn falls
1061 panic("plat_rapfn_to_papfn() failed to find mblock: pfn=%lx\n", pfn);
1076 * mblock containing pfn, and return its starting pfn
1079 * from "it", advance to the next mblock, and return its
1085 * Returns: starting pfn for the iteration for the mnode/mblock,
1090 * to advance the pfn within an mblock using address arithmetic;
1096 * The last mblock in the continuation case may be invalid because
1107 struct mblock_md *mblock;
1125 /* Check if mpo is not enabled and we only have one mblock */
1160 * Find the mblock that contains pfn for the mnode's stripe, or the first
1161 * such mblock after pfn; otherwise pfn is out of bounds and we return -1.
1182 mblock = &mpo_mblock[i];
1187 it->mi_ra_to_pa = btop(mblock->ra_to_pa);
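
The iterator fragments above (lines 1107 through 1187) show the iterator being primed: find the mblock that contains the pfn, or the first mblock after it, then cache the mblock's ra_to_pa converted to page units so later advances are plain pfn arithmetic. A compressed user-space sketch of that idea; the type names, the page shift, and the linear search are assumptions.

    #include <stdint.h>
    #include <stddef.h>

    #define PAGESHIFT_SK    13                      /* assumed page shift */
    #define btop_sk(x)      ((x) >> PAGESHIFT_SK)   /* bytes-to-pages, like btop() */

    struct mblock_sketch {
            uint64_t base;          /* RA base, in bytes */
            uint64_t size;          /* size, in bytes */
            uint64_t ra_to_pa;      /* RA-to-PA offset, in bytes */
            uint64_t base_pfn;      /* btop(base) */
            uint64_t end_pfn;       /* btop(base + size - 1) */
    };

    struct iter_sketch {
            const struct mblock_sketch *mi_mblock;  /* current mblock */
            uint64_t mi_ra_to_pa;                   /* cached offset, in pages */
    };

    /*
     * Prime the iterator: locate the mblock containing pfn, or the first
     * mblock that starts after it.  Returns the pfn at which iteration
     * should start, or (uint64_t)-1 if pfn lies beyond the last mblock.
     * With mi_ra_to_pa cached in page units, the caller can advance within
     * the mblock using address arithmetic (pa_pfn = ra_pfn + mi_ra_to_pa).
     */
    uint64_t
    iter_prime(struct iter_sketch *it, const struct mblock_sketch *mb,
        size_t nmblocks, uint64_t pfn)
    {
            size_t i;

            for (i = 0; i < nmblocks; i++) {
                    if (pfn <= mb[i].end_pfn) {
                            it->mi_mblock = &mb[i];
                            it->mi_ra_to_pa = btop_sk(mb[i].ra_to_pa);
                            return (pfn < mb[i].base_pfn ?
                                mb[i].base_pfn : pfn);
                    }
            }
            return ((uint64_t)-1);
    }
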
1238 * Iterate over all the stripes for this mnode (one per mblock),
1370 * If mblock is smaller than the max page size, then
1376 MPO_STATUS("Small mblock spans mnodes; "
1394 * Find start of last large page in mblock in RA space.
1395 * If page extends into the next mblock, verify the
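
The large-page handling above reduces to alignment arithmetic: align the mblock's end down to the large page size to find where the last large page starts, then check whether that page spills past the mblock. A hedged sketch, assuming a power-of-two page size; the helper names are invented for illustration.

    #include <stdint.h>

    /*
     * Start (in RA space) of the large page containing the last byte of
     * the mblock [base, base + size), assuming pgsz is a power of two.
     */
    uint64_t
    last_lgpg_start(uint64_t base, uint64_t size, uint64_t pgsz)
    {
            return ((base + size - 1) & ~(pgsz - 1));
    }

    /*
     * Nonzero if that last large page lies entirely inside the mblock.
     * If it does not, the page extends into the next mblock and the
     * caller has to verify the neighbour before using the large page size.
     */
    int
    last_lgpg_contained(uint64_t base, uint64_t size, uint64_t pgsz)
    {
            uint64_t start = last_lgpg_start(base, size, pgsz);

            return (start >= base && start + pgsz <= base + size);
    }
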
1456 * Allocate memory for mblock and stripe arrays from either static or
1496 "for mblock structures \n");
1538 * Install the mblock config passed in mc as the global configuration.
1556 * Traverse mblock nodes, read the mblock properties from the MD, and
1565 mblock_md_t *mblock = mc->mc_mblocks;
1571 &mblock[i].base);
1580 &mblock[i].size);
1589 PROP_LG_RA_PA_OFFSET, &mblock[i].ra_to_pa);
1593 mblock[i].ra_to_pa = 0;
1595 MPO_DEBUG("mblock[%ld]: base = %lx, size = %lx, "
1597 mblock[i].base,
1598 mblock[i].size,
1599 mblock[i].ra_to_pa);
1602 if (mblock[i].base > mblock[i].base + mblock[i].size) {
1606 mblock[i].base, mblock[i].size);
1612 if (mblock[i].size != 0) {
1613 uint64_t base = mblock[i].base;
1614 uint64_t end = base + mblock[i].size;
1616 mblock[i].base_pfn = btop(base);
1617 mblock[i].end_pfn = btop(end - 1);
1624 "No non-empty mblock nodes were found "
1633 mblock_sort(mblock, mc->mc_nmblocks);
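
Taken together, the fragments above amount to: read base, size, and ra_to_pa for each mblock node (defaulting a missing ra_to_pa to 0), reject a base + size that wraps around, skip empty nodes, convert the byte range to an inclusive pfn range, and finally sort. A sketch of just the validation and pfn conversion, with a stand-in for btop() and without the MD traversal; the page shift is assumed.

    #include <stdint.h>

    #define PAGESHIFT_SK    13                      /* assumed page shift */
    #define btop_sk(x)      ((x) >> PAGESHIFT_SK)   /* stand-in for btop() */

    struct mblock_sketch {
            uint64_t base, size, ra_to_pa;
            uint64_t base_pfn, end_pfn;
    };

    /*
     * Validate one mblock read from the MD and fill in its pfn bounds.
     * Returns 0 on success, -1 if base + size wraps (the overflow test at
     * line 1602 above) or the node is empty and should be dropped.
     */
    int
    mblock_fill(struct mblock_sketch *mb)
    {
            uint64_t end;

            if (mb->base > mb->base + mb->size)     /* wrapped around */
                    return (-1);
            if (mb->size == 0)                      /* empty mblock node */
                    return (-1);

            end = mb->base + mb->size;
            mb->base_pfn = btop_sk(mb->base);
            mb->end_pfn = btop_sk(end - 1);         /* inclusive last pfn */
            return (0);
    }
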
1639 * Update mblock config after a memory DR add. The added range is not
1640 * needed, as we read *all* mblock nodes from the MD. Save the mblocks
1662 MPO_STATUS("No mblock nodes detected in Machine Descriptor\n");
1681 * Allocate a new mblock config, copy old config to the new, modify the new
1692 mblock_md_t *mblock;
1699 * Allocate one extra in case the deletion splits an mblock.
1703 mblock = mc_new->mc_mblocks;
1704 bcopy(mc_old->mc_mblocks, mblock, nmblocks * sizeof (mblock_md_t));
1707 * Find the mblock containing the deleted range and adjust it in
1712 base = btop(mblock[i].base);
1713 end = base + btop(mblock[i].size) - 1;
1716 * Adjust the mblock based on the subset that was deleted.
1724 * The memory to be deleted is an mblock or a subset of
1729 mblock[j] = mblock[j + 1];
1731 bzero(&mblock[nmblocks], sizeof (*mblock));
1735 mblock[j + 1] = mblock[j];
1736 mblock[i].size = ptob(ubase - base);
1737 mblock[i].end_pfn = ubase - 1;
1738 mblock[i + 1].base = ptob(uend + 1);
1739 mblock[i + 1].size = ptob(end - uend);
1740 mblock[i + 1].base_pfn = uend + 1;
1746 mblock[i].base = ptob(uend + 1);
1747 mblock[i].size -= ptob(uend - ubase + 1);
1749 mblock[i].base_pfn = base;
1750 mblock[i].end_pfn = end;
1756 mblock[i].size -= ptob(uend - ubase + 1);
1758 mblock[i].base_pfn = base;
1759 mblock[i].end_pfn = end;
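
The delete path above (lines 1681 through 1759) handles four cases: the deleted pfn range [ubase, uend] covers a whole mblock, falls strictly inside one and splits it, trims the front, or trims the tail. Splitting is why one extra slot is allocated (line 1699). A self-contained sketch of that case analysis, working purely in pfn units; the function name, the return convention, and the single-mblock assumption are illustrative.

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    struct mb_pfn {
            uint64_t base_pfn;      /* first pfn, inclusive */
            uint64_t end_pfn;       /* last pfn, inclusive */
    };

    /*
     * Delete the pfn range [ubase, uend] from the mblock list.  mb[] must
     * have room for nmblocks + 1 entries, because a deletion that lands
     * strictly inside one mblock splits it in two.  Returns the new count.
     * For brevity the range is assumed to fall within a single mblock.
     */
    size_t
    mb_delete_range(struct mb_pfn *mb, size_t nmblocks,
        uint64_t ubase, uint64_t uend)
    {
            size_t i, j;

            for (i = 0; i < nmblocks; i++) {
                    uint64_t base = mb[i].base_pfn;
                    uint64_t end = mb[i].end_pfn;

                    if (ubase < base || uend > end)
                            continue;       /* range is not in this mblock */

                    if (ubase == base && uend == end) {
                            /* Whole mblock deleted: close the gap. */
                            for (j = i; j + 1 < nmblocks; j++)
                                    mb[j] = mb[j + 1];
                            (void) memset(&mb[nmblocks - 1], 0, sizeof (mb[0]));
                            return (nmblocks - 1);
                    } else if (ubase > base && uend < end) {
                            /* Deleted range is interior: split in two. */
                            for (j = nmblocks; j > i + 1; j--)
                                    mb[j] = mb[j - 1];
                            mb[i].end_pfn = ubase - 1;
                            mb[i + 1].base_pfn = uend + 1;
                            mb[i + 1].end_pfn = end;
                            return (nmblocks + 1);
                    } else if (ubase == base) {
                            /* Deleted range trims the front. */
                            mb[i].base_pfn = uend + 1;
                    } else {
                            /* Deleted range trims the tail (uend == end). */
                            mb[i].end_pfn = ubase - 1;
                    }
                    return (nmblocks);
            }
            return (nmblocks);      /* range not found; list unchanged */
    }

Doing the adjustment in a freshly allocated copy of the config, as the fragments above do before installing it, lets the old configuration stay live until the new one replaces it.
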
1782 mblock_md_t *mblock = mc->mc_mblocks;
1798 mc->mc_stripes[i].physbase = mblock[i].base_pfn;
1799 mc->mc_stripes[i].physmax = mblock[i].end_pfn;
1811 base = mblock[i].base;
1812 end = base + mblock[i].size;
1813 ra_to_pa = mblock[i].ra_to_pa;
1826 * Loop over all lgroups covered by the mblock, creating a
1830 /* mblock may not span all lgroups */
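
Lines 1782 through 1830 show the two stripe-building paths: with a single locality group each mblock gets one stripe spanning its whole pfn range (lines 1798-1799), while with MPO enabled the code loops over every lgroup the mblock covers and carves a stripe for each. A minimal sketch of the single-lgroup path only, echoing the hypothetical types used in the earlier sketches.

    #include <stdint.h>
    #include <stddef.h>

    struct mblock_sketch {
            uint64_t base_pfn;      /* first pfn of the mblock */
            uint64_t end_pfn;       /* last pfn of the mblock */
    };

    struct stripe_sketch {
            uint64_t physbase;
            uint64_t physmax;
            int exists;
    };

    /*
     * Single-lgroup case: one stripe per mblock, covering the mblock's
     * whole pfn range, mirroring how lines 1798-1799 above copy base_pfn
     * and end_pfn into physbase/physmax.  The MPO case would instead loop
     * over each lgroup the mblock covers and emit one stripe per lgroup.
     */
    void
    stripes_build_single(const struct mblock_sketch *mb, size_t nmblocks,
        struct stripe_sketch *st)
    {
            size_t i;

            for (i = 0; i < nmblocks; i++) {
                    st[i].physbase = mb[i].base_pfn;
                    st[i].physmax = mb[i].end_pfn;
                    st[i].exists = 1;
            }
    }
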
1921 * if a subset of an mblock is added/deleted.