Lines Matching refs:page_t

44 extern page_t *ppvm_base;
53 * The page_t memory for incoming pages is allocated from existing memory
56 * some memory is always reserved ahead of time for page_t allocation.
57 * Each 4MB of reserved page_t's guarantees a 256MB (x64) addition without
58 * page_t allocation. The added 256MB of memory could theoretically
61 #define RSV_SIZE 0x40000000 /* add size with rsrvd page_t's 1G */
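A quick back-of-the-envelope check of the reservation comment above, as a standalone sketch: with an assumed 4KB base page and a roughly 64-byte page_t on x64 (neither size is taken from this listing), 4MB of reserved page_t's describes exactly 256MB of added memory.

/*
 * Sketch of the reservation arithmetic: PAGESIZE / sizeof (page_t)
 * is the ratio that turns reserved page_t memory into addable memory.
 * Both sizes below are assumptions for the demo, not kernel values.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGESIZE	4096ULL		/* assumed x64 base page size */
#define DEMO_PAGE_T	64ULL		/* assumed sizeof (page_t) */

int
main(void)
{
	uint64_t rsv = 4ULL << 20;		/* 4MB of reserved page_t's */
	uint64_t npgs = rsv / DEMO_PAGE_T;	/* page_t's that fit in it */
	uint64_t addable = npgs * DEMO_PAGESIZE; /* memory they can describe */

	/* 4MB / 64 = 65536 page_t's -> 65536 * 4KB = 256MB */
	printf("%llu MB of page_t's covers %llu MB of added memory\n",
	    (unsigned long long)(rsv >> 20),
	    (unsigned long long)(addable >> 20));
	return (0);
}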
70 * The page_t's for the incoming memory are allocated from
77 page_t *pp, *opp, *epp;
86 if ((base + npgs) * sizeof (page_t) > ppvm_size)
91 metapgs = btopr(npgs * sizeof (page_t));
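The metapgs calculation at source line 91 is a bytes-to-pages conversion rounded up: btopr() turns the size of the page_t array for npgs incoming pages into a count of whole metadata pages. A minimal stand-alone sketch follows; btopr_demo() is a local stand-in for the kernel macro, and the 64-byte page_t and 4KB page size are assumptions.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGESHIFT	12
#define DEMO_PAGESIZE	(1ULL << DEMO_PAGESHIFT)
#define DEMO_PAGEOFFSET	(DEMO_PAGESIZE - 1)
#define DEMO_PAGE_T	64ULL			/* assumed sizeof (page_t) */

/* stand-in for the kernel's btopr(): bytes to pages, rounding up */
static uint64_t
btopr_demo(uint64_t bytes)
{
	return ((bytes + DEMO_PAGEOFFSET) >> DEMO_PAGESHIFT);
}

int
main(void)
{
	uint64_t npgs = 65536;			/* pages being added (256MB) */
	uint64_t metapgs = btopr_demo(npgs * DEMO_PAGE_T);

	/* 65536 * 64 bytes = 4MB of metadata -> 1024 metadata pages */
	printf("metapgs = %llu\n", (unsigned long long)metapgs);
	return (0);
}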
96 * Another memseg has page_t's in the same
99 * sizeof (page_t) and therefore 'pp'
107 * If the last page_t in the current page
109 * work. The first part of the page_t is
111 * the page_t will be allocated below.
113 ASSERT(PAGESIZE % sizeof (page_t));
114 pp = (page_t *)P2ROUNDUP((uint64_t)pp, PAGESIZE);
121 * Another memseg has page_t's in the same
124 * sizeof (page_t) and therefore 'epp'
130 ASSERT(PAGESIZE % sizeof (page_t));
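The P2ROUNDUP at source line 114, and the matching adjustment for 'epp', exist because PAGESIZE need not be a multiple of sizeof (page_t): the first or last page_t of a memseg can then share a physical page with a neighbouring memseg's page_t's, so the pointer is bumped to the next page boundary and only whole pages are handled. A small sketch of that rounding, with P2ROUNDUP written out as a local stand-in for the kernel macro.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGESIZE	4096ULL

/* round x up to the next multiple of align (align must be a power of 2) */
#define DEMO_P2ROUNDUP(x, align)	(((x) + (align) - 1) & ~((align) - 1))

int
main(void)
{
	uint64_t pp = 0xfffffe0001234560ULL;	/* unaligned page_t address */
	uint64_t aligned = DEMO_P2ROUNDUP(pp, DEMO_PAGESIZE);

	printf("pp 0x%llx rounds up to 0x%llx\n",
	    (unsigned long long)pp, (unsigned long long)aligned);
	return (0);
}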
149 * contain the page_t's for the incoming memory.
171 page_t *pp;
200 page_t *pp;
217 * Remap a memseg's page_t's to dummy pages. Skip the low/high
225 page_t *pp;
227 page_t *epp;
231 metapgs = btopr(MSEG_NPAGES(seg) * sizeof (page_t));
240 * when page_t size is changed. It is left here as a starting
241 * point if the unaligned page_t size needs to be supported.
247 * Another memseg has page_t's in the same
250 * sizeof (page_t) and therefore 'seg->pages'
258 * If the last page_t in the current page
260 * work. The first part of the page_t is
262 * been called. The second part of the page_t
266 ASSERT(PAGESIZE % sizeof (page_t));
267 pp = (page_t *)P2ROUNDUP((uint64_t)pp, PAGESIZE);
274 * Another memseg has page_t's in the same
277 * sizeof (page_t) and therefore 'seg->epages'
283 ASSERT(PAGESIZE % sizeof (page_t));
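The remap path listed above (source lines 217-283) applies the same boundary rule before a memseg's page_t's are remapped to dummy pages: the first and last metadata pages are skipped when a neighbouring memseg's page_t's occupy the same physical page. The sketch below only shows that range computation; the struct, field names and helper are simplified stand-ins, not the kernel's memseg layout.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGESIZE	4096UL
#define DEMO_P2ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define DEMO_P2ALIGN(x, a)	((x) & ~((a) - 1))

/* hypothetical, simplified memseg: [pages, epages) is the page_t array */
typedef struct demo_memseg {
	uintptr_t pages;	/* first page_t of the segment */
	uintptr_t epages;	/* one past the last page_t */
	int shares_low;		/* neighbour's page_t's share the first page */
	int shares_high;	/* neighbour's page_t's share the last page */
} demo_memseg_t;

/* compute the metadata pages this memseg owns exclusively */
static void
exclusive_meta_range(const demo_memseg_t *seg, uintptr_t *lo, uintptr_t *hi)
{
	*lo = seg->shares_low ?
	    DEMO_P2ROUNDUP(seg->pages, DEMO_PAGESIZE) :
	    DEMO_P2ALIGN(seg->pages, DEMO_PAGESIZE);
	*hi = seg->shares_high ?
	    DEMO_P2ALIGN(seg->epages, DEMO_PAGESIZE) :
	    DEMO_P2ROUNDUP(seg->epages, DEMO_PAGESIZE);
}

int
main(void)
{
	demo_memseg_t seg = { 0x10000100, 0x10042040, 1, 1 };
	uintptr_t lo, hi;

	exclusive_meta_range(&seg, &lo, &hi);
	printf("remap metadata pages [0x%lx, 0x%lx)\n",
	    (unsigned long)lo, (unsigned long)hi);
	return (0);
}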