Lines Matching refs:ppa (cross-reference hits for ppa, the page_t ** page-pointer array threaded through the segvn large-page code paths; the number on each line is the match's line number in the source file)

3214 * Check if all pages in ppa array are complete smaller than szc pages and
3216 * entire ppa array is relocated into one szc page. If these conditions are
3225 * If all pages in ppa array happen to be physically contiguous to make one
3230 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3248 pp = ppa[i];
3293 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3304 ASSERT(ppa[i]->p_szc < szc);
3305 if (!page_tryupgrade(ppa[i])) {
3307 page_downgrade(ppa[j]);
3309 *pszc = ppa[i]->p_szc;
3327 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3330 ppa[i]->p_szc = szc;
3333 ASSERT(PAGE_EXCL(ppa[i]));
3334 page_downgrade(ppa[i]);
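The hits from lines 3214-3334 cover segvn_full_szcpages(): it walks ppa checking that every constituent page is smaller than szc and physically contiguous with its predecessor, upgrades each shared lock to exclusive so p_szc can be rewritten, and on any failure downgrades every lock it already upgraded. Below is a minimal user-space model of that all-or-nothing walk; page_t, page_tryupgrade(), and page_downgrade() are mocked stand-ins for the kernel versions, and only the control flow is meant to match.

    #include <stdbool.h>
    #include <stddef.h>

    /* Mock of the kernel page_t; just the fields the pattern needs. */
    typedef struct page {
            unsigned long pfn;      /* models page_pptonum() */
            bool shared;            /* true: shared lock; false: exclusive */
            bool busy;              /* makes the upgrade fail, for the demo */
    } page_t;

    static bool
    page_tryupgrade(page_t *pp)     /* mock: fails if another holder exists */
    {
            if (pp->busy)
                    return (false);
            pp->shared = false;
            return (true);
    }

    static void
    page_downgrade(page_t *pp)      /* mock: exclusive back to shared */
    {
            pp->shared = true;
    }

    /*
     * Succeed only if the frames are contiguous and every lock upgrades;
     * otherwise undo the upgrades already done (cf. lines 3293-3307).
     */
    static bool
    full_szcpages_model(page_t **ppa, size_t npages)
    {
            for (size_t i = 0; i < npages; i++) {
                    if ((i != 0 && ppa[i]->pfn != ppa[i - 1]->pfn + 1) ||
                        !page_tryupgrade(ppa[i])) {
                            while (i-- > 0)
                                    page_downgrade(ppa[i]);
                            return (false);
                    }
            }
            return (true);
    }

In the real routine a successful walk then sets ppa[i]->p_szc = szc after hat_pageunload() (lines 3327-3330) and downgrades the exclusive locks again before returning (lines 3333-3334).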
3346 * page_size(szc)) range and for private segment return them in ppa array.
3352 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3353 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3358 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3520 ppa[pgidx] = pp;
3585 ppa[pgidx] = pp;
3629 ppa[pgidx++] = pp;
3637 ASSERT(ppa[i] != NULL);
3638 ASSERT(PAGE_EXCL(ppa[i]));
3639 ASSERT(ppa[i]->p_vnode == vp);
3640 ASSERT(ppa[i]->p_offset ==
3642 page_downgrade(ppa[i]);
3644 ppa[pages] = NULL;
3653 ASSERT(ppa[i] != NULL);
3654 ASSERT(PAGE_EXCL(ppa[i]));
3655 ASSERT(ppa[i]->p_vnode == vp);
3656 ASSERT(ppa[i]->p_offset ==
3658 page_unlock(ppa[i]);
3660 ppa[0] = NULL;
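Lines 3346-3660 are from segvn_fill_vp_pages(), whose contract the comment fragments spell out: the caller seeds ppa[0] = NULL, and on return either ppa holds the constituent pages shared-locked with a NULL terminator at ppa[pages], or everything has been unlocked and ppa[0] is NULL again so the caller falls back to VOP_GETPAGE(). A sketch of the two exit paths, assuming the illumos page-lock primitives page_downgrade() and page_unlock(); the fill_done() wrapper is hypothetical:

    /* Hypothetical helper showing the two exits of the fill routine. */
    static void
    fill_done(page_t **ppa, pgcnt_t pages, int filled)
    {
            pgcnt_t i;

            if (filled) {
                    /* Hand the pages to the caller shared-locked. */
                    for (i = 0; i < pages; i++)
                            page_downgrade(ppa[i]); /* EXCL -> shared (3642) */
                    ppa[pages] = NULL;              /* terminator (3644) */
            } else {
                    /* Drop every lock and restore the sentinel. */
                    for (i = 0; i < pages; i++)
                            page_unlock(ppa[i]);    /* (3658) */
                    ppa[0] = NULL;  /* caller now uses VOP_GETPAGE() (3660) */
            }
    }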
3791 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3792 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3795 ASSERT((ppa)[i]->p_vnode == \
3796 (ppa)[0]->p_vnode); \
3797 hat_setmod((ppa)[i]); \
3802 ASSERT((ppa)[i]->p_vnode == \
3803 (ppa)[0]->p_vnode); \
3804 if (!hat_ismod((ppa)[i])) { \
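Lines 3791-3804 belong to the SEGVN_UPDATE_MODBITS() macro, which keeps HAT modify bits coherent for VMODSORT vnodes (vnodes whose dirty state is tracked via sorted page lists): a write fault marks every page in ppa dirty up front, while any other fault that would map the pages writable must first verify each page is already dirty. A functionized sketch of that logic, assuming the illumos VM/HAT headers; the PROT_WRITE-stripping step is inferred from the hat_ismod() check at line 3804, not shown in the hits:

    /* Sketch of the modbit bookkeeping as a function (the driver uses
     * a macro so it can update prot in place). */
    static uint_t
    update_modbits(page_t **ppa, pgcnt_t pages, enum seg_rw rw, uint_t prot)
    {
            pgcnt_t i;

            if (!IS_VMODSORT(ppa[0]->p_vnode))
                    return (prot);
            if (rw == S_WRITE) {
                    for (i = 0; i < pages; i++)
                            hat_setmod(ppa[i]);     /* all dirty now (3797) */
            } else if (prot & PROT_WRITE) {
                    for (i = 0; i < pages; i++) {
                            if (!hat_ismod(ppa[i])) {
                                    /* Inferred: map read-only so the first
                                     * store refaults and gets tracked. */
                                    prot &= ~PROT_WRITE;
                                    break;
                            }
                    }
            }
            return (prot);
    }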
3844 page_t **ppa;
3915 ppa = kmem_alloc(ppasize, KM_SLEEP);
3996 ppa[0] = NULL;
3999 segtype == MAP_PRIVATE ? ppa : NULL)) {
4014 vp, off, szc, ppa, &pplist,
4026 ppa[0] == NULL);
4027 if (physcontig && ppa[0] == NULL) {
4031 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4039 ppa[0] = NULL;
4041 &vpprot, ppa, pgsz, seg, a, arw,
4046 ASSERT(PAGE_LOCKED(ppa[i]));
4047 ASSERT(!PP_ISFREE(ppa[i]));
4048 ASSERT(ppa[i]->p_vnode == vp);
4049 ASSERT(ppa[i]->p_offset ==
4120 page_unlock(ppa[i]);
4169 seg, a, prot, ppa, vpage, segvn_anypgsz,
4180 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4187 hat_memload_array(hat, a, pgsz, ppa, prot,
4193 page_unlock(ppa[i]);
4204 pfn = page_pptonum(ppa[0]);
4230 if ((pszc = ppa[0]->p_szc) == szc &&
4236 ASSERT(PAGE_LOCKED(ppa[i]));
4237 ASSERT(!PP_ISFREE(ppa[i]));
4238 ASSERT(page_pptonum(ppa[i]) ==
4240 ASSERT(ppa[i]->p_szc == szc);
4241 ASSERT(ppa[i]->p_vnode == vp);
4242 ASSERT(ppa[i]->p_offset ==
4258 if (PP_ISMIGRATE(ppa[0])) {
4259 page_migrate(seg, a, ppa, pages);
4261 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4264 ppa, prot & vpprot, hat_flag,
4269 page_unlock(ppa[i]);
4303 page_unlock(ppa[i]);
4323 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4342 page_unlock(ppa[i]);
4359 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4364 ppa, prot & vpprot, hat_flag,
4370 ppa[i], prot & vpprot,
4376 page_unlock(ppa[i]);
4390 ASSERT(pszc == ppa[0]->p_szc);
4399 * p_szc of ppa[0] can change since we haven't
4406 szcmtx = page_szc_lock(ppa[0]);
4407 pszc = ppa[0]->p_szc;
4409 ASSERT(ppa[0]->p_szc <= pszc);
4427 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4429 hat_memload_array_region(hat, a, pgsz, ppa,
4434 page_unlock(ppa[i]);
4454 page_unlock(ppa[i]);
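The comment at line 4399 flags a race: p_szc of ppa[0] can still change because not all constituent pages are locked, so the fault path takes page_szc_lock() on the root page before reading p_szc and loading the mapping. A compact sketch of that ordering, assuming page_szc_lock() returns the kmutex_t to drop via mutex_exit(); the hat_memload_array_region() arguments (including rcookie) are abbreviated stand-ins from the surrounding fault path:

    kmutex_t *szcmtx;

    szcmtx = page_szc_lock(ppa[0]);         /* pin p_szc (4406) */
    pszc = ppa[0]->p_szc;                   /* now stable (4407) */
    SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
    hat_memload_array_region(hat, a, pgsz, ppa, prot & vpprot,
        hat_flag, rcookie);
    mutex_exit(szcmtx);                     /* p_szc may change again */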
4468 segvn_relocate_pages(ppa, pplist);
4476 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4481 ASSERT(ppa[i]->p_szc < szc);
4484 ppa[i], prot & vpprot, hat_flag,
4489 hat_memload_array_region(hat, a, pgsz, ppa,
4494 ASSERT(PAGE_SHARED(ppa[i]));
4495 page_unlock(ppa[i]);
4586 kmem_free(ppa, ppasize);
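Taken together, lines 3844-4586 bracket the ppa lifetime in segvn_fault_vnodepages(): the array is kmem_alloc()ed for a single large-page fault and kmem_free()d with the same ppasize on every exit path. The sizing below is inferred rather than quoted, with the +1 slot coming from the NULL terminator stored at ppa[pages] (line 3644):

    /* Inferred per-fault ppa sizing; the exact formula isn't in the hits. */
    pgcnt_t pages = page_get_pagecnt(szc);  /* small pages per szc page */
    size_t ppasize = (pages + 1) * sizeof (page_t *);  /* +1: NULL slot */
    page_t **ppa = kmem_alloc(ppasize, KM_SLEEP);      /* line 3915 */
    /* ... fill, map, unlock ... */
    kmem_free(ppa, ppasize);                /* same size on free (4586) */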
4649 page_t **ppa;
4700 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4732 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4750 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4753 ppa[0]->p_szc <= szc);
4755 ppa[0]->p_szc >= szc);
4761 page_migrate(seg, a, ppa, pages);
4769 hat_memload_array(hat, a, pgsz, ppa,
4777 page_unlock(ppa[i]);
4885 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4890 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
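In segvn_fault_anonpages() (lines 4649-4890) the ppa comes from segvn_szc_cache[ppaszc], a per size-code kmem object cache, rather than a raw kmem_alloc(): large-page anonymous faults are frequent enough that cached allocation pays off. A hypothetical initialization mirroring that layout; the cache names and sizing here are illustrative, not the driver's actual code:

    /* Hypothetical per-szc ppa caches, modeled on segvn_szc_cache[]. */
    static kmem_cache_t *szc_cache[MMU_PAGE_SIZES];

    static void
    szc_cache_init(void)
    {
            char name[32];
            uint_t szc;

            for (szc = 1; szc < mmu_page_sizes; szc++) {
                    size_t len = (page_get_pagecnt(szc) + 1) *
                        sizeof (page_t *);
                    (void) snprintf(name, sizeof (name),
                        "segvn_ppa_%u", szc);
                    szc_cache[szc] = kmem_cache_create(name, len, 0,
                        NULL, NULL, NULL, NULL, NULL, 0);
            }
    }

    /* Per fault: ppa = kmem_cache_alloc(szc_cache[szc], KM_SLEEP);
     * ... kmem_cache_free(szc_cache[szc], ppa); */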
6475 page_t **ppa;
6501 ppa = kmem_alloc(ppasize, KM_SLEEP);
6511 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6527 ppa[pg_idx++] = pp;
6531 if (ppa[0] == NULL) {
6532 kmem_free(ppa, ppasize);
6537 ppa[pg_idx] = NULL;
6540 /* Find each large page within ppa, and adjust its claim */
6542 /* Does ppa cover a single large page? */
6543 if (ppa[0]->p_szc == seg->s_szc) {
6545 err = page_addclaim_pages(ppa);
6547 err = page_subclaim_pages(ppa);
6549 for (i = 0; ppa[i]; i += pgcnt) {
6550 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6552 err = page_addclaim_pages(&ppa[i]);
6554 err = page_subclaim_pages(&ppa[i]);
6561 ASSERT(ppa[i] != NULL);
6562 page_unlock(ppa[i]);
6565 kmem_free(ppa, ppasize);
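Finally, segvn_claim_pages() (lines 6475-6565) builds a NULL-terminated ppa of the locked pages whose availrmem claims must change, then adjusts them: one page_addclaim_pages()/page_subclaim_pages() call when ppa is exactly one seg->s_szc large page, otherwise large page by large page in pgcnt-sized, pfn-aligned chunks. A sketch of the chunked walk; the "claim" flag is a stand-in for however the driver chooses add versus subtract:

    /* Sketch of the per-large-page claim walk (cf. lines 6549-6554). */
    static int
    claim_walk(page_t **ppa, uint_t szc, int claim)
    {
            pgcnt_t pgcnt = page_get_pagecnt(szc);
            pgcnt_t i;
            int err = 1;

            for (i = 0; ppa[i] != NULL; i += pgcnt) {
                    /* Each chunk starts on a large-page boundary. */
                    ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
                    err = claim ? page_addclaim_pages(&ppa[i]) :
                        page_subclaim_pages(&ppa[i]);
                    if (err == 0)
                            break;  /* failed; caller unlocks and bails */
            }
            return (err);
    }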