Lines Matching defs:svd

266 struct segvn_data *svd = buf;
268 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
269 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
270 svd->svn_trnext = svd->svn_trprev = NULL;
278 struct segvn_data *svd = buf;
280 rw_destroy(&svd->lock);
281 mutex_destroy(&svd->segfree_syncmtx);
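
The two routines excerpted at lines 266-281 are a kmem cache constructor/destructor pair; pre-initialized segvn_data buffers then come back from the segvn_cache allocation seen later in this listing (line 810). A minimal sketch of how such a pair is typically registered, assuming the routine names, which are not shown in the matches above:

	#include <sys/kmem.h>
	#include <vm/seg_vn.h>

	/* assumed names for the constructor/destructor at lines 266-281 */
	static int	segvn_cache_constructor(void *buf, void *cdrarg, int kmflags);
	static void	segvn_cache_destructor(void *buf, void *cdrarg);

	static kmem_cache_t *segvn_cache;

	static void
	segvn_cache_setup(void)
	{
		segvn_cache = kmem_cache_create("segvn_cache",
		    sizeof (struct segvn_data), 0,
		    segvn_cache_constructor, segvn_cache_destructor,
		    NULL, NULL, NULL, 0);
	}

With this arrangement, kmem_cache_alloc(segvn_cache, KM_SLEEP) hands back a buffer whose lock, segfree_syncmtx, and text-replication links are already initialized, which is why the allocation at line 810 does not repeat that work.
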
363 * (unmap segment's address range and set svd->amp to NULL).
545 struct segvn_data *svd;
810 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
813 seg->s_data = (void *)svd;
816 svd->seg = seg;
817 svd->vp = a->vp;
821 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
822 svd->prot = a->prot;
823 svd->maxprot = a->maxprot;
824 svd->pageprot = 0;
825 svd->type = a->type;
826 svd->vpage = NULL;
827 svd->cred = cred;
828 svd->advice = MADV_NORMAL;
829 svd->pageadvice = 0;
830 svd->flags = (ushort_t)a->flags;
831 svd->softlockcnt = 0;
832 svd->softlockcnt_sbase = 0;
833 svd->softlockcnt_send = 0;
834 svd->svn_inz = 0;
835 svd->rcookie = HAT_INVALID_REGION_COOKIE;
836 svd->pageswap = 0;
841 if (svd->type == MAP_SHARED && svd->vp != NULL &&
842 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
843 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
844 segvn_inval_trcache(svd->vp);
848 if ((svd->amp = amp) == NULL) {
849 svd->anon_index = 0;
850 if (svd->type == MAP_SHARED) {
851 svd->swresv = 0;
860 svd->amp = anonmap_alloc(seg->s_size, swresv,
862 svd->amp->a_szc = seg->s_szc;
869 svd->swresv = swresv;
901 svd->anon_index = anon_num;
902 svd->swresv = 0;
919 if (svd->flags & MAP_TEXT) {
923 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
924 svd->amp->a_szc = seg->s_szc;
925 svd->anon_index = 0;
926 svd->swresv = swresv;
968 svd->prot & ~PROT_WRITE, hat_flag);
973 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
985 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
987 if (svd->type == MAP_SHARED)
988 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
989 svd->vp, svd->offset, seg->s_size);
993 ASSERT(svd->amp == NULL);
994 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
995 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
1000 ASSERT(!trok || !(svd->prot & PROT_WRITE));
1001 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
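
The segment-creation matches above (lines 810-1001) show svd being populated from the argument block a. A hedged caller-side sketch of how those fields typically arrive, using the standard segvn_crargs/as_map interface; the function name and the zero-fill example are illustrative, not taken from this listing:

	#include <vm/as.h>
	#include <vm/seg_vn.h>

	/*
	 * Map 'len' bytes of anonymous zero-fill memory at 'addr' in 'as'.
	 * segvn_create() copies crargs.prot/maxprot/type/flags and friends
	 * into the new segment's svd, as the lines above show.
	 */
	static int
	map_zfod(struct as *as, caddr_t addr, size_t len)
	{
		struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		return (as_map(as, addr, len, segvn_create, &crargs));
	}
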
1508 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1510 ulong_t old_idx = svd->anon_index;
1522 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1525 vpp = &svd->vpage[seg_page(seg, addr)];
1532 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1537 if (svd->svn_inz == SEGVN_INZ_ALL ||
1538 (svd->svn_inz == SEGVN_INZ_VPP &&
1546 PAGESIZE, seg, addr, S_READ, svd->cred);
1571 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1589 if ((len = svd->swresv) != 0) {
1590 if (anon_resv(svd->swresv) == 0)
1604 if ((newsvd->vp = svd->vp) != NULL) {
1605 VN_HOLD(svd->vp);
1606 if (svd->type == MAP_SHARED)
1607 lgrp_shm_policy_init(NULL, svd->vp);
1609 newsvd->offset = svd->offset;
1610 newsvd->prot = svd->prot;
1611 newsvd->maxprot = svd->maxprot;
1612 newsvd->pageprot = svd->pageprot;
1613 newsvd->type = svd->type;
1614 newsvd->cred = svd->cred;
1616 newsvd->advice = svd->advice;
1617 newsvd->pageadvice = svd->pageadvice;
1618 newsvd->svn_inz = svd->svn_inz;
1619 newsvd->swresv = svd->swresv;
1620 newsvd->pageswap = svd->pageswap;
1621 newsvd->flags = svd->flags;
1625 newsvd->policy_info = svd->policy_info;
1628 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1632 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1633 svd->tr_state == SEGVN_TR_OFF);
1634 if (svd->tr_state == SEGVN_TR_ON) {
1638 newsvd->tr_state = svd->tr_state;
1644 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1645 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1647 if (svd->type == MAP_SHARED) {
1648 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1653 newsvd->anon_index = svd->anon_index;
1664 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1665 svd->svn_inz == SEGVN_INZ_ALL ||
1666 svd->svn_inz == SEGVN_INZ_VPP);
1698 if (svd->softlockcnt ||
1699 svd->svn_inz != SEGVN_INZ_NONE) {
1709 if (svd->softlockcnt && reclaim == 1) {
1730 svd->anon_index, newsvd->amp->ahp,
1732 svd->vp != NULL);
1734 anon_dup(amp->ahp, svd->anon_index,
1747 if (svd->vpage != NULL) {
1749 struct vpage *ovp = svd->vpage;
1768 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1771 newsvd->rcookie = svd->rcookie;
1816 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1820 ASSERT(svd->vp != NULL);
1826 free_vp_pages(svd->vp, svd->offset + off, len);
1837 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1841 ASSERT(svd->pageswap);
1842 ASSERT(svd->vpage != NULL);
1844 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1846 for (vp = svd->vpage; vp < evp; vp++) {
1857 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1882 if (svd->softlockcnt > 0) {
1883 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1889 if (svd->type == MAP_SHARED) {
1920 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1921 ASSERT(svd->amp == NULL);
1922 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1924 svd->rcookie, HAT_REGION_TEXT);
1925 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1932 } else if (svd->tr_state == SEGVN_TR_INIT) {
1933 svd->tr_state = SEGVN_TR_OFF;
1934 } else if (svd->tr_state == SEGVN_TR_ON) {
1935 ASSERT(svd->amp != NULL);
1937 ASSERT(svd->amp == NULL);
1938 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1950 if (svd->vp) {
1953 error = VOP_DELMAP(svd->vp,
1954 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1955 seg->s_as, addr, len, svd->prot, svd->maxprot,
1956 svd->type, svd->cred, NULL);
1967 if (svd->tr_state == SEGVN_TR_OFF) {
1971 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1972 ASSERT(svd->amp == NULL);
1973 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1974 ASSERT(svd->type == MAP_PRIVATE);
1975 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1977 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1978 } else if (svd->tr_state == SEGVN_TR_ON) {
1979 ASSERT(svd->amp != NULL);
1980 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1982 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1984 if (svd->tr_state != SEGVN_TR_OFF) {
1985 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1986 svd->tr_state = SEGVN_TR_OFF;
1992 if (svd->vp != NULL && free_pages != 0) {
2000 if (svd->type == MAP_SHARED && svd->vp != NULL &&
2001 (svd->vp->v_flag & VVMEXEC) &&
2002 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2003 segvn_inval_trcache(svd->vp);
2018 amp = svd->amp;
2025 if (svd->vpage != NULL) {
2029 ovpage = svd->vpage; /* keep pointer to vpage */
2032 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2033 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2040 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2046 if (svd->type == MAP_SHARED) {
2048 ASSERT(svd->softlockcnt == 0);
2057 svd->anon_index, len,
2061 svd->anon_index,
2065 ASSERT(svd->type == MAP_SHARED);
2068 svd->anon_index, len);
2076 if (svd->type == MAP_SHARED) {
2083 svd->anon_index += dpages;
2085 if (svd->vp != NULL)
2086 svd->offset += len;
2091 if (svd->swresv) {
2092 if (svd->flags & MAP_NORESERVE) {
2094 oswresv = svd->swresv;
2096 svd->swresv = ptob(anon_pages(amp->ahp,
2097 svd->anon_index, npages));
2098 anon_unresv_zone(oswresv - svd->swresv,
2102 svd->swresv;
2106 if (svd->pageswap) {
2107 oswresv = svd->swresv;
2108 svd->swresv =
2110 ASSERT(oswresv >= svd->swresv);
2111 unlen = oswresv - svd->swresv;
2113 svd->swresv -= len;
2114 ASSERT(svd->swresv == seg->s_size);
2131 if (svd->vpage != NULL) {
2135 ovpage = svd->vpage; /* keep pointer to vpage */
2138 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2139 bcopy(ovpage, svd->vpage, nbytes);
2147 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2151 ulong_t an_idx = svd->anon_index + npages;
2158 if (svd->type == MAP_SHARED) {
2160 ASSERT(svd->softlockcnt == 0);
2174 ASSERT(svd->type == MAP_SHARED);
2185 if (svd->type == MAP_SHARED) {
2196 if (svd->swresv) {
2197 if (svd->flags & MAP_NORESERVE) {
2199 oswresv = svd->swresv;
2200 svd->swresv = ptob(anon_pages(amp->ahp,
2201 svd->anon_index, npages));
2202 anon_unresv_zone(oswresv - svd->swresv,
2206 svd->swresv;
2210 if (svd->pageswap) {
2211 oswresv = svd->swresv;
2212 svd->swresv =
2214 ASSERT(oswresv >= svd->swresv);
2215 unlen = oswresv - svd->swresv;
2217 svd->swresv -= len;
2218 ASSERT(svd->swresv == seg->s_size);
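
The swap accounting in the two unmap cases above (lines 2091-2114 and 2196-2218) follows one rule: svd->swresv holds the bytes of reserved swap still attributable to the segment. A hedged numeric illustration (page counts invented): unmapping len = ptob(3) from a fully reserved 10-page segment leaves svd->swresv = ptob(7), which the ASSERT checks equals the shrunken seg->s_size; under MAP_NORESERVE only populated anon slots were ever reserved, so if anon_pages() now reports 4 remaining slots the new reservation is ptob(4) and the difference oswresv - svd->swresv is released via anon_unresv_zone().
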
2248 *nsvd = *svd;
2250 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2255 nsvd->svn_inz = svd->svn_inz;
2258 if (svd->vp != NULL) {
2263 crhold(svd->cred);
2265 if (svd->vpage == NULL) {
2272 ovpage = svd->vpage; /* keep pointer to vpage */
2276 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2278 bcopy(ovpage, svd->vpage, nbytes);
2301 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2305 ulong_t an_idx = svd->anon_index + opages;
2312 if (svd->type == MAP_SHARED) {
2314 ASSERT(svd->softlockcnt == 0);
2327 ASSERT(svd->type == MAP_SHARED);
2337 if (svd->type == MAP_SHARED) {
2343 nsvd->anon_index = svd->anon_index +
2345 if (svd->type == MAP_SHARED) {
2352 ASSERT(svd->type == MAP_PRIVATE);
2356 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2361 svd->anon_index = 0;
2369 if (svd->swresv) {
2370 if (svd->flags & MAP_NORESERVE) {
2372 oswresv = svd->swresv;
2373 svd->swresv = ptob(anon_pages(amp->ahp,
2374 svd->anon_index, btop(seg->s_size)));
2377 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2378 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2382 (svd->swresv + nsvd->swresv);
2386 if (svd->pageswap) {
2387 oswresv = svd->swresv;
2388 svd->swresv = segvn_count_swap_by_vpages(seg);
2390 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2391 unlen = oswresv - (svd->swresv + nsvd->swresv);
2394 svd->swresv) {
2399 svd->swresv = seg->s_size;
2416 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2426 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2428 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2440 if (svd->vpage != NULL) {
2441 kmem_free(svd->vpage, vpgtob(npages));
2442 svd->vpage = NULL;
2444 if ((amp = svd->amp) != NULL) {
2453 if (svd->type == MAP_PRIVATE) {
2460 svd->anon_index, seg->s_size,
2463 anon_free(amp->ahp, svd->anon_index,
2473 ASSERT(svd->softlockcnt == 0);
2494 svd->amp = NULL;
2497 } else if (svd->type == MAP_PRIVATE) {
2504 anon_free_pages(amp->ahp, svd->anon_index,
2507 anon_free(amp->ahp, svd->anon_index,
2519 if ((len = svd->swresv) != 0) {
2520 anon_unresv_zone(svd->swresv,
2525 seg->s_as->a_resvsize -= svd->swresv;
2526 svd->swresv = 0;
2532 if (svd->vp != NULL) {
2533 if (svd->type == MAP_SHARED)
2534 lgrp_shm_policy_fini(NULL, svd->vp);
2535 VN_RELE(svd->vp);
2536 svd->vp = NULL;
2538 crfree(svd->cred);
2539 svd->pageprot = 0;
2540 svd->pageadvice = 0;
2541 svd->pageswap = 0;
2542 svd->cred = NULL;
2549 ASSERT(svd->softlockcnt == 0);
2550 mutex_enter(&svd->segfree_syncmtx);
2551 mutex_exit(&svd->segfree_syncmtx);
2554 kmem_cache_free(segvn_cache, svd);
2566 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2576 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2578 if ((amp = svd->amp) != NULL)
2579 anon_index = svd->anon_index + seg_page(seg, addr);
2581 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2582 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2583 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2594 vp = svd->vp;
2595 offset = svd->offset +
2600 vp = svd->vp;
2601 offset = svd->offset +
2631 ASSERT(svd->softlockcnt >= btop(len));
2632 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
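
The decrement at line 2632 balances the per-page softlockcnt increments taken while pages were SOFTLOCKed; when the count crosses zero, a waiter blocked in as_unmap() must be woken. A hedged sketch of that zero-crossing wakeup, following the usual illumos pattern as a continuation of the if at line 2632; the as.h names are the standard ones and do not appear in these matches:

	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
		/*
		 * All softlocks in this segment are gone; if an unmap is
		 * waiting for that, clear the flag and wake it up.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
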
2712 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2729 if (svd->flags & MAP_TEXT) {
2733 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2735 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2741 if (svd->pageprot) {
2764 prot = svd->prot;
2768 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2776 if ((amp = svd->amp) != NULL) {
2778 anon_index = svd->anon_index + seg_page(seg, addr);
2783 if (svd->vp == NULL && amp != NULL) {
2789 if (svd->flags & MAP_NORESERVE) {
2792 atomic_add_long(&svd->swresv, ptob(1));
2801 svd->cred)) == NULL) {
2861 (svd->flags & MAP_NORESERVE) &&
2864 ASSERT(svd->type == MAP_PRIVATE);
2879 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2898 seg, addr, rw, svd->cred);
2902 if (svd->type == MAP_SHARED) {
2928 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2953 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2955 } else if (svd->tr_state == SEGVN_TR_ON) {
2960 ASSERT(svd->vp != NULL);
2986 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2987 (!svd->pageprot && svd->prot == (prot & vpprot)));
2989 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2991 svd->rcookie);
3002 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3037 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3068 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3070 atomic_add_long(&svd->swresv, ptob(1));
3079 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3115 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3129 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3357 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3488 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3501 B_READ, svd->cred, NULL);
3558 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3569 B_READ, svd->cred, NULL);
3634 if (svd->type == MAP_PRIVATE) {
3787 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3828 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3829 struct anon_map *amp = svd->amp;
3830 uchar_t segtype = svd->type;
3839 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3840 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3841 struct vpage *vpage = (svd->vpage != NULL) ?
3842 &svd->vpage[seg_page(seg, a)] : NULL;
3843 vnode_t *vp = svd->vp;
3864 int tron = (svd->tr_state == SEGVN_TR_ON);
3871 ASSERT(!(svd->flags & MAP_NORESERVE));
3875 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3878 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3883 if (svd->flags & MAP_TEXT) {
3887 if (svd->pageprot) {
3904 prot = svd->prot;
3979 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3990 atomic_add_long((ulong_t *)&svd->softlockcnt,
4013 physcontig = segvn_fill_vp_pages(svd,
4042 svd->cred, NULL);
4079 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4170 tron ? PG_LOCAL : 0, svd->cred);
4185 ASSERT(svd->rcookie ==
4201 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4202 (!svd->pageprot && svd->prot == (prot & vpprot)));
4265 svd->rcookie);
4365 svd->rcookie);
4371 hat_flag, svd->rcookie);
4430 prot & vpprot, hat_flag, svd->rcookie);
4485 svd->rcookie);
4490 prot & vpprot, hat_flag, svd->rcookie);
4554 off = svd->offset + (uintptr_t)(a - seg->s_base);
4555 aindx = svd->anon_index + seg_page(seg, a);
4556 vpage = (svd->vpage != NULL) ?
4557 &svd->vpage[seg_page(seg, a)] : NULL;
4577 off = svd->offset +
4579 aindx = svd->anon_index + seg_page(seg, a);
4580 vpage = (svd->vpage != NULL) ?
4581 &svd->vpage[seg_page(seg, a)] : NULL;
4610 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4611 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4615 ASSERT(svd->softlockcnt == 0);
4622 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4637 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4638 struct anon_map *amp = svd->amp;
4639 uchar_t segtype = svd->type;
4646 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4647 struct vpage *vpage = (svd->vpage != NULL) ?
4648 &svd->vpage[seg_page(seg, a)] : NULL;
4658 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4663 ASSERT(!(svd->flags & MAP_NORESERVE));
4666 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4667 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4669 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4674 if (svd->flags & MAP_TEXT) {
4678 if (svd->pageprot) {
4696 prot = svd->prot;
4705 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4726 atomic_add_long((ulong_t *)&svd->softlockcnt,
4733 segvn_anypgsz, pgflags, svd->cred);
4739 (ulong_t *)&svd->softlockcnt,
4763 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4854 aindx = svd->anon_index + seg_page(seg, a);
4855 vpage = (svd->vpage != NULL) ?
4856 &svd->vpage[seg_page(seg, a)] : NULL;
4877 aindx = svd->anon_index + seg_page(seg, a);
4878 vpage = (svd->vpage != NULL) ?
4879 &svd->vpage[seg_page(seg, a)] : NULL;
4925 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4941 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4944 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4954 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4960 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4964 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4965 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4967 if (svd->tr_state == SEGVN_TR_INIT) {
4968 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4969 if (svd->tr_state == SEGVN_TR_INIT) {
4970 ASSERT(svd->vp != NULL && svd->amp == NULL);
4971 ASSERT(svd->flags & MAP_TEXT);
4972 ASSERT(svd->type == MAP_PRIVATE);
4974 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4975 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4976 svd->amp != NULL);
4978 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4980 } else if (svd->tr_state != SEGVN_TR_OFF) {
4981 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4983 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4984 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4985 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4989 if (svd->tr_state == SEGVN_TR_ON) {
4990 ASSERT(svd->vp != NULL && svd->amp != NULL);
4992 ASSERT(svd->amp == NULL &&
4993 svd->tr_state == SEGVN_TR_OFF);
4994 } else if (svd->tr_state != SEGVN_TR_OFF) {
4995 svd->tr_state = SEGVN_TR_OFF;
4997 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4998 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5002 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5009 if (svd->pageprot == 0) {
5029 if ((svd->prot & protchk) == 0) {
5030 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5035 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5037 ASSERT(svd->amp == NULL);
5038 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5040 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5041 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5046 ASSERT(svd->softlockcnt == 0);
5047 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5049 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5051 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5082 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5100 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5101 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5104 ASSERT(svd->softlockcnt == 0);
5108 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5113 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5121 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5122 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5128 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5129 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5131 if (svd->amp == NULL) {
5132 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5133 svd->amp->a_szc = seg->s_szc;
5135 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5155 amp = svd->amp;
5162 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5164 if (svd->vp == NULL) {
5172 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5173 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5177 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5183 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5184 anon_index = svd->anon_index + page;
5187 svd->tr_state == SEGVN_TR_OFF &&
5188 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5211 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5213 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5219 if (svd->vpage == NULL)
5222 vpage = &svd->vpage[page];
5224 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5232 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5240 if (svd->advice == MADV_SEQUENTIAL ||
5241 (svd->pageadvice &&
5246 vpp = &svd->vpage[fpage];
5248 fanon_index = svd->anon_index + fpage;
5250 while (pgoff > svd->offset) {
5251 if (svd->advice != MADV_SEQUENTIAL &&
5252 (!svd->pageadvice || (vpage &&
5272 fvp = svd->vp;
5278 fvp = svd->vp;
5312 svd->cred, NULL);
5338 if (svd->vp != NULL) {
5381 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5382 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5417 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5422 vp = svd->vp;
5428 svd->cred, NULL);
5430 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5436 if (svd->type == MAP_PRIVATE)
5494 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5502 } else if (svd->vpage) {
5504 vpage = &svd->vpage[++page];
5519 if (svd->pageprot == 0)
5520 prot = svd->prot & vpprot;
5537 if (svd->flags & MAP_TEXT) {
5544 if (svd->tr_state != SEGVN_TR_ON &&
5545 pp->p_offset >= svd->offset &&
5546 pp->p_offset < svd->offset + seg->s_size) {
5548 diff = pp->p_offset - svd->offset;
5554 ASSERT(svd->vp == pp->p_vnode);
5557 if (svd->pageprot)
5558 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5568 anon_index = svd->anon_index + page;
5587 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5592 svd->rcookie);
5603 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5617 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5624 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5625 if ((amp = svd->amp) != NULL) {
5635 svd->anon_index + seg_page(seg, addr))) != NULL) {
5638 0, seg, addr, S_READ, svd->cred);
5641 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5649 if (svd->vp == NULL) {
5650 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5654 vp = svd->vp;
5658 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5660 S_OTHER, svd->cred, NULL);
5662 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5671 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5681 if ((svd->maxprot & prot) != prot)
5684 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5687 if (!svd->pageprot && svd->prot == prot) {
5688 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5697 if (svd->softlockcnt > 0) {
5698 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5704 if (svd->type == MAP_SHARED) {
5705 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5716 if (svd->softlockcnt > 0) {
5717 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5722 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5723 ASSERT(svd->amp == NULL);
5724 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5725 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5727 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5729 } else if (svd->tr_state == SEGVN_TR_INIT) {
5730 svd->tr_state = SEGVN_TR_OFF;
5731 } else if (svd->tr_state == SEGVN_TR_ON) {
5732 ASSERT(svd->amp != NULL);
5734 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5738 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5739 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5740 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5741 segvn_inval_trcache(svd->vp);
5749 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5759 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5765 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5790 if (svd->type == MAP_PRIVATE) {
5792 if (!(svd->flags & MAP_NORESERVE) &&
5793 !(svd->swresv && svd->pageswap == 0)) {
5802 svd->pageswap == 0) {
5813 if (svd->vpage == NULL) {
5815 &svd->lock);
5818 svp = &svd->vpage[seg_page(seg, addr)];
5819 evp = &svd->vpage[seg_page(seg,
5822 if (svd->pageswap == 0) {
5848 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5856 if (svd->pageswap == 0 && sz == seg->s_size) {
5857 svd->swresv = sz;
5859 ASSERT(svd->vpage != NULL);
5860 svd->swresv += sz;
5861 svd->pageswap = 1;
5875 if (svd->swresv != 0 && svd->vp != NULL &&
5876 svd->amp == NULL && addr == seg->s_base &&
5877 len == seg->s_size && svd->pageprot == 0) {
5878 ASSERT(svd->pageswap == 0);
5879 anon_unresv_zone(svd->swresv,
5881 svd->swresv = 0;
5888 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5889 if (svd->prot == prot) {
5890 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5893 svd->prot = (uchar_t)prot;
5894 } else if (svd->type == MAP_PRIVATE) {
5911 if (svd->vpage == NULL) {
5912 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5915 svd->pageprot = 1;
5916 if ((amp = svd->amp) != NULL) {
5917 anon_idx = svd->anon_index + seg_page(seg, addr);
5923 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5924 evp = &svd->vpage[seg_page(seg, addr + len)];
5930 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5960 vp = svd->vp;
6005 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6008 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6014 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6019 if (svd->vpage == NULL) {
6020 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6023 svd->pageprot = 1;
6024 evp = &svd->vpage[seg_page(seg, addr + len)];
6025 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6031 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6036 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6060 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6072 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6074 struct anon_map *amp = svd->amp;
6080 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6104 if (amp != NULL && svd->type == MAP_SHARED) {
6105 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6113 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6119 if (svd->vp != NULL &&
6120 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6124 if (seg->s_szc == 0 && svd->vp != NULL &&
6133 if (svd->pageprot) {
6152 if (svd->softlockcnt > 0) {
6153 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6159 if (svd->type == MAP_SHARED) {
6170 if (svd->softlockcnt > 0) {
6175 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6176 ASSERT(svd->amp == NULL);
6177 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6178 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6180 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6181 } else if (svd->tr_state == SEGVN_TR_INIT) {
6182 svd->tr_state = SEGVN_TR_OFF;
6183 } else if (svd->tr_state == SEGVN_TR_ON) {
6184 ASSERT(svd->amp != NULL);
6186 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6280 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6283 ASSERT(svd->type == MAP_PRIVATE);
6292 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6300 svd->anon_index = 0;
6304 if (svd->vp != NULL && szc != 0) {
6306 u_offset_t eoffpage = svd->offset;
6310 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6328 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6329 seg->s_size, szc, svd->prot, svd->vpage,
6330 svd->cred)) != 0) {
6334 segvn_setvnode_mpss(svd->vp);
6339 if (svd->type == MAP_PRIVATE) {
6355 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6356 struct anon_map *amp = svd->amp;
6362 ulong_t an_idx = svd->anon_index;
6363 vnode_t *vp = svd->vp;
6364 struct vpage *vpage = svd->vpage;
6367 uint_t prot = svd->prot, vpprot;
6371 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6372 ASSERT(svd->softlockcnt == 0);
6375 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6380 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6381 ASSERT(svd->amp == NULL);
6382 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6383 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6385 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6386 } else if (svd->tr_state == SEGVN_TR_ON) {
6387 ASSERT(svd->amp != NULL);
6389 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6392 if (svd->tr_state != SEGVN_TR_OFF) {
6393 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6394 svd->tr_state = SEGVN_TR_OFF;
6406 if (amp == NULL || svd->type == MAP_SHARED) {
6422 ASSERT(vpage != NULL || svd->pageprot == 0);
6432 seg, a, prot, vpage, svd->cred)) != 0) {
6441 svd->cred))) {
6445 anon_pl[0], pageflag, svd->cred)) == NULL) {
6476 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6477 struct anon_map *amp = svd->amp;
6479 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6482 struct vnode *vp = svd->vp;
6489 ASSERT(svd->type == MAP_PRIVATE);
6490 ASSERT(svd->vpage != NULL);
6577 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6583 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6587 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6600 *nsvd = *svd;
6607 nsvd->offset = svd->offset +
6626 ASSERT(svd->softlockcnt == 0);
6627 ASSERT(svd->softlockcnt_sbase == 0);
6628 ASSERT(svd->softlockcnt_send == 0);
6629 crhold(svd->cred);
6631 if (svd->vpage != NULL) {
6634 struct vpage *ovpage = svd->vpage;
6636 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6637 bcopy(ovpage, svd->vpage, bytes);
6642 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6643 struct anon_map *oamp = svd->amp, *namp;
6649 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6655 svd->anon_index + btop(seg->s_size),
6660 svd->anon_index = 0;
6664 } else if (svd->amp != NULL) {
6666 ASSERT(svd->amp == nsvd->amp);
6667 ASSERT(seg->s_szc <= svd->amp->a_szc);
6668 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6670 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6671 svd->amp->refcnt++;
6672 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6678 if (svd->swresv) {
6684 if (svd->flags & MAP_NORESERVE) {
6687 ASSERT(svd->amp);
6688 oswresv = svd->swresv;
6689 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6690 svd->anon_index, btop(seg->s_size)));
6693 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6695 if (svd->pageswap) {
6696 svd->swresv = segvn_count_swap_by_vpages(seg);
6697 ASSERT(nsvd->swresv >= svd->swresv);
6698 nsvd->swresv -= svd->swresv;
6700 ASSERT(svd->swresv == seg->s_size +
6702 svd->swresv = seg->s_size;
6732 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6738 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6743 ASSERT(svd->softlockcnt == 0);
6744 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6745 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6803 ASSERT(svd->type == MAP_SHARED);
6843 ASSERT(svd->type == MAP_SHARED);
6863 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6868 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6872 if (svd->pageprot == 0) {
6875 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6876 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6883 evp = &svd->vpage[seg_page(seg, addr + len)];
6884 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6890 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6897 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6903 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6904 if (svd->pageprot == 0) {
6906 protv[--pgno] = svd->prot;
6913 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6916 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6924 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6928 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6935 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6939 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6947 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6951 *vpp = svd->vp;
6968 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6978 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6993 if (svd->advice == MADV_RANDOM ||
6994 svd->advice == MADV_SEQUENTIAL && delta < 0)
6996 else if (svd->pageadvice && svd->vpage) {
6999 bvpp = &svd->vpage[page];
7000 evpp = &svd->vpage[page + pd];
7009 if (svd->type == MAP_SHARED)
7012 if ((amp = svd->amp) == NULL)
7015 page += svd->anon_index;
7075 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7084 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7089 if ((amp = svd->amp) != NULL)
7090 anon_index = svd->anon_index;
7117 vp = svd->vp;
7118 off = svd->offset + ptob(page);
7123 vp = svd->vp;
7124 off = svd->offset + ptob(page);
7227 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7244 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7263 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7265 if (svd->softlockcnt > 0) {
7270 if (svd->type == MAP_SHARED) {
7271 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7290 if (svd->softlockcnt > 0) {
7291 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7294 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7295 svd->amp->a_softlockcnt > 0) {
7302 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7303 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7308 vpp = svd->vpage;
7309 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7322 if (svd->type != segtype) {
7323 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7327 if (svd->prot != pageprot) {
7328 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7331 prot = svd->prot;
7333 vpp = &svd->vpage[seg_page(seg, addr)];
7335 } else if (svd->vp && svd->amp == NULL &&
7342 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7343 bflags, svd->cred, NULL);
7344 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7348 if ((amp = svd->amp) != NULL)
7349 anon_index = svd->anon_index + seg_page(seg, addr);
7360 vp = svd->vp;
7366 vp = svd->vp;
7396 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7417 } else if (svd->type == MAP_SHARED && amp != NULL) {
7424 ASSERT(svd->vp == NULL);
7453 svd->cred, NULL);
7459 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7470 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7485 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7486 if (svd->amp == NULL && svd->vp == NULL) {
7487 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7494 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7496 amp = svd->amp;
7498 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7505 anon_array_enter(amp, svd->anon_index + p, &cookie);
7506 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7540 vp = svd->vp;
7541 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7577 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7632 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7667 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7668 ASSERT(svd->type == MAP_SHARED);
7669 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7670 sp = svd->amp->a_sp;
7675 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7685 if (svd->type != segtype) {
7686 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7689 if (svd->pageprot == 0 && svd->prot != pageprot) {
7690 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7696 if (svd->tr_state == SEGVN_TR_INIT) {
7697 svd->tr_state = SEGVN_TR_OFF;
7698 } else if (svd->tr_state == SEGVN_TR_ON) {
7699 ASSERT(svd->amp != NULL);
7701 ASSERT(svd->amp == NULL &&
7702 svd->tr_state == SEGVN_TR_OFF);
7712 if ((vpp = svd->vpage) == NULL) {
7715 if (svd->vpage == NULL) {
7716 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7720 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7730 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7731 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7732 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7733 svd->amp->a_szc = seg->s_szc;
7736 if ((amp = svd->amp) != NULL) {
7737 anon_index = svd->anon_index + seg_page(seg, addr);
7740 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7741 evp = &svd->vpage[seg_page(seg, addr + len)];
7750 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7763 i_edx = svd->anon_index + seg_page(seg, addr + len);
7794 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7803 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7816 if (op == MC_LOCK && svd->vp == NULL &&
7817 ((svd->flags & MAP_NORESERVE) == 0) &&
7826 svd->cred);
7853 if (svd->vp == NULL &&
7854 (svd->flags & MAP_NORESERVE)) {
7859 vp = svd->vp;
7867 vp = svd->vp;
7891 S_OTHER, svd->cred, NULL);
7918 if (error && svd->vp) {
7920 if (VOP_GETATTR(svd->vp, &va, 0,
7921 svd->cred, NULL) != 0) {
7953 (svd->type == MAP_PRIVATE));
8039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8065 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8083 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8084 if (svd->tr_state != SEGVN_TR_OFF) {
8085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8089 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8101 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8104 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8117 if (svd->softlockcnt > 0) {
8122 if (svd->type == MAP_SHARED) {
8123 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8135 if (svd->softlockcnt > 0) {
8142 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8145 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8146 svd->amp->a_softlockcnt > 0) {
8156 amp = svd->amp;
8157 vp = svd->vp;
8169 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8180 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8189 svd->anon_index + page, len, behav, &purged);
8191 if (purged != 0 && (svd->flags & MAP_NORESERVE)) {
8203 atomic_add_long(&svd->swresv, -bytes);
8208 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8232 policy = lgrp_madv_to_policy(behav, len, svd->type);
8233 if (svd->type == MAP_SHARED)
8235 svd->anon_index, vp, svd->offset, len);
8243 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8248 &svd->policy_info, len);
8263 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8264 vp, svd->offset, 1);
8271 if (already_set || svd->type == MAP_SHARED)
8295 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8309 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8315 svd->advice = (uchar_t)behav;
8316 svd->pageadvice = 0;
8336 if (svd->vpage == NULL) {
8337 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8367 policy = lgrp_madv_to_policy(behav, len, svd->type);
8369 anon_index = svd->anon_index + page;
8370 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8372 if (svd->type == MAP_SHARED)
8377 (policy == svd->policy_info.mem_policy);
8392 if (svd->type == MAP_PRIVATE &&
8394 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8402 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8403 vp, svd->offset, 1);
8410 if (already_set || svd->type == MAP_SHARED)
8413 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8414 ASSERT(svd->amp == NULL);
8415 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8416 ASSERT(svd->softlockcnt == 0);
8417 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8419 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8434 if (svd->softlockcnt > 0)
8479 if (svd->softlockcnt > 0)
8509 &svd->policy_info, seg->s_size);
8532 &svd->lock);
8543 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8548 bvpp = &svd->vpage[page];
8549 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8552 svd->advice = MADV_NORMAL;
8563 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8575 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8586 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8592 if (svd->tr_state != SEGVN_TR_OFF ||
8593 svd->type != MAP_PRIVATE ||
8594 svd->vp != NULL) {
8603 if (svd->svn_inz == SEGVN_INZ_ALL) {
8612 svd->svn_inz = SEGVN_INZ_ALL;
8621 if (svd->vpage == NULL) {
8623 if (svd->vpage == NULL) {
8629 svd->svn_inz = SEGVN_INZ_VPP;
8631 bvpp = &svd->vpage[page];
8632 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8638 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8648 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8652 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8658 if (svd->vpage == NULL) {
8683 svd->pageadvice = 1;
8684 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8685 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8686 for (vp = svd->vpage; vp < evp; vp++) {
8687 VPP_SETPROT(vp, svd->prot);
8688 VPP_SETADVICE(vp, svd->advice);
8699 struct segvn_data *svd;
8710 svd = (struct segvn_data *)seg->s_data;
8711 vp = svd->vp;
8712 off = offset = svd->offset;
8715 if ((amp = svd->amp) != NULL) {
8716 anon_index = svd->anon_index;
8724 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8727 vp = svd->vp;
8809 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8852 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8860 if (svd->vp != NULL) {
8867 if ((amp = svd->amp) == NULL) {
8945 if (svd->type == MAP_PRIVATE) {
8953 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8955 if (aaix < svd->anon_index) {
8963 if (svd->pageprot && lpgaddr != addr) {
8964 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8965 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8979 if (svd->type == MAP_PRIVATE) {
8984 ulong_t aix = svd->anon_index +
9000 if (svd->pageprot && lpgeaddr != addr + len) {
9004 vp = &svd->vpage[seg_page(seg, addr + len)];
9005 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
9026 if (svd->type == MAP_SHARED) {
9029 ptob(svd->anon_index));
9070 ASSERT(svd->type == MAP_SHARED);
9073 ptob(svd->anon_index));
9089 ASSERT(svd->type == MAP_SHARED);
9090 ASSERT(svd->softlockcnt >= npages);
9091 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
9095 ASSERT(svd->softlockcnt_sbase > 0);
9096 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
9099 ASSERT(svd->softlockcnt_send > 0);
9100 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
9112 if (svd->softlockcnt == 0) {
9132 ASSERT(svd->type == MAP_PRIVATE);
9136 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9152 ASSERT(svd->type == MAP_SHARED);
9153 if (svd->pageprot == 0) {
9154 if ((svd->prot & protchk) == 0) {
9176 vp = &svd->vpage[seg_page(seg, a)];
9192 ASSERT(svd->type == MAP_SHARED);
9193 atomic_add_long((ulong_t *)&svd->softlockcnt,
9197 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9200 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9202 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9214 ASSERT(svd->type == MAP_PRIVATE);
9215 if (svd->pageprot == 0) {
9216 if ((svd->prot & protchk) == 0) {
9220 if (svd->prot & PROT_WRITE) {
9236 vp = &svd->vpage[seg_page(seg, a)];
9292 anon_index = svd->anon_index + page;
9334 if (svd->vpage != NULL) {
9335 vpage = &svd->vpage[seg_page(seg, a)];
9382 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9384 ASSERT(svd->type == MAP_SHARED);
9390 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9393 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9399 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9428 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9433 if (svd->amp == NULL || svd->vp != NULL) {
9442 if (svd->type == MAP_PRIVATE) {
9443 if (svd->softlockcnt) {
9446 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9447 seg_ppurge(seg, svd->amp, 0);
9462 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9469 ASSERT(svd->vp == NULL && svd->amp != NULL);
9470 ASSERT(svd->softlockcnt >= npages);
9510 mutex_enter(&svd->segfree_syncmtx);
9513 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9528 mutex_exit(&svd->segfree_syncmtx);
9588 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9594 if (svd->type == MAP_PRIVATE) {
9600 if (svd->type == MAP_SHARED) {
9601 if (svd->vp) {
9602 memidp->val[0] = (uintptr_t)svd->vp;
9603 memidp->val[1] = (u_longlong_t)svd->offset +
9608 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9609 if ((amp = svd->amp) != NULL) {
9610 anon_index = svd->anon_index +
9613 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9623 pp = anon_zero(seg, addr, &ap, svd->cred);
9650 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9655 if (svd->pageprot == 0)
9658 ASSERT(svd->vpage != NULL);
9660 vpage = &svd->vpage[seg_page(seg, a)];
9728 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9729 * svd->amp remains as NULL.
9734 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9735 vnode_t *vp = svd->vp;
9736 u_offset_t off = svd->offset;
9750 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9752 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9753 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9754 ASSERT(svd->flags & MAP_TEXT);
9755 ASSERT(svd->type == MAP_PRIVATE);
9756 ASSERT(vp != NULL && svd->amp == NULL);
9757 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9758 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9767 svd->tr_state = SEGVN_TR_OFF;
9776 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9777 svd->tr_state = SEGVN_TR_OFF;
9782 svd->tr_state = SEGVN_TR_OFF;
9807 svd->tr_state = SEGVN_TR_OFF;
9836 svd->tr_state = SEGVN_TR_OFF;
9856 svd->tr_state = SEGVN_TR_OFF;
9868 svd->tr_state = SEGVN_TR_OFF;
9973 ASSERT(svd->svn_trnext == NULL);
9974 ASSERT(svd->svn_trprev == NULL);
9975 svd->svn_trnext = svntrp->tr_svnhead;
9976 svd->svn_trprev = NULL;
9978 svntrp->tr_svnhead->svn_trprev = svd;
9980 svntrp->tr_svnhead = svd;
9983 svd->amp = amp;
9984 svd->anon_index = 0;
9985 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9986 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9987 svd->tr_state = SEGVN_TR_ON;
10004 svd->tr_state = SEGVN_TR_OFF;
10018 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
10019 vnode_t *vp = svd->vp;
10020 u_offset_t off = svd->offset;
10027 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
10032 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
10033 ASSERT(svd->tr_state == SEGVN_TR_ON);
10034 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10035 ASSERT(svd->amp != NULL);
10036 ASSERT(svd->amp->refcnt >= 1);
10037 ASSERT(svd->anon_index == 0);
10053 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
10056 svd->tr_state = SEGVN_TR_OFF;
10057 svd->amp = NULL;
10058 if (svd->svn_trprev == NULL) {
10059 ASSERT(svntrp->tr_svnhead == svd);
10060 svntrp->tr_svnhead = svd->svn_trnext;
10064 svd->svn_trnext = NULL;
10066 svd->svn_trprev->svn_trnext = svd->svn_trnext;
10067 if (svd->svn_trnext != NULL) {
10068 svd->svn_trnext->svn_trprev = svd->svn_trprev;
10069 svd->svn_trnext = NULL;
10071 svd->svn_trprev = NULL;
10189 segvn_data_t *svd;
10198 svd = svntrp->tr_svnhead;
10199 for (; svd != NULL; svd = svd->svn_trnext) {
10200 segvn_trupdate_seg(svd->seg, svd, svntrp,
10210 segvn_data_t *svd,
10220 ASSERT(svd->vp != NULL);
10221 ASSERT(svd->vp == svntrp->tr_vp);
10222 ASSERT(svd->offset == svntrp->tr_off);
10223 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10225 ASSERT(svd->seg == seg);
10226 ASSERT(seg->s_data == (void *)svd);
10228 ASSERT(svd->tr_state == SEGVN_TR_ON);
10229 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10230 ASSERT(svd->amp != NULL);
10231 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10232 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10233 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10234 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10248 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10263 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10275 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10282 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10290 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10303 * threads a chance. svntr and svd can't be unlinked as long as
10313 ASSERT(svd->tr_state == SEGVN_TR_ON);
10314 ASSERT(svd->amp != NULL);
10315 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10316 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10317 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10319 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10320 svd->amp = svntrp->tr_amp[lgrp_id];
10322 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10326 ASSERT(svd->vp == svntrp->tr_vp);
10327 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10328 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10329 ASSERT(svd->seg == seg);
10330 ASSERT(svd->tr_state == SEGVN_TR_ON);