Lines Matching refs:hmeblkp

113 #define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)		\
119 int _ttesz = get_hblk_ttesz(hmeblkp); \
128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
129 _heva = get_hblk_endaddr(hmeblkp); \
146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
852 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
854 int ttesz = get_hblk_ttesz(hmeblkp); \
859 (caddr_t)get_hblk_base(hmeblkp); \
860 caddr_t eva = sva + get_hblk_span(hmeblkp); \
991 #define HBLKTOHME(hment, hmeblkp, addr) \
994 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
998 * Version of HBLKTOHME that also returns the index in hmeblkp
1001 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1003 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1005 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1010 (hment) = &(hmeblkp)->hblk_hme[idx]; \
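
The HBLKTOHME_IDX matches above (lines 991-1010) show the address-to-hment mapping: only TTE8K hmeblks carry more than one hment, so the slot index is derived from the address offset within the block. A minimal sketch of that computation follows; TTE8K_SHIFT and NHMENTS are assumed stand-ins for the real sfmmu constants, not the kernel's definitions.

#include <stdint.h>

/* Assumed values; the real constants live in the sfmmu headers. */
#define	TTE8K_SHIFT	13	/* 8K base page shift */
#define	NHMENTS		8	/* hments per 8K hmeblk */

static int
addr_to_hment_idx(uintptr_t addr, int is_tte8k)
{
	/* Larger page sizes use a single hment at index 0. */
	if (!is_tte8k)
		return (0);
	return ((int)((addr >> TTE8K_SHIFT) & (NHMENTS - 1)));
}
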
1992 struct hme_blk *hmeblkp;
2024 hmeblkp = hmebp->hmeblkp;
2026 while (hmeblkp) {
2028 if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2029 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2030 ASSERT(!hmeblkp->hblk_shared);
2031 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2032 (caddr_t)get_hblk_base(hmeblkp),
2033 get_hblk_endaddr(hmeblkp),
2036 nx_hblk = hmeblkp->hblk_next;
2037 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2038 ASSERT(!hmeblkp->hblk_lckcnt);
2039 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2042 pr_hblk = hmeblkp;
2044 hmeblkp = nx_hblk;
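
The cluster around lines 2024-2044 is one instance of a bucket-sweep pattern that recurs throughout this listing (see also lines 3717-3750 and 8977-9014): walk the hash chain with a trailing previous pointer, unlink blocks whose valid and hment counts have both dropped to zero, and otherwise advance the previous pointer. A simplified sketch of the pattern, using a stand-in struct rather than the real struct hme_blk layout:

#include <stddef.h>

struct blk {
	struct blk *next;
	int vcnt;		/* valid mappings, cf. hblk_vcnt */
	int hmecnt;		/* hments on p_mapping lists, cf. hblk_hmecnt */
};

static void
sweep_bucket(struct blk **headp)
{
	struct blk *blk = *headp, *prev = NULL, *nx;

	while (blk != NULL) {
		nx = blk->next;
		if (blk->vcnt == 0 && blk->hmecnt == 0) {
			if (prev == NULL)
				*headp = nx;	/* unlink at the head */
			else
				prev->next = nx;
			/* real code: sfmmu_hblk_hash_rm() + deferred free */
		} else {
			prev = blk;
		}
		blk = nx;
	}
}
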
2563 struct hme_blk *hmeblkp;
2577 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2579 ASSERT(hmeblkp);
2591 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2880 struct hme_blk *hmeblkp;
2899 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2901 ASSERT(hmeblkp);
2906 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2950 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2963 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2970 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2980 if (hmeblkp == NULL) {
2981 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2983 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2984 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2992 if (get_hblk_ttesz(hmeblkp) != size) {
2993 ASSERT(!hmeblkp->hblk_vcnt);
2994 ASSERT(!hmeblkp->hblk_hmecnt);
2995 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2999 if (hmeblkp->hblk_shw_bit) {
3004 ASSERT(!hmeblkp->hblk_shared);
3005 if (hmeblkp->hblk_shw_mask) {
3006 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3010 hmeblkp->hblk_shw_bit = 0;
3024 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3025 ASSERT(!hmeblkp->hblk_shw_bit);
3026 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3027 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3028 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3030 return (hmeblkp);
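
The sfmmu_tteload_find_hmeblk() matches (lines 2950-3030) trace a find-or-allocate shape: search the hash chain first, allocate and insert only on a miss, then re-validate size and shadow state. A sketch of just the find-or-allocate skeleton, with alloc_blk() as a hypothetical allocator standing in for sfmmu_hblk_alloc(); the real function additionally handles hblk_reserve, size mismatches, and shadow-bit cleanup, which this omits.

#include <stdlib.h>

struct blk {
	struct blk *next;
	unsigned long tag;
};

/* Hypothetical allocator; not the real sfmmu entry point. */
static struct blk *
alloc_blk(unsigned long tag)
{
	struct blk *blk = calloc(1, sizeof (*blk));

	blk->tag = tag;
	return (blk);
}

static struct blk *
find_or_alloc(struct blk **headp, unsigned long tag)
{
	struct blk *blk;

	for (blk = *headp; blk != NULL; blk = blk->next) {
		if (blk->tag == tag)
			return (blk);
	}
	blk = alloc_blk(tag);
	blk->next = *headp;	/* new blocks go on the bucket head */
	*headp = blk;
	return (blk);
}
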
3038 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3111 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3112 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3114 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3146 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3147 (void *)hmeblkp);
3194 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3196 (void *)hmeblkp);
3198 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3200 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3226 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3233 atomic_inc_16(&hmeblkp->hblk_vcnt);
3316 if (hmeblkp->hblk_shared) {
3318 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3322 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3352 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3353 ASSERT(hmeblkp->hblk_hmecnt > 0);
3356 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3594 struct hme_blk *hmeblkp;
3620 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3621 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3622 if (hmeblkp == NULL) {
3623 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3626 ASSERT(hmeblkp);
3627 if (!hmeblkp->hblk_shw_mask) {
3633 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3634 hmeblkp->hblk_shw_bit = 1;
3635 } else if (hmeblkp->hblk_shw_bit == 0) {
3636 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3637 (void *)hmeblkp);
3639 ASSERT(hmeblkp->hblk_shw_bit == 1);
3640 ASSERT(!hmeblkp->hblk_shared);
3647 shw_mask = hmeblkp->hblk_shw_mask;
3649 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3655 return (hmeblkp);
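
Lines 3647-3649 show the lock-free update of hblk_shw_mask: read the mask, compute the new value, and retry the compare-and-swap until no other thread has raced in between. A minimal sketch of that retry loop using C11 atomics in place of the kernel's atomic_cas_32():

#include <stdatomic.h>
#include <stdint.h>

static void
set_shw_bit(_Atomic uint32_t *maskp, uint32_t bit)
{
	uint32_t old = atomic_load(maskp);

	/* On failure, old is reloaded with the current value; retry. */
	while (!atomic_compare_exchange_weak(maskp, &old, old | bit))
		;
}
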
3668 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3674 ASSERT(hmeblkp->hblk_shw_bit);
3675 ASSERT(!hmeblkp->hblk_shared);
3679 if (!hmeblkp->hblk_shw_mask) {
3680 hmeblkp->hblk_shw_bit = 0;
3683 addr = (caddr_t)get_hblk_base(hmeblkp);
3684 endaddr = get_hblk_endaddr(hmeblkp);
3685 size = get_hblk_ttesz(hmeblkp);
3702 struct hme_blk *hmeblkp;
3717 hmeblkp = hmebp->hmeblkp;
3719 while (hmeblkp) {
3720 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3722 ASSERT(!hmeblkp->hblk_shared);
3723 if (hmeblkp->hblk_shw_bit) {
3724 if (hmeblkp->hblk_shw_mask) {
3727 hmeblkp, hmebp);
3730 hmeblkp->hblk_shw_bit = 0;
3743 nx_hblk = hmeblkp->hblk_next;
3744 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3745 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3748 pr_hblk = hmeblkp;
3750 hmeblkp = nx_hblk;
3781 struct hme_blk *hmeblkp;
3796 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3797 if (hmeblkp != NULL) {
3798 ASSERT(hmeblkp->hblk_shared);
3799 ASSERT(!hmeblkp->hblk_shw_bit);
3800 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3803 ASSERT(!hmeblkp->hblk_lckcnt);
3804 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3829 struct hme_blk *hmeblkp;
3845 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3846 if (hmeblkp != NULL) {
3847 ASSERT(hmeblkp->hblk_shared);
3848 ASSERT(!hmeblkp->hblk_lckcnt);
3849 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3850 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3854 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3855 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3938 struct hme_blk *hmeblkp, *list = NULL;
3964 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3965 if (hmeblkp != NULL) {
3966 ASSERT(!hmeblkp->hblk_shared);
3974 if (hmeblkp->hblk_shw_bit) {
3977 addr = sfmmu_hblk_unlock(hmeblkp, addr,
4015 struct hme_blk *hmeblkp;
4060 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4062 if (hmeblkp == NULL) {
4067 ASSERT(hmeblkp->hblk_shared);
4068 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4088 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4094 ASSERT(in_hblk_range(hmeblkp, addr));
4095 ASSERT(hmeblkp->hblk_shw_bit == 0);
4097 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4098 ttesz = get_hblk_ttesz(hmeblkp);
4100 HBLKTOHME(sfhme, hmeblkp, addr);
4114 if (hmeblkp->hblk_lckcnt == 0)
4121 ASSERT(hmeblkp->hblk_lckcnt > 0);
4122 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4123 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
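
The lock-count matches pair up across two paths: the load path bumps hblk_lckcnt and panics at a ceiling (lines 3194-3198), and the unlock path asserts it is nonzero before decrementing (lines 4121-4122). A sketch of that accounting, with MAX_LCKCNT as an assumed ceiling standing in for MAX_HBLK_LCKCNT and assert() standing in for the kernel panic:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define	MAX_LCKCNT	0x10000		/* assumed ceiling */

static void
lock_hblk(_Atomic uint32_t *lckcntp)
{
	/* Real code panics ("too high lckcnt") at the ceiling. */
	assert(atomic_load(lckcntp) + 1 < MAX_LCKCNT);
	atomic_fetch_add(lckcntp, 1);
}

static void
unlock_hblk(_Atomic uint32_t *lckcntp)
{
	assert(atomic_load(lckcntp) > 0);	/* must never underflow */
	atomic_fetch_sub(lckcntp, 1);
}
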
4291 struct hme_blk *hmeblkp;
4335 for (hashno = TTE64K, hmeblkp = NULL;
4336 hmeblkp == NULL && hashno <= mmu_hashcnt;
4347 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4349 if (hmeblkp == NULL)
4353 if (hmeblkp == NULL) {
4359 ASSERT(!hmeblkp->hblk_shared);
4361 HBLKTOHME(osfhmep, hmeblkp, saddr);
4375 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4520 struct hme_blk *hmeblkp;
4543 for (hashno = TTE64K, hmeblkp = NULL;
4544 hmeblkp == NULL && hashno <= mmu_hashcnt;
4555 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4557 if (hmeblkp == NULL)
4561 if (hmeblkp == NULL)
4564 ASSERT(!hmeblkp->hblk_shared);
4566 HBLKTOHME(osfhmep, hmeblkp, saddr);
4839 struct hme_blk *hmeblkp, *list = NULL;
4869 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4870 if (hmeblkp != NULL) {
4871 ASSERT(!hmeblkp->hblk_shared);
4876 if (hmeblkp->hblk_shw_bit) {
4883 hmeblkp, addr, endaddr, &dmr, attr, mode);
4929 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4943 ASSERT(in_hblk_range(hmeblkp, addr));
4944 ASSERT(hmeblkp->hblk_shw_bit == 0);
4945 ASSERT(!hmeblkp->hblk_shared);
4947 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4948 ttesz = get_hblk_ttesz(hmeblkp);
4967 HBLKTOHME(sfhmep, hmeblkp, addr);
5040 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5168 struct hme_blk *hmeblkp, *list = NULL;
5198 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5199 if (hmeblkp != NULL) {
5200 ASSERT(!hmeblkp->hblk_shared);
5205 if (hmeblkp->hblk_shw_bit) {
5211 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5257 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5273 ASSERT(in_hblk_range(hmeblkp, addr));
5274 ASSERT(hmeblkp->hblk_shw_bit == 0);
5275 ASSERT(!hmeblkp->hblk_shared);
5278 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5279 (endaddr < get_hblk_endaddr(hmeblkp))) {
5284 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5285 ttesz = get_hblk_ttesz(hmeblkp);
5293 HBLKTOHME(sfhmep, hmeblkp, addr);
5368 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5456 struct hme_blk *hmeblkp;
5484 hmeblkp = hmebp->hmeblkp;
5486 while (hmeblkp) {
5487 nx_hblk = hmeblkp->hblk_next;
5493 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5494 hmeblkp->hblk_shw_bit ||
5495 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5496 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5497 pr_hblk = hmeblkp;
5501 ASSERT(!hmeblkp->hblk_shared);
5505 if (hmeblkp->hblk_vcnt != 0 ||
5506 hmeblkp->hblk_hmecnt != 0)
5507 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5515 !hmeblkp->hblk_vcnt &&
5516 !hmeblkp->hblk_hmecnt) {
5517 ASSERT(!hmeblkp->hblk_lckcnt);
5518 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5521 pr_hblk = hmeblkp;
5555 hmeblkp = nx_hblk;
5601 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5686 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5687 if (hmeblkp == NULL) {
5730 ASSERT(hmeblkp);
5731 ASSERT(!hmeblkp->hblk_shared);
5732 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5742 get_hblk_span(hmeblkp));
5745 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5778 if (hmeblkp->hblk_shw_bit) {
5802 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5809 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5810 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5918 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5930 ASSERT(in_hblk_range(hmeblkp, addr));
5931 ASSERT(!hmeblkp->hblk_shw_bit);
5932 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5933 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5934 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5937 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5938 (endaddr < get_hblk_endaddr(hmeblkp))) {
5943 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5944 ttesz = get_hblk_ttesz(hmeblkp);
5955 HBLKTOHME(sfhmep, hmeblkp, addr);
6028 ASSERT(hmeblkp->hblk_lckcnt > 0);
6029 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6030 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6066 ASSERT(!hmeblkp->hblk_shared);
6067 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6075 ASSERT(hmeblkp->hblk_hmecnt > 0);
6079 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6083 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6086 ASSERT(hmeblkp->hblk_vcnt > 0);
6087 atomic_dec_16(&hmeblkp->hblk_vcnt);
6089 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6090 !hmeblkp->hblk_lckcnt);
6128 } else if (hmeblkp->hblk_hmecnt != 0) {
6223 struct hme_blk *hmeblkp, *list = NULL;
6254 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6255 if (hmeblkp != NULL) {
6256 ASSERT(!hmeblkp->hblk_shared);
6261 if (hmeblkp->hblk_shw_bit) {
6267 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6302 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6312 ASSERT(hmeblkp->hblk_shw_bit == 0);
6313 ASSERT(!hmeblkp->hblk_shared);
6315 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6317 ttesz = get_hblk_ttesz(hmeblkp);
6318 HBLKTOHME(sfhmep, hmeblkp, addr);
6354 hmeblkp, 0, 0);
6614 struct hme_blk *hmeblkp;
6661 hmeblkp = sfmmu_hmetohblk(sfhmep);
6663 sfmmup = hblktosfmmu(hmeblkp);
6665 ASSERT(!hmeblkp->hblk_shared);
6667 addr = tte_to_vaddr(hmeblkp, tte);
6674 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
7025 struct hme_blk *hmeblkp;
7067 hmeblkp = sfmmu_hmetohblk(sfhme);
7073 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7074 hmeblkp->hblk_tag.htag_id == ksfmmup)
7150 struct hme_blk *hmeblkp;
7167 hmeblkp = sfmmu_hmetohblk(sfhme);
7172 sfmmup = hblktosfmmu(hmeblkp);
7173 ttesz = get_hblk_ttesz(hmeblkp);
7196 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7205 addr = tte_to_vaddr(hmeblkp, tte);
7207 if (hmeblkp->hblk_shared) {
7209 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7215 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7216 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7251 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7258 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7271 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7278 * we are done with hmeblkp so that this hmeblk won't be
7281 ASSERT(hmeblkp->hblk_hmecnt > 0);
7282 ASSERT(hmeblkp->hblk_vcnt > 0);
7283 atomic_dec_16(&hmeblkp->hblk_vcnt);
7284 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7288 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7289 * !hmeblkp->hblk_lckcnt);
7373 struct hme_blk *hmeblkp;
7423 hmeblkp = sfmmu_hmetohblk(sfhme);
7429 if (hmeblkp->hblk_shared) {
7430 sf_srd_t *srdp = hblktosrd(hmeblkp);
7431 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7437 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7493 struct hme_blk *hmeblkp;
7511 hmeblkp = sfmmu_hmetohblk(sfhme);
7512 sfmmup = hblktosfmmu(hmeblkp);
7513 addr = tte_to_vaddr(hmeblkp, tte);
7529 if (hmeblkp->hblk_shared) {
7532 hmeblkp->hblk_tag.htag_rid;
7538 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7541 rgnp, hmeblkp, 1);
7543 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7549 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7567 struct hme_blk *hmeblkp;
7582 hmeblkp = sfmmu_hmetohblk(sfhme);
7583 sfmmup = hblktosfmmu(hmeblkp);
7584 addr = tte_to_vaddr(hmeblkp, tte);
7600 if (hmeblkp->hblk_shared) {
7602 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7608 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7611 rgnp, hmeblkp, 1);
7613 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7878 struct hme_blk *hmeblkp = NULL;
7937 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7938 if (hmeblkp != NULL) {
7939 ASSERT(!hmeblkp->hblk_shared);
7940 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7968 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7969 hmeblkp = hmeblkp->hblk_next) {
7975 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7979 ASSERT(hmeblkp->hblk_shared);
7980 rid = hmeblkp->hblk_tag.htag_rid;
7984 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7985 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7991 get_hblk_ttesz(hmeblkp) > TTE8K) {
7992 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
8008 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8094 struct hme_blk *hmeblkp;
8121 hmeblkp = sfmmu_hmetohblk(sfhme);
8126 if (hmeblkp->hblk_shared) {
8127 sf_srd_t *srdp = hblktosrd(hmeblkp);
8128 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8134 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8889 struct hme_blk *hmeblkp;
8892 hmeblkp = (struct hme_blk *)buf;
8893 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8896 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8909 struct hme_blk *hmeblkp;
8911 hmeblkp = (struct hme_blk *)buf;
8912 mutex_destroy(&hmeblkp->hblk_audit_lock);
8931 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8977 hmeblkp = hmebp->hmeblkp;
8979 while (hmeblkp) {
8980 nx_hblk = hmeblkp->hblk_next;
8981 if (!hmeblkp->hblk_vcnt &&
8982 !hmeblkp->hblk_hmecnt) {
8983 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8986 pr_hblk = hmeblkp;
8988 hmeblkp = nx_hblk;
9003 hmeblkp = hmebp->hmeblkp;
9005 while (hmeblkp) {
9006 nx_hblk = hmeblkp->hblk_next;
9007 if (!hmeblkp->hblk_vcnt &&
9008 !hmeblkp->hblk_hmecnt) {
9009 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9012 pr_hblk = hmeblkp;
9014 hmeblkp = nx_hblk;
9084 struct hme_blk *hmeblkp;
9156 hmeblkp = sfmmu_hmetohblk(sfhmep);
9157 tmphat = hblktosfmmu(hmeblkp);
9160 if (hmeblkp->hblk_shared || tmphat == hat ||
9161 hmeblkp->hblk_lckcnt) {
9182 hmeblkp = sfmmu_hmetohblk(sfhmep);
9183 ASSERT(!hmeblkp->hblk_shared);
9292 struct hme_blk *hmeblkp;
9329 hmeblkp = sfmmu_hmetohblk(sfhme);
9334 vaddr = tte_to_vaddr(hmeblkp, tte);
9461 struct hme_blk *hmeblkp;
9475 hmeblkp = sfmmu_hmetohblk(sfhme);
9479 vaddr = tte_to_vaddr(hmeblkp, tte);
9505 sfmmup = hblktosfmmu(hmeblkp);
9510 if (hmeblkp->hblk_shared) {
9512 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9518 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9521 hmeblkp, 0);
9529 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9532 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9545 if (hmeblkp->hblk_shared) {
9547 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9553 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9556 hmeblkp, 0);
9563 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9566 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
10572 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10576 ASSERT(hmeblkp->hblk_hmecnt == 0);
10577 ASSERT(hmeblkp->hblk_vcnt == 0);
10578 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10596 hmeblkp->hblk_next = freehblkp;
10597 freehblkp = hmeblkp;
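
Lines 10596-10597 show the free-list side of sfmmu_put_free_hblk(): a plain LIFO push onto a global head. A sketch of the push, with freehblk_head as a hypothetical global standing in for freehblkp; the real path runs under a free-list lock and capacity checks that this omits.

struct blk { struct blk *next; };

static struct blk *freehblk_head;	/* locked in the real code */

static void
put_free_blk(struct blk *blk)
{
	blk->next = freehblk_head;
	freehblk_head = blk;
}
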
10678 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10933 struct hme_blk *hmeblkp = NULL;
10971 hmeblkp =
10986 hmeblkp =
11061 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11074 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11075 hmeblkp = sfmmu_hblk_steal(size);
11079 * swap hblk_reserve with hmeblkp and
11085 sfmmu_hblk_swap(hmeblkp);
11095 if (sfmmu_put_free_hblk(hmeblkp, 0))
11106 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11113 hmeblkp = HBLK_RESERVE;
11119 ASSERT(hmeblkp != NULL);
11120 set_hblk_sz(hmeblkp, size);
11121 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11126 if (hmeblkp != HBLK_RESERVE) {
11161 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11165 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11225 hmeblkp->hblk_shared = 1;
11227 hmeblkp->hblk_shared = 0;
11229 set_hblk_sz(hmeblkp, size);
11231 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11232 hmeblkp->hblk_tag = hblktag;
11233 hmeblkp->hblk_shadow = shw_hblkp;
11234 hblkpa = hmeblkp->hblk_nextpa;
11235 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11237 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11238 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11239 ASSERT(hmeblkp->hblk_hmecnt == 0);
11240 ASSERT(hmeblkp->hblk_vcnt == 0);
11241 ASSERT(hmeblkp->hblk_lckcnt == 0);
11242 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11243 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11244 return (hmeblkp);
11254 struct hme_blk *hmeblkp, *next_hmeblkp;
11261 hmeblkp = *listp;
11262 while (hmeblkp != NULL) {
11263 next_hmeblkp = hmeblkp->hblk_next;
11264 ASSERT(!hmeblkp->hblk_hmecnt);
11265 ASSERT(!hmeblkp->hblk_vcnt);
11266 ASSERT(!hmeblkp->hblk_lckcnt);
11267 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11268 ASSERT(hmeblkp->hblk_shared == 0);
11269 ASSERT(hmeblkp->hblk_shw_bit == 0);
11270 ASSERT(hmeblkp->hblk_shadow == NULL);
11272 hblkpa = va_to_pa((caddr_t)hmeblkp);
11274 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11276 size = get_hblk_ttesz(hmeblkp);
11277 hmeblkp->hblk_next = NULL;
11278 hmeblkp->hblk_nextpa = hblkpa;
11280 if (hmeblkp->hblk_nuc_bit == 0) {
11283 !sfmmu_put_free_hblk(hmeblkp, critical))
11284 kmem_cache_free(get_hblk_cache(hmeblkp),
11285 hmeblkp);
11287 hmeblkp = next_hmeblkp;
11309 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11316 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11317 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11318 ASSERT(hmeblkp->hblk_hmecnt == 0);
11319 ASSERT(hmeblkp->hblk_vcnt == 0);
11320 return (hmeblkp);
11326 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11327 return (hmeblkp);
11334 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11337 hmeblkp = hmebp->hmeblkp;
11340 while (hmeblkp) {
11346 if ((get_hblk_ttesz(hmeblkp) == size) &&
11347 (hmeblkp->hblk_shw_bit == 0 ||
11348 hmeblkp->hblk_vcnt == 0) &&
11349 (hmeblkp->hblk_lckcnt == 0)) {
11356 if ((hmeblkp->hblk_vcnt == 0 &&
11357 hmeblkp->hblk_hmecnt == 0) || (i >=
11360 hmeblkp, hblkpa, pr_hblk)) {
11369 pr_hblk = hmeblkp;
11370 hblkpa = hmeblkp->hblk_nextpa;
11371 hmeblkp = hmeblkp->hblk_next;
11380 if (hmeblkp != NULL)
11389 hmeblkp = hmebp->hmeblkp;
11392 while (hmeblkp) {
11396 if ((get_hblk_ttesz(hmeblkp) == size) &&
11397 (hmeblkp->hblk_lckcnt == 0) &&
11398 (hmeblkp->hblk_vcnt == 0) &&
11399 (hmeblkp->hblk_hmecnt == 0)) {
11401 hmeblkp, hblkpa, pr_hblk)) {
11412 pr_hblk = hmeblkp;
11413 hblkpa = hmeblkp->hblk_nextpa;
11414 hmeblkp = hmeblkp->hblk_next;
11422 if (hmeblkp != NULL)
11426 return (hmeblkp);
11437 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11451 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11455 sfmmup = hblktosfmmu(hmeblkp);
11456 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11460 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11461 (caddr_t)get_hblk_base(hmeblkp),
11462 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11464 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11474 ASSERT(hmeblkp->hblk_lckcnt == 0);
11475 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11477 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11478 hmeblkp->hblk_nextpa = hblkpa;
11480 shw_hblkp = hmeblkp->hblk_shadow;
11482 ASSERT(!hmeblkp->hblk_shared);
11484 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11497 hmeblkp->hblk_shadow = NULL;
11505 hmeblkp->hblk_shw_bit = 0;
11507 if (hmeblkp->hblk_shared) {
11512 srdp = hblktosrd(hmeblkp);
11514 rid = hmeblkp->hblk_tag.htag_rid;
11519 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11520 hmeblkp->hblk_shared = 0;
11532 struct hme_blk *hmeblkp;
11542 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11545 return (hmeblkp);
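
The sfmmu_hmetohblk() matches (lines 11532-11545) recover the enclosing hme_blk from a pointer into its embedded hment array by subtracting offsets, the classic container-of idiom. A sketch under an illustrative layout; the struct here is not the real hme_blk:

#include <stddef.h>
#include <stdint.h>

struct hment { int dummy; };

struct blk {
	int misc;
	struct hment hme[8];	/* cf. hblk_hme[] */
};

static struct blk *
hment_to_blk(struct hment *hp, int idx)
{
	/* Back up to hme[0], then subtract the array's offset. */
	return ((struct blk *)((uintptr_t)(hp - idx) -
	    offsetof(struct blk, hme)));
}
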
11945 struct hme_blk *hmeblkp, int uselocks)
11955 ASSERT(hmeblkp->hblk_shared);
11988 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12063 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12076 ASSERT(!hmeblkp->hblk_shared);
12123 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12159 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12169 ASSERT(!hmeblkp->hblk_shared);
12202 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12248 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12254 ASSERT(!hmeblkp->hblk_shared);
12267 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
13170 struct hme_blk *hmeblkp;
13193 hmeblkp = (struct hme_blk *)addr;
13195 hmeblkp->hblk_nuc_bit = 1;
13196 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13205 hmeblkp = (struct hme_blk *)addr;
13207 hmeblkp->hblk_nuc_bit = 1;
13208 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13399 * *hmeblkp is currently unused.
13404 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13414 hmeblkp = hmeblkp;
15458 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15463 if (hmebp->hmeblkp == NULL) {
15468 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15477 hmeblkp->hblk_next = hmebp->hmeblkp;
15478 hmebp->hmeblkp = hmeblkp;
15503 * hmeblkp - address of hmeblk to be removed
15504 * pr_hblk - virtual address of previous hmeblkp
15514 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15527 if (hmebp->hmeblkp == hmeblkp) {
15528 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15529 hmebp->hmeblkp = hmeblkp->hblk_next;
15531 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15532 pr_hblk->hblk_next = hmeblkp->hblk_next;
15535 size = get_hblk_ttesz(hmeblkp);
15536 shw_hblkp = hmeblkp->hblk_shadow;
15538 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15539 ASSERT(!hmeblkp->hblk_shared);
15549 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15562 hmeblkp->hblk_shadow = NULL;
15564 hmeblkp->hblk_shw_bit = 0;
15566 if (hmeblkp->hblk_shared) {
15572 srdp = hblktosrd(hmeblkp);
15574 rid = hmeblkp->hblk_tag.htag_rid;
15579 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15581 hmeblkp->hblk_shared = 0;
15590 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15591 hmeblkp->hblk_next = NULL;
15593 /* Append hmeblkp to listp for processing later. */
15594 hmeblkp->hblk_next = *listp;
15595 *listp = hmeblkp;
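
Lines 15527-15532 show why sfmmu_hblk_hash_rm() is more than a plain list deletion: each block sits on the bucket chain twice, once by virtual pointer and once by physical address (so TSB miss handlers can walk it physically), and both links must be patched together. A sketch of that dual-link unlink, with simplified stand-in types:

#include <stdint.h>

struct blk {
	struct blk *next;	/* cf. hblk_next */
	uint64_t nextpa;	/* cf. hblk_nextpa */
};

struct bucket {
	struct blk *head;	/* cf. hmeblkp */
	uint64_t headpa;	/* cf. hmeh_nextpa */
};

static void
unlink_blk(struct bucket *bp, struct blk *blk, struct blk *prev)
{
	if (bp->head == blk) {
		bp->headpa = blk->nextpa;
		bp->head = blk->next;
	} else {
		prev->nextpa = blk->nextpa;
		prev->next = blk->next;
	}
}
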
15607 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15623 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15624 hmeblkp = hmeblkp->hblk_next) {
15625 if (get_hblk_ttesz(hmeblkp) == size) {
15628 hmeblkp->hblk_next;
15631 hmeblkp->hblk_next;
15638 last_hmeblkp = hmeblkp;
15649 return (hmeblkp);
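
The closing matches (lines 15607-15649, sfmmu_check_pending_hblks) walk a pending list with a last_hmeblkp trailer, pulling out the first block of the requested page size. A sketch of that scan-and-unlink, with the size field standing in for get_hblk_ttesz():

#include <stddef.h>

struct blk {
	struct blk *next;
	int size;		/* cf. get_hblk_ttesz() */
};

static struct blk *
take_first_of_size(struct blk **headp, int size)
{
	struct blk *blk, *last = NULL;

	for (blk = *headp; blk != NULL; blk = blk->next) {
		if (blk->size == size) {
			if (last == NULL)
				*headp = blk->next;
			else
				last->next = blk->next;
			blk->next = NULL;
			return (blk);
		}
		last = blk;
	}
	return (NULL);
}
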