Lines Matching refs:hat (SPARC sfmmu HAT layer)
34 * described in <vm/hat.h> while the machine dependent interface
37 * The hat layer manages the address translation hardware as a cache
43 #include <vm/hat.h>
90 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
96 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
97 ASSERT((hat) != ksfmmup); \
98 _srdp = (hat)->sfmmu_srdp; \
145 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
179 * SFMMU specific hat functions
226 * Private sfmmu data structures for hat management
397 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
399 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
410 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
412 static void hat_do_memload(struct hat *, caddr_t, struct page *,
414 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
416 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
433 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
442 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
443 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
523 static void iment_add(struct ism_ment *, struct hat *);
524 static void iment_sub(struct ism_ment *, struct hat *);
706 sfmmu_t *ksfmmup; /* kernel's hat id */
1319 * We grab the first hat for the kernel,
1419 * Initialize locking for the hat layer, called early during boot.
1453 * Allocate a hat structure.
1454 * Called when an address space first uses a hat.
1456 struct hat *
1563 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1865 hat_setup(struct hat *sfmmup, int allocflag)
1915 hat_free_start(struct hat *sfmmup)
1929 hat_free_end(struct hat *sfmmup)
1979 hat_swapin(struct hat *hat)
1989 hat_swapout(struct hat *sfmmup)
2121 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 ASSERT(hat != ksfmmup);
2133 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2139 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2152 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2153 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2158 if ((scdp = hat->sfmmu_scdp) != NULL &&
2169 hat->sfmmu_ismttecnt[i];
2171 hat->sfmmu_scdismttecnt[i];
2180 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
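The hat_dup() matches above (2121-2180) are the fork path: region flags, 4M-text state, and SCD TTE counts are copied from the parent hat to the child. A minimal caller-side sketch of the two-phase protocol, assuming kernel context; dup_hat_example() and the as pointers are hypothetical:

    #include <sys/types.h>
    #include <vm/hat.h>
    #include <vm/as.h>

    /*
     * Hypothetical fork-path fragment. The SRD (shared region
     * descriptor) is propagated first so region mappings can be
     * duplicated; HAT_DUP_ALL then copies the remaining state.
     */
    static void
    dup_hat_example(struct as *pas, struct as *cas)
    {
            hat_dup(pas->a_hat, cas->a_hat, NULL, 0, HAT_DUP_SRD);
            /* ... VM duplicates the segments in between ... */
            hat_dup(pas->a_hat, cas->a_hat, NULL, 0, HAT_DUP_ALL);
    }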
2186 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2189 hat_do_memload(hat, addr, pp, attr, flags,
2194 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2199 hat_do_memload(hat, addr, pp, attr, flags,
2205 hat_do_memload(hat, addr, pp, attr, flags, rid);
2214 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2220 ASSERT(hat != NULL);
2225 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2232 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2238 if (hat->sfmmu_rmstat)
2239 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2242 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2252 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2258 sfmmu_check_page_sizes(hat, 1);
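hat_memload() and hat_memload_region() (2186-2258) are thin wrappers around hat_do_memload(), which builds a TTE and hands it to sfmmu_tteload_array(). A minimal consumer sketch of the public entry point, assuming the caller already holds the page locked; load_one_page() is hypothetical:

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <vm/hat.h>
    #include <vm/as.h>
    #include <vm/page.h>

    /* Establish a normal (unlocked) 8K translation for va -> pp. */
    static void
    load_one_page(struct as *as, caddr_t va, page_t *pp)
    {
            hat_memload(as->a_hat, va, pp,
                PROT_READ | PROT_WRITE, HAT_LOAD);
    }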
2271 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2278 ASSERT(hat != NULL);
2282 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2290 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2321 if (hat->sfmmu_rmstat)
2322 hat_resvstat(len, hat->sfmmu_as, addr);
2352 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2368 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2378 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2388 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2395 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2407 sfmmu_check_page_sizes(hat, 1);
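hat_devload() (2271-2407) is the pfn-based variant for device memory with no page_t behind it; the repeated sfmmu_tteload_array() calls at 2352-2395 are the per-pagesize cases. A hedged sketch of mapping one device page into the kernel with strictly ordered (side-effect) semantics; map_device_page() is hypothetical:

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <vm/hat.h>
    #include <vm/as.h>

    static void
    map_device_page(caddr_t kva, pfn_t pfn)
    {
            /*
             * HAT_LOAD_NOCONSIST: no page_t to hang the mapping on;
             * HAT_LOAD_LOCK: device mappings are not demand-faulted.
             */
            hat_devload(kas.a_hat, kva, MMU_PAGESIZE, pfn,
                PROT_READ | PROT_WRITE | HAT_STRICTORDER,
                HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
    }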
2412 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2415 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2420 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2426 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2432 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2441 * promotion/demotion of page size is not up to the hat but up to
2447 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2458 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2460 if (hat->sfmmu_rmstat)
2461 hat_resvstat(len, hat->sfmmu_as, addr);
2464 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2483 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2516 if (!sfmmu_tteload_array(hat, &tte, addr,
2532 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2541 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2549 sfmmu_check_page_sizes(hat, 1);
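hat_memload_array() (2412-2549) maps a run of pages in one call so the HAT can choose large pages where size and alignment permit, falling back to sfmmu_memload_batchsmall() to batch 8K loads on a shared hash bucket. A hedged caller sketch; load_page_run() is hypothetical:

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <vm/hat.h>
    #include <vm/as.h>
    #include <vm/page.h>

    /* Map len bytes starting at va; pps holds the locked pages. */
    static void
    load_page_run(struct as *as, caddr_t va, size_t len, page_t **pps)
    {
            hat_memload_array(as->a_hat, va, len, pps,
                PROT_READ | PROT_WRITE, HAT_LOAD);
    }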
2557 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2570 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2577 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2591 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2673 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2872 * for <addr,hat> at page array pps. It assumes addr and first
3933 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3993 "addr %p hat %p", (void *)addr, (void *)sfmmup);
4003 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4076 "addr %p hat %p", (void *)va, (void *)sfmmup);
4730 hat_probe(struct hat *sfmmup, caddr_t addr)
4755 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4774 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4799 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4801 ASSERT(hat->sfmmu_as != NULL);
4803 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4811 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4813 ASSERT(hat->sfmmu_as != NULL);
4815 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4822 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4824 ASSERT(hat->sfmmu_as != NULL);
4826 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
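hat_setattr(), hat_chgattr() and hat_clrattr() (4799-4826) differ only in the mode constant (SFMMU_SETATTR, SFMMU_CHGATTR, SFMMU_CLRATTR) passed to sfmmu_chgattr(): set ORs attributes in, chg replaces them, clr removes them. A hedged sketch of write-protecting a range; make_readonly() is hypothetical:

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <vm/hat.h>
    #include <vm/as.h>

    /* Remove write permission; other attributes are untouched. */
    static void
    make_readonly(struct as *as, caddr_t va, size_t len)
    {
            hat_clrattr(as->a_hat, va, len, PROT_WRITE);
    }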
4833 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4929 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5153 * hat_chgprot is a deprecated hat call. New segment drivers
5163 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5449 struct hat *sfmmup,
5592 struct hat *sfmmup,
5655 * translations. In order to speed this up the sfmmu hat supports
5886 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5918 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6155 * at exit time, after return from hat layer, VM will
6195 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6218 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6302 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6370 * update the hat stats. Currently it allows us to pass a NULL pp
6375 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6411 * currently belong in common/vm and not in hat where
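hat_unload() and sfmmu_hblk_unload() (5886-6155) tear translations down, while hat_sync()/sfmmu_ttesync() (6218-6411) fold the hardware ref/mod bits back into the page_t. A hedged unmap sketch; unmap_range() is hypothetical:

    #include <sys/types.h>
    #include <vm/hat.h>
    #include <vm/as.h>

    static void
    unmap_range(struct as *as, caddr_t va, size_t len)
    {
            /*
             * The unload path syncs ref/mod bits into the page_t as
             * each TTE is invalidated (unless HAT_UNLOAD_NOSYNC is
             * passed), so no separate hat_sync() call is needed.
             */
            hat_unload(as->a_hat, va, len, HAT_UNLOAD);
    }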
7831 hat_getpfnum(struct hat *hat, caddr_t addr)
7847 if (hat == ksfmmup) {
7865 return (sfmmu_uvatopfn(addr, hat, NULL));
7873 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
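hat_getpfnum() (7831-7865) splits on the hat: kernel addresses (hat == ksfmmup) use the kernel lookup, user addresses go through sfmmu_uvatopfn(). A hedged sketch of the usual existence check; va_is_mapped() is hypothetical, PFN_INVALID is the documented failure value:

    #include <sys/types.h>
    #include <vm/hat.h>
    #include <vm/as.h>

    static int
    va_is_mapped(struct as *as, caddr_t va)
    {
            return (hat_getpfnum(as->a_hat, va) != PFN_INVALID);
    }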
8027 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8029 ASSERT(hat != NULL);
8353 hat_get_mapped_size(struct hat *hat)
8358 if (hat == NULL)
8362 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8363 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8365 if (hat->sfmmu_iblk == NULL)
8369 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8370 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
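hat_get_mapped_size() (8353-8370) computes the resident size from the per-pagesize TTE counters, adding the ISM counters only when an ISM block is present. Reassembling the fragments above gives roughly this shape (the surrounding declarations are assumed):

    static size_t
    mapped_size_sketch(struct hat *hat)
    {
            size_t assize = 0;
            int i;

            if (hat == NULL)
                    return (0);

            for (i = 0; i < mmu_page_sizes; i++)
                    assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
                        (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);

            if (hat->sfmmu_iblk == NULL)
                    return (assize);

            for (i = 0; i < mmu_page_sizes; i++)
                    assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
                        (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);

            return (assize);
    }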
8376 hat_stats_enable(struct hat *hat)
8380 hatlockp = sfmmu_hat_enter(hat);
8381 hat->sfmmu_rmstat++;
8387 hat_stats_disable(struct hat *hat)
8391 hatlockp = sfmmu_hat_enter(hat);
8392 hat->sfmmu_rmstat--;
8402 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8415 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8456 hat_share(struct hat *sfmmup, caddr_t addr,
8457 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8684 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8689 struct hat *ism_hatid;
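hat_share() (8456) links a process hat onto an ISM hat's mapping list via iment_add(); hat_unshare() (8684) undoes it via iment_sub(). A hedged sketch of how a shared-memory driver such as seg_spt would call the pair; the function names and the szc choice are hypothetical:

    #include <sys/types.h>
    #include <vm/hat.h>
    #include <vm/as.h>

    static int
    attach_ism(struct as *as, caddr_t va, struct hat *ism_hatid,
        caddr_t sptaddr, size_t len, uint_t szc)
    {
            /* Share ism_hatid's translations into 'as' at 'va'. */
            return (hat_share(as->a_hat, va, ism_hatid, sptaddr,
                len, szc));
    }

    static void
    detach_ism(struct as *as, caddr_t va, size_t len, uint_t szc)
    {
            hat_unshare(as->a_hat, va, len, szc);
    }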
9080 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9082 struct hat *tmphat;
9160 if (hmeblkp->hblk_shared || tmphat == hat ||
9606 * the hat.
9639 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9759 * set context invalid. Caller must hold the hat lock since we don't
9993 * This function will re-program hat pgsz array, and invalidate the
10005 /* USIII+-IV+ optimization, requires hat lock */
10297 * Since the sfmmu is currently embedded in the hat struct we simply zero
11784 * controlling thread with hat lock, sfmmu_flags and
11806 /* drop the private hat lock */
11808 /* acquire the shared hat lock */
11812 * after we drop the private hat lock.
11849 * hat lock.
11937 * hat locks were taken. In this case don't take the region lock by relying on
11970 * When an SCD is created the SCD hat is linked on the sfmmu
11972 * SCD. If we find an SCD hat, when walking these lists,
11973 * then we flush the shared TSBs, if we find a private hat,
12029 struct hat *ism_hatid;
12079 * from every hat sharing this ism_hat. This routine
12099 * When an SCD is created the SCD hat is linked on the ism
12101 * SCD. If we find an SCD hat, when walking these lists,
12102 * then we flush the shared TSBs, if we find a private hat,
12186 * We must hold the hat lock during the flush of TLB,
12362 * We must hold the hat lock during the flush of TLB,
12402 * holds the hat lock, threads that fault after this function is called
12471 * If the hat to-be-invalidated is the same as the current
12487 * we hold the hat lock, so nobody should allocate a context
12555 * be a shared hat, then set SCD's tsbinfo's flag.
12556 * If tsb is not shared, sfmmup is a private hat, then set
12566 /* release lock on the shared hat */
12568 /* sfmmup is a shared hat */
12572 /* get private hat from the scd list */
13217 * it's supposed to do, see hat.c and hat_srmmu.c
13221 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13299 hat_enter(struct hat *hat)
13303 if (hat != ksfmmup) {
13304 hatlockp = TSB_HASH(hat);
13310 hat_exit(struct hat *hat)
13314 if (hat != ksfmmup) {
13315 hatlockp = TSB_HASH(hat);
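hat_enter() and hat_exit() (13299-13315) serialize on a per-hat mutex picked by hashing the hat pointer; the kernel hat is exempt. Completing the fragments above yields roughly the following; HATLOCK_MUTEXP is assumed from the sfmmu lock wrappers:

    void
    hat_enter(struct hat *hat)
    {
            hatlock_t *hatlockp;

            if (hat != ksfmmup) {
                    hatlockp = TSB_HASH(hat);
                    mutex_enter(HATLOCK_MUTEXP(hatlockp));
            }
    }

    void
    hat_exit(struct hat *hat)
    {
            hatlock_t *hatlockp;

            if (hat != ksfmmup) {
                    hatlockp = TSB_HASH(hat);
                    mutex_exit(HATLOCK_MUTEXP(hatlockp));
            }
    }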
13331 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13338 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13345 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13602 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13809 hat_join_region(struct hat *sfmmup,
14103 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14305 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14359 * Update regionid bitmask without hat lock since no other thread
14917 * The first phase of a process joining an SCD. The hat structure is
15004 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15200 /* update ismttecnt to include SCD ism before hat leaves SCD */
15216 * the hat lock as we hold the sfmmu_as lock which prevents
15218 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15220 * while holding the hat lock.
15277 * if other thread still holds the same shared hat lock.
15279 * shared hat lock before checking the shared tsb reloc flag.
15316 * under hat lock that HAT_ISMBUSY was not set by another thread.