hat_sfmmu.c revision 25cf1a301a396c38e8adf52c15f537b80d2483f7
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * CDDL HEADER START
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * The contents of this file are subject to the terms of the
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Common Development and Distribution License (the "License").
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * You may not use this file except in compliance with the License.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * See the License for the specific language governing permissions
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * and limitations under the License.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * When distributing Covered Code, include this CDDL HEADER in each
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * If applicable, add the following below this CDDL HEADER, with the
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * fields enclosed by brackets "[]" replaced with your own identifying
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * information: Portions Copyright [yyyy] [name of copyright owner]
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * CDDL HEADER END
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Use is subject to license terms.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl#pragma ident "%Z%%M% %I% %E% SMI"
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * VM - Hardware Address Translation management for Spitfire MMU.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * This file implements the machine specific hardware translation
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * needed by the VM system. The machine independent interface is
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * described in <vm/hat.h> while the machine dependent interface
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * and data structures are described in <vm/hat_sfmmu.h>.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * The hat layer manages the address translation hardware as a cache
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * driven by calls from the higher levels in the VM system.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl#define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl (sizeof (int64_t)))
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * SFMMU specific hat functions
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/* flags for hat_pagecachectl */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Flag to allow the creation of non-cacheable translations
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * to system memory. It is off by default. At the moment this
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * flag is used by the ecache error injector. The error injector
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * will turn it on when creating such a translation then shut it
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * off when it's finished.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Flag to disable large page support.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * value of 1 => disable all large pages.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * For example, use the value 0x4 to disable 512K pages.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * WARNING: 512K pages MUST be disabled for ISM/DISM. If not
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * a process would page fault indefinitely if it tried to
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * access a 512K page.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Private sfmmu data structures for hat management
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Private sfmmu data structures for ctx management
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic struct ctx *ctxhand; /* hand used while stealing ctxs */
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic struct ctx *ctxdirty; /* head of dirty ctx list */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Private sfmmu data structures for tsb management
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * sfmmu static variables for hmeblk resource management.
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic kmutex_t ctx_list_lock; /* mutex for ctx free/dirty lists */
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * private data for ism
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * Whether to delay TLB flushes and use Cheetah's flush-all support
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * when removing contexts from the dirty list.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * HAT flags, synchronizing TLB/TSB coherency, and context management.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * The lock is hashed on the sfmmup since the case where we need to lock
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * all processes is rare but does occur (e.g. we need to unload a shared
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * mapping from all processes using the mapping). We have a lot of buckets,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * and each slab of sfmmu_t's can use about a quarter of them, giving us
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * a fairly good distribution without wasting too much space and overhead
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * when we have to grab them all.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Hash algorithm optimized for a small number of slabs.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * 7 is (highbit((sizeof sfmmu_t)) - 1)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * kmem_cache, and thus they will be sequential within that cache. In
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * addition, each new slab will have a different "color" up to cache_maxcolor
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * which will skew the hashing for each successive slab which is allocated.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * If the size of sfmmu_t changed to a larger size, this algorithm may need
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * to be revisited.
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen#define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl#else /* DEBUG */
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen#define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen#endif /* DEBUG */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/* sfmmu_replace_tsb() return codes. */
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michentypedef enum tsb_replace_rc {
86f95553514ce565ce5afbc1786980b5bbd4f96amichen * Flags for TSB allocation routines.
86f95553514ce565ce5afbc1786980b5bbd4f96amichen * Support for HAT callbacks.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Kernel page relocation is enabled by default for non-caged
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * kernel pages. This has little effect unless segkmem_reloc is
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * set, since by default kernel memory comes from inside the
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * kernel cage.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Enable VA->PA translation sanity checking on DEBUG kernels.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Disabled by default. This is incompatible with some
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * drivers (error injector, RSM) so if it breaks you get
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * to keep both pieces.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Private sfmmu routines (prototypes)
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic int sfmmu_steal_this_hblk(struct hmehash_bucket *,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlvoid sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic void sfmmu_gettte(struct hat *, caddr_t, tte_t *);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void hat_pagereload(struct page *, struct page *);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl pfn_t, int, int, int, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_tlb_all_demap(void);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic int sfmmu_idcache_constructor(void *, void *, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_idcache_destructor(void *, void *);
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic int sfmmu_hblkcache_constructor(void *, void *, int);
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic void sfmmu_hblkcache_destructor(void *, void *);
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic void sfmmu_hblkcache_reclaim(void *);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void hat_lock_init(void);
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic void hat_kstat_init(void);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
cb5caa98562cf06753163f558cbcfe30b8f4673adjlextern void sfmmu_clear_utsbinfo(void);
e37190e5b4531a897e4191a30b8f41678b582e25michen/* kpm prototypes */
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic int sfmmu_kpme_lookup(struct kpme *, page_t *);
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic int sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
e37190e5b4531a897e4191a30b8f41678b582e25michenstatic int sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michenstatic kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
e37190e5b4531a897e4191a30b8f41678b582e25michen/* kpm globals */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Enable trap level tsbmiss handling
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * required TLB shootdowns in this case, so handle w/ care. Off by default.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Semi-private sfmmu data structures. Some of them are initialize in
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * startup or in hat_init. Some of them are private but accessed by
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * assembly code or mach_sfmmu.c
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstruct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstruct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint uhmehash_num; /* # of buckets in user hash table */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint khmehash_num; /* # of buckets in kernel hash table */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichencaddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint ktsb_szcode; /* kernel 8k-indexed tsb size code */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichencaddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenint dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichencaddr_t utsb_vabase; /* reserved kernel virtual memory */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichencaddr_t utsb4m_vabase; /* for trap handler TSB accesses */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* sun4v */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenvmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Size to use for TSB slabs. Future platforms that support page sizes
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * larger than 4M may wish to change these values, and provide their own
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * assembly macros for building and decoding the TSB base register contents.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint_t tsb_slab_mask = 0x1ff; /* 4M page alignment for 8K pfn */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/* largest TSB size to grow to, will be smaller on smaller memory systems */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Tunable parameters dealing with TSB policies.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * This undocumented tunable forces all 8K TSBs to be allocated from
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * the kernel heap rather than from the kmem_tsb_default_arena arenas.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Decide whether to use per-lgroup arenas, or one global set of
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * TSB arenas. The default is not to break up per-lgroup, since
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * most platforms don't recognize any tangible benefit from it.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Used for growing the TSB based on the process RSS.
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * tsb_rss_factor is based on the smallest TSB, and is
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * shifted by the TSB size to determine if we need to grow.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * The default will grow the TSB if the number of TTEs for
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * this page size exceeds 75% of the number of TSB entries,
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * which should _almost_ eliminate all conflict misses
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * (at the expense of using up lots and lots of memory).
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/* which TSB size code to use for new address spaces or if rss sizing off */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic int tsb_alloc_count = 0;
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/* if set to 1, will remap valid TTEs when growing TSB. */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * If we have more than this many mappings, allocate a second TSB.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * This default is chosen because the I/D fully associative TLBs are
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * assumed to have at least 8 available entries. Platforms with a
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * larger fully-associative TLB could probably override the default.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * kstat data
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Global data
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/* sfmmu locking operations */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic kmutex_t *sfmmu_mlspl_enter(struct page *, int);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/* sfmmu internal locking operations - accessed directly */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic void sfmmu_mlist_reloc_enter(page_t *, page_t *,
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic void sfmmu_hat_lock_all(void);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenstatic void sfmmu_hat_unlock_all(void);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Array of mutexes protecting a page's mapping list and p_nrm field.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * The hash function looks complicated, but is made up so that:
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * "pp" not shifted, so adjacent pp values will hash to different cache lines
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * (8 byte alignment * 8 bytes/mutes == 64 byte coherency subblock)
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * "pp" >> mml_shift, incorporates more source bits into the hash result
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * "& (mml_table_size - 1), should be faster than using remainder "%"
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * Hopefully, mml_table, mml_table_size and mml_shift are all in the same
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * cacheline, since they get declared next to each other below. We'll trust
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * ld not to do something random.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#define MLIST_HASH(pp) (mlist_hash_debug ? &mml_table[0] : \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#else /* !DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen ((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* !DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichenuint_t mml_shift; /* log2(mml_table_sz) + 3 for align */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * kpm_page lock hash.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * All slots should be used equally and 2 adjacent kpm_page_t's
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * shouldn't have their mutexes in the same cache line.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#define KPMP_HASH(kpp) (kpmp_hash_debug ? &kpmp_table[0] : &kpmp_table[ \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen ((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#else /* !DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen ((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#define KPMP_SHASH(kpp) (kpmp_hash_debug ? &kpmp_stable[0] : &kpmp_stable[ \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen (((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#else /* !DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen (((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen#endif /* DEBUG */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * SPL_HASH was improved to avoid false cache line sharing
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen (&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * hat_unload_callback() will group together callbacks in order
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * to avoid xt_sync() calls. This is the maximum size of the group.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Debugging trace ring buffer for stolen and freed ctxs. The
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * stolen_ctxs[] array is protected by the ctx_trace_mutex.
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstruct ctx_trace *ctx_trace_last = &stolen_ctxs[TRSIZE-1];
cb5caa98562cf06753163f558cbcfe30b8f4673adjl#endif /* DEBUG */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * kpm virtual address to physical address
cb5caa98562cf06753163f558cbcfe30b8f4673adjl r = ((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift; \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl if (r != 0) { \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl if (r > v) \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Wrapper for vmem_xalloc since vmem_create only allows limited
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * parameters for vm_source_alloc functions. This function allows us
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * to specify alignment consistent with the size of the object being
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * allocated.
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void *
cb5caa98562cf06753163f558cbcfe30b8f4673adjlsfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/* Common code for setting tsb_alloc_hiwater. */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl#define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * a single TSB. physmem is the number of physical pages so we need physmem 8K
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * TTEs to represent all those physical pages. We round this up by using
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * 1<<highbit(). To figure out which size code to use, remember that the size
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * code is just an amount to shift the smallest TSB size to get the size of
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * highbit() - 1) to get the size code for the smallest TSB that can represent
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * all of physical memory, while erring on the side of too much.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * If the computed size code is less than the current tsb_max_growsize, we set
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * tsb_max_growsize to the computed size code. In the case where the computed
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * size code is greater than tsb_max_growsize, we have these restrictions that
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * apply to increasing tsb_max_growsize:
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * 1) TSBs can't grow larger than the TSB slab size
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl i--; /* 2^n case, round down */ \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * tsb_info which handles that TTE size.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Return the number of mappings present in the HAT
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * for a particular process and page size.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Macro to use to unload entries from the TSB.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * It has knowledge of which page sizes get replicated in the TSB
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * and will call the appropriate unload routine for the appropriate size.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl } else { \
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/* Update tsb_alloc_hiwater after memory is configured. */
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen/*ARGSUSED*/
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void
cb5caa98562cf06753163f558cbcfe30b8f4673adjlsfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* Assumes physmem has already been updated. */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * deleted.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/*ARGSUSED*/
cb5caa98562cf06753163f558cbcfe30b8f4673adjl return (0);
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/*ARGSUSED*/
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void
cb5caa98562cf06753163f558cbcfe30b8f4673adjlsfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Whether the delete was cancelled or not, just go ahead and update
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * tsb_alloc_hiwater and tsb_max_growsize.
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * HME_BLK HASH PRIMITIVES
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Enter a hme on the mapping list for page pp.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * When large pages are more prevalent in the system we might want to
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * keep the mapping list in ascending order by the hment size. For now,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * small pages are more frequent, so don't slow it down.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl } else { \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* EMPTY */ \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Enter a hme on the mapping list for page pp.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * If we are unmapping a large translation, we need to make sure that the
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * change is reflect in the corresponding bit of the p_index field.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl } else { \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* zero out the entry */ \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* remove mappings for remainder of large pg */ \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * This function returns the hment given the hme_blk and a vaddr.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * It assumes addr has already been checked to belong to hme_blk's
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Version of HBLKTOHME that also returns the index in hmeblkp
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * of the hment.
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Disable any page sizes not supported by the CPU
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Initialize mmu-specific large page sizes.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
606f6aa3d37f0f8e8282e483c1400bae5275aeebmichen * Initialize the hardware address translation structures.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Hardware-only bits in a TTE
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* Initialize the hash locks */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl for (i = 0; i < khmehash_num; i++) {
cb5caa98562cf06753163f558cbcfe30b8f4673adjl for (i = 0; i < uhmehash_num; i++) {
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Initialize ctx structures and list lock.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * We keep two lists of ctxs. The "free" list contains contexts
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * ready to use. The "dirty" list contains contexts that are OK
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * to use after flushing the TLBs of any stale mappings.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Initialize ism mapping list lock.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sfmmuid_cache = kmem_cache_create("sfmmuid_cache", sizeof (sfmmu_t),
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Since we only use the tsb8k cache to "borrow" pages for TSBs
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * from the heap when low on memory or when TSB_FORCEALLOC is
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * specified, don't use magazines to cache them--we want to return
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * them to the system as quickly as possible.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * memory, which corresponds to the old static reserve for TSBs.
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * memory we'll allocate for TSB slabs; beyond this point TSB
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * allocations will be taken from the kernel heap (via
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * consumer.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* Set tsb_max_growsize. */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * On smaller memory systems, allocate TSB memory in 512K chunks
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * instead of the default 4M slab size. The trap handlers need to
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * be patched with the final slab shift since they need to be able
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * to construct the TSB pointer at runtime.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl tsb_slab_mask = 0x3f; /* 512K page alignment for 8K pfn */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Set up memory callback to update tsb_alloc_hiwater and
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * tsb_max_growsize.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * kmem_tsb_arena is the source from which large TSB slabs are
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * drawn. The quantum of this arena corresponds to the largest
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * TSB size we can dynamically allocate for user processes.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Currently it must also be a supported page size since we
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * use exactly one translation entry to map each slab page.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * which most TSBs are allocated. Since most TSB allocations are
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * typically 8K we have a kmem cache we stack on top of each
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * kmem_tsb_default_arena to speed up those allocations.
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * Note the two-level scheme of arenas is required only
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * because vmem_create doesn't allow us to specify alignment
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * requirements. If this ever changes the code could be
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen * simplified to use only one level of arenas.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen char s[50];
cb5caa98562cf06753163f558cbcfe30b8f4673adjl for (i = 0; i < NLGRPS_MAX; i++) {
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
80b80bf0416a7eb4be4215b2e192cafd03ca80b7michen segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
cb5caa98562cf06753163f558cbcfe30b8f4673adjl sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * We grab the first hat for the kernel,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Initialize hblk_reserve.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Reserve some kernel virtual address space for the locked TTEs
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * that allow us to probe the TSB from TL>0.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * The big page VAC handling code assumes VAC
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * will not be bigger than the smallest big
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * page- which is 64K.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Initialize relocation locks. kpr_suspendlock is held
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * at PIL_MAX to prevent interrupts from pinning the holder
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * of a suspended TTE which may access it leading to a
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * deadlock condition.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Initialize locking for the hat layer, called early during boot.
cb5caa98562cf06753163f558cbcfe30b8f4673adjlstatic void
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * initialize the array of mutexes protecting a page's mapping
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * list and p_nrm field.
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen for (i = 0; i < mml_table_sz; i++)
cb5caa98562cf06753163f558cbcfe30b8f4673adjl for (i = 0; i < kpmp_table_sz; i++) {
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * Initialize array of mutex locks that protects sfmmu fields and
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * TSB lists.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl for (i = 0; i < SFMMU_NUM_LOCK; i++)
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl mutex_init(&ctx_trace_mutex, NULL, MUTEX_DEFAULT, NULL);
cb5caa98562cf06753163f558cbcfe30b8f4673adjl#endif /* DEBUG */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Allocate a hat structure.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Called when an address space first uses a hat.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * hat_kern_setup() will call sfmmu_init_ktsbinfo()
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * to setup tsb_info for ksfmmup.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Just set to invalid ctx. When it faults, it will
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * get a valid ctx. This would avoid the situation
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * where we get a ctx, but it gets stolen and then
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * we fault when we try to run and so have to get
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * another ctx.
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen /* initialize original physical page coloring bin */
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen /* chose a random tsb size for stress testing */
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen#endif /* DEBUG */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl for (i = 0; i < max_mmu_page_sizes; i++) {
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Hat_setup, makes an address space context the current active one.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * In sfmmu this translates to setting the secondary context with the
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * corresponding context.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* Init needs some special treatment. */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Make sure that we have
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * 1. a TSB
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * 2. a valid ctx that doesn't get stolen after this point.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Swap in the TSB. hat_init() allocates tsbinfos without
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * TSBs, but we need one for init, since the kernel does some
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * special things to set up its stack and needs the TSB to
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen * resolve page faults.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Allow ctx to be stolen.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Free all the translation resources for the specified address space.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Called from as_free when an address space is being destroyed.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
bf1e3bee1b13b3a914f0dd817a04f6e0ce8e0691michen for (i = 0; i < mmu_page_sizes; i++) {
cb5caa98562cf06753163f558cbcfe30b8f4673adjl /* EMPTY */
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * Set up any translation structures, for the specified address space,
cb5caa98562cf06753163f558cbcfe30b8f4673adjl * that are needed or preferred when the process is being swapped in.
cb5caa98562cf06753163f558cbcfe30b8f4673adjl/* ARGSUSED */
int cnum;
struct free_tsb {
for (i = 0; i <= UHMEHASH_SZ; i++) {
prevpa = 0;
while (hmeblkp) {
(void *)pp);
#if defined(SF_ERRATA_57)
int use_lgpg = 0;
if (len == 0)
#if defined(SF_ERRATA_57)
(void *)pp);
(void *)pp);
case HAT_STRICTORDER:
case HAT_UNORDERED_OK:
case HAT_MERGING_OK:
case HAT_LOADCACHING_OK:
case HAT_STORECACHING_OK:
while (len) {
if (!use_lgpg) {
flags);
pfn++;
flags);
flags);
flags);
flags);
pfn++;
int ttesz;
int large_pages_disable;
#if defined(SF_ERRATA_57)
numpg);
if (npgs) {
int index;
while (npgs) {
pps++;
npgs--;
int phys = 0;
#ifndef sun4v
#if defined(UTSB_PHYS)
int phys = 0;
#ifndef sun4v
#if defined(UTSB_PHYS)
int szc = 0;
#ifdef DEBUG
if (tsb_grow_stress) {
szc++;
return (szc);
int ret;
return (ret);
static struct hmehash_bucket *
int hmeshift;
return (hmebp);
static struct hme_blk *
int hmeshift;
goto ttearray_realloc;
goto ttearray_realloc;
goto ttearray_realloc;
return (hmeblkp);
#ifdef DEBUG
#if defined(TTE_IS_GLOBAL)
#ifdef DEBUG
switch (size) {
case TTE8K:
case TTE64K:
case TTE512K:
case TTE4M:
case (TTE32M):
case (TTE256M):
if (pp) {
if (remap) {
(void *)hmeblkp);
if (pp) {
(void *)hmeblkp);
#ifdef DEBUG
#ifdef DEBUG
if (pp) {
if (!remap) {
int cflags = 0;
int vac_err = 0;
int newidx = 0;
if (remap)
if (vac_err == 0) {
if (newidx)
~index);
pps--;
pfnum++;
if (vac_err) {
npgs);
return (HAT_TMPNC);
return (HAT_TMPNC);
static struct hme_blk *
return (hmeblkp);
int hashno)
prevpa = 0;
while (hmeblkp) {
pr_hblk);
if (shadow) {
shadow = 0;
endaddr);
hashno++;
static caddr_t
if (ret < 0)
goto readtte;
sfhme++;
return (addr);
int capture_cpus)
return (id);
int locked = 0;
return (EINVAL);
return (ENOMEM);
hashno++) {
return (ENXIO);
return (ENXIO);
pp);
goto rehash;
locked = 0;
goto rehash;
if (locked)
if (locked)
int locked = 0;
hashno++) {
if (!panicstr) {
saddr);
goto rehash;
locked = 0;
goto rehash;
if (!panicstr) {
(void *)pp);
if (locked)
if (locked)
== PFN_SUSPENDED) {
if (ism_blkp) {
if (locked_hatid) {
hashno++;
*attr = 0;
int mode)
(void *)addr);
hashno++;
static caddr_t
int ttesz;
int ret;
int use_demap_range;
#if defined(SF_ERRATA_57)
int check_exec;
if (use_demap_range) {
#if defined(SF_ERRATA_57)
goto next_addr;
if (pp) {
#if defined(SF_ERRATA_57)
if (ret < 0) {
if (pml) {
} else if (ret > 0) {
if (pml) {
sfhmep++;
return (addr);
static uint64_t
switch (mode) {
case SFMMU_CHGATTR:
case SFMMU_SETATTR:
case SFMMU_CLRATTR:
static uint_t
return (attr);
hashno++;
static caddr_t
int ttesz;
int ret;
int use_demap_range;
#if defined(SF_ERRATA_57)
int check_exec;
#ifdef DEBUG
#if defined(SF_ERRATA_57)
if (use_demap_range) {
goto next_addr;
if (pp) {
#if defined(SF_ERRATA_57)
if (ret < 0) {
if (pml) {
} else if (ret > 0) {
if (pml) {
sfhmep++;
return (addr);
static uint_t
switch (vprot) {
case (PROT_READ):
case (PROT_EXEC):
case (PROT_WRITE):
int addr_cnt = 0;
int cnum;
for (i = 0; i <= UHMEHASH_SZ; i++) {
prevpa = 0;
while (hmeblkp) {
goto next_block;
goto next_block;
for (a = 0; a < MAX_CB_ADDR; ++a) {
addr_cnt = 0;
for (a = 0; a < addr_cnt; ++a) {
int addr_count = 0;
iskernel = 0;
if (iskernel) {
hashno++;
pr_hblk);
if (iskernel) {
hashno--;
if (addr_count > 0 &&
--addr_count;
pr_hblk);
for (a = 0; a < MAX_CB_ADDR; ++a) {
addr_count = 0;
if (iskernel) {
for (a = 0; a < addr_count; ++a) {
int sz;
int p_index;
sz = 0;
sz++;
return (sz);
static caddr_t
int ttesz;
long ttecnt;
int ret;
int use_demap_range;
#ifdef DEBUG
if (use_demap_range) {
ttecnt = 0;
if (pml) {
goto tte_unloaded;
if (ret <= 0) {
goto again;
goto tte_unloaded;
ttecnt++;
if (use_demap_range) {
if (do_virtual_coloring) {
CACHE_FLUSH, 0);
if (pp) {
membar_stst();
goto again;
#ifdef DEBUG
if (pml) {
sfhmep++;
if (ttecnt > 0)
return (addr);
hashno++;
static caddr_t
int ttesz;
int ret;
if (pp) {
if (ret < 0) {
if (pml) {
if (ret > 0) {
hmeblkp, 0, 0);
if (pml) {
sfhmep++;
return (addr);
int sz;
if (rm == 0) {
if (!pp)
int ret;
int locked = 0;
if (capture_cpus)
*capture_cpus = 0;
top:
if (locked)
if (ret != 0)
if (locked) {
goto top;
if (locked)
int locked = 0;
top:
if (locked)
if (locked) {
goto top;
if (locked)
if (dtrace_kreloc_init)
(*dtrace_kreloc_init)();
goto again;
while (index != 0) {
if (index != 0)
cons++;
goto retry;
#ifdef DEBUG
struct prle {
int status;
int pausecpus;
static int prl_entry;
#define PAGE_RELOCATE_LOG(t, r, s, p) \
#define PAGE_RELOCATE_LOG(t, r, s, p)
int old_pil;
int cap_cpus;
int ret;
return (EAGAIN);
for (i = 0; i < npages; i++) {
tpp++;
if (ret != 0) {
return (EAGAIN);
if (ret != 0) {
goto suspend_fail;
if (ret != 0) {
return (EAGAIN);
int xhme_blks;
int pa_hments;
xhme_blks = 0;
pa_hments = 0;
pa_hments++;
while (index != 0) {
if (index != 0)
cons++;
goto retry;
if (xhme_blks) {
goto retry_xhat;
static cpuset_t
#ifdef DEBUG
int ttesz;
int ret;
return (cpuset);
#ifdef DEBUG
if (ret < 0) {
#ifdef DEBUG
goto readtte;
if (ret == 0) {
if (do_virtual_coloring)
} else if (do_virtual_coloring) {
CACHE_FLUSH, 0);
membar_stst();
return (cpuset);
while (index != 0) {
if (index != 0)
cons++;
goto retry;
if (dtrace_kreloc_fini)
(*dtrace_kreloc_fini)();
index = 0;
while (index) {
cons++;
goto retry;
static cpuset_t
int ret;
if (ret < 0) {
goto sfmmu_pagesync_retry;
if (ret > 0) {
return (cpuset);
static cpuset_t
int ret;
if (ret < 0)
goto retry;
if (ret > 0) {
return (cpuset);
int index;
int cons;
while (index) {
cons++;
goto retry;
#ifdef DEBUG
if (hat_check_vtop == 0)
if (!pp)
while (index != 0) {
if (index != 0)
cons++;
goto again;
== PFN_SUSPENDED) {
return (pfn);
int badcaller = 0;
extern int segkmem_reloc;
== PFN_SUSPENDED) {
if (badcaller) {
hat_kpr_enabled = 0;
segkmem_reloc = 0;
return (pfn);
if (ism_blkp) {
if (locked_hatid) {
return (pfn);
hashno++;
return (PFN_INVALID);
if (kpm_enable)
while (index) {
sz++;
return (cnt);
int index;
int sz;
int sync = 0;
if (pszc == 0) {
goto out;
if (index) {
while (index) {
sz++;
sz++;
if (sync) {
while (--npgs > 0) {
if (sz) {
if (sz == 0) {
out:
return (npgs);
for (i = 0; i < mmu_page_sizes; i++)
return (assize);
for (i = 0; i < mmu_page_sizes; i++)
return (assize);
int i, added;
int reload_mmu = 0;
#ifdef DEBUG
return (EINVAL);
return (EINVAL);
#ifdef DEBUG
while (ism_blkp) {
added = 0;
while (!added) {
for (i = 0; i < ISM_MAP_SLOTS; i++) {
membar_stst();
membar_stst();
for (i = 0; i <= ismszc; i++) {
switch (ismszc) {
case TTE256M:
case TTE32M:
case TTE4M:
found = 0;
for (i = 0; i < ISM_MAP_SLOTS; i++) {
if (!found)
if (found) {
while (ism_blkp) {
if (ism_blkp) {
for (i = 0; i <= ismszc; i++) {
#ifdef HBLK_TRACE
#ifdef HBLK_TRACE
prevpa = 0;
while (hmeblkp) {
prevpa = 0;
while (hmeblkp) {
int color;
return (color);
int vcolor;
int sz;
while (dopgs != 0) {
curnpgs);
if (dopgs == 0) {
int clr_valid = 0;
int i, ncolors;
for (i = 0; i < npages; i++) {
clr_valid = 0;
if (!clr_valid) {
for (i = 0; i < ncolors; i++) {
for (i = 0; i < npages; i++) {
bcolor);
#ifdef DEBUG
if (ret < 0) {
switch (flags) {
case HAT_CACHE:
case HAT_TMPNC:
case HAT_UNCACHE:
membar_enter();
static struct ctx *
return (ctx);
found_stealable_ctx = 0;
if (delay_tlb_flush)
if (found_stealable_ctx) {
goto retry;
membar_exit();
return (ctx);
int cnum;
static tsb_replace_rc_t
int ctxnum;
return (TSB_LOSTRACE);
return (TSB_SUCCESS);
return (TSB_ALLOCFAIL);
return (TSB_LOSTRACE);
#ifdef DEBUG
if (prevtsb)
membar_exit();
return (TSB_SUCCESS);
int i, cnum;
if (tmp_pgsz) {
for (i = 0; i < mmu_page_sizes; i++)
uint8_t i;
int sectsb_thresh;
for (i = 0; i < mmu_page_sizes; i++) {
if (&mmu_check_page_sizes)
int tsb_bits;
if (growing)
if (growing)
0 : TSB_ALLOC;
TSB_OK_GROW()) {
return (szc);
return (TTE4M);
return (szc);
return (TTE8K);
int ctxnum;
membar_enter();
#ifdef DEBUG
while (blkp) {
#ifdef DEBUG
for (i = 0; i < ISM_MAP_SLOTS; i++) {
kmutex_t *
kmutex_t *
static kmutex_t *
if (pszc == 0) {
return (mtx);
return (mtx);
goto again;
return (mtx);
goto again;
static uint_t
freehblkcnt--;
static uint_t
freehblkcnt++;
freehblkcnt--;
#ifdef DEBUG
#ifdef DEBUG
if (*high)
if (high)
static hatlock_t *
return (hatlockp);
return (NULL);
static hatlock_t *
return (NULL);
return (hatlockp);
return (NULL);
sfmmu_hat_lock_all(void)
for (i = 0; i < SFMMU_NUM_LOCK; i++)
sfmmu_hat_unlock_all(void)
if (!hatlock_held)
if (!hatlock_held)
if (!hatlock_held)
if (!hatlock_held)
static struct hme_blk *
int sleep;
if (!hblk_alloc_dynamic) {
hmeblkp =
hmeblkp =
goto hblk_init;
goto hblk_verify;
goto fill_hblk;
goto fill_hblk;
goto re_verify;
owner = 0;
return (newhblkp);
goto fill_hblk;
goto fill_hblk;
return (hmeblkp);
int size;
if (shw_hblkp) {
static struct hme_blk *
BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
prevpa = 0;
while (hmeblkp) {
pr_hblk)) {
prevpa = 0;
while (hmeblkp) {
return (hmeblkp);
if (shw_hblkp) {
struct hme_blk *
return (hmeblkp);
switch (rc) {
case TSB_SUCCESS:
case TSB_ALLOCFAIL:
if (!gotfirst) {
char lwp_save_state;
goto retry;
goto retry;
goto retry;
int ctxnum;
int vcolor;
int ttesz;
ctxnum);
int hat_lock_held)
if (!tlb_noflush) {
if (!hat_lock_held)
if (!hat_lock_held)
ctxnum);
int ctxnum;
if (tlb_noflush)
if (!hat_lock_held)
if (!hat_lock_held)
static int sfmmu_xcall_save;
int ctxnum;
int pgunload = 0;
int dirtypg = 0;
while (bitvec != 0) {
dirtypg = 0;
pgcnt++;
dirtypg++;
int ctxnum;
sfmmu_tlb_all_demap(void)
int cnum;
if (delay_tlb_flush) {
int cnum;
if (&sendmondo_in_recover) {
while (sendmondo_in_recover) {
membar_exit();
int err;
return (err);
int ret;
#ifndef sun4v
int lowmem = 0;
int ret;
#ifdef DEBUG
tsb_alloc_count = 0;
return (ENOMEM);
if (tsb_lgrp_affinity)
#ifdef DEBUG
return (EAGAIN);
if (ret != 0) {
if (kmem_cachep) {
S_WRITE);
return (EAGAIN);
sfmmu_init_tsbs(void)
#ifndef sun4v
extern int dcache_line_mask;
#ifndef sun4v
if (kpm_enable == 0)
if (kpm_smallpages) {
if (kpm_smallpages == 0) {
#ifdef DEBUG
if (ktsb_phys)
static uint64_t
return (pa);
size_t i;
ulong_t j = 0, k = 0;
* it's supposed to do, see hat.c and hat_srmmu.c
return (FC_NOSUPPORT);
int index;
while (npgs-- > 0) {
switch (feature) {
case HAT_SHARED_PT:
case HAT_DYNAMIC_ISM_UNMAP:
case HAT_VMODSORT:
hat_kstat_init(void)
if (ksp) {
if (ksp) {
if (ksp) {
#ifdef DEBUG
pfn_t i, j, k;
#ifdef lint
int new_offset;
int vpshift;
int last_prefetch;
vpshift =
if (kpm_enable == 0) {
return (vaddr);
if (kpm_enable == 0) {
(void *)pp);
page_t *
int error;
if (kpm_enable == 0) {
return (ENOTSUP);
return (EFAULT);
return (EFAULT);
if (kpm_smallpages == 0)
return (error);
pgcnt_t i;
if (kpm_enable == 0)
for (i = 0; i < nentries; i++)
if (kpm_enable == 0)
if (kpm_enable == 0)
if (kpm_enable == 0)
if (kpm_enable == 0)
if (kpm_smallpages == 0)
return (end);
if (kpm_enable == 0)
if (kpm_enable == 0)
if (lo) {
if (kpm_smallpages == 0) {
if (hi) {
if (kpm_smallpages == 0) {
int vcolor;
void *base;
return (pfn);
struct kpme *p;
if (p == kpme)
static caddr_t
int kpm_vac_range;
int uncached;
int oldval;
if (kpm_smallpages)
goto smallpages_mapin;
if (uncached) {
if (kpm_vac_range == 0) {
if (uncached == 0)
goto exit;
if (kpm_vac_range == 0) {
if (uncached == 0) {
exit:
return (vaddr);
if (uncached == 0) {
if (oldval != 0)
return (vaddr);
int alias_range;
int oldval;
if (kpm_smallpages)
goto smallpages_mapout;
if (alias_range) {
(void *)kp);
goto exit;
#ifdef DEBUG
if (kpm_tlb_flush)
(void *)kp);
(void *)kp);
exit:
#ifdef DEBUG
if (kpm_tlb_flush)
#define abs(x) ((x) < 0 ? -(x) : (x))
static caddr_t
*kpm_vac_rangep = 0;
return (vaddr);
return (vaddr);
return (vaddr);
return (vaddr);
#define KPM_TSBM_CONFL_GONE (0)
int error;
int alias_range;
int uncached = 0;
int badstate;
return (EFAULT);
if (alias_range) {
goto smallexit;
goto exit;
if (badstate == 0)
goto largeexit;
goto badstate_exit;
switch (tsbmcase) {
goto largeexit;
goto smallexit;
goto smallexit;
goto smallexit;
(void *)pp);
goto largeexit;
goto smallexit;
(void *)pp);
(void *)pp);
goto smallexit;
if (uncached == 0)
error = 0;
goto exit;
error = 0;
exit:
return (error);
int error = 0;
int oldval;
return (EFAULT);
return (error);
int vcolor;
#define KPM_VUL_BIG (0)
int newcolor;
int badstate = 0;
if (kpm_smallpages)
goto smallpages_vac_unload;
(void *)kp);
if (newcolor == 0)
goto exit;
} else if (newcolor == 0) {
badstate++;
goto exit;
if (badstate)
goto exit;
goto exit;
switch (vacunlcase) {
badstate++;
exit:
if (badstate) {
if (newcolor == 0)
if (kpm_smallpages)
goto smallpages_hme_unload;
(void *)kp);
(void *)kp);
static kpm_hlk_t *
return (NULL);
return (kpmp);
#define KPM_UNC_BIG (0)
int badstate = 0;
int oldval;
if (kpm_smallpages)
goto smallpages_page_cache;
goto exit;
if (badstate)
goto exit;
switch (pgcacase) {
badstate++;
goto exit;
switch (pgcacase) {
badstate++;
exit:
if (badstate) {
hat_dump(void)