mach_kpm.c revision 0d5ae8c1274da6a6c74059317942eea625104946
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Kernel Physical Mapping (segkpm) hat interface routines for sun4u.
*/
#include <vm/hat_sfmmu.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <vm/mach_kpm.h>
/* kpm prototypes */
void sfmmu_kpm_pageunload(page_t *);
static void sfmmu_kpm_demap_large(caddr_t);
static void sfmmu_kpm_demap_small(caddr_t);
static void sfmmu_kpm_demap_tlbs(caddr_t);
void sfmmu_kpm_hme_unload(page_t *);
void sfmmu_kpm_page_cache(page_t *, int, int);
extern uint_t vac_colors;
/*
* Kernel Physical Mapping (kpm) facility
*/
void
{}
/* -- hat_kpm interface section -- */
/*
* Mapin a locked page and return the vaddr.
* When a kpme is provided by the caller it is added to
* the page p_kpmelist. The page to be mapped in must
* be at least read locked (p_selock).
*/
{
if (kpm_enable == 0) {
}
}
/*
* Tolerate multiple mapins for the same kpme to avoid
* the need for an extra serialization.
*/
} else {
}
return (vaddr);
}
/*
* Mapout a locked page.
* When a kpme is provided by the caller it is removed from
* the page p_kpmelist. The page to be mapped out must be at
* least read locked (p_selock).
* Note: The seg_kpm layer provides a mapout interface for the
* case that a kpme is used and the underlying page is unlocked.
* This can be used instead of calling this function directly.
*/
void
{
if (kpm_enable == 0) {
return;
}
if (IS_KPM_ADDR(vaddr) == 0) {
return;
}
return;
}
panic("hat_kpm_mapout: kpme not found pp=%p",
(void *)pp);
} else {
}
}
/*
* hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
* memory addresses that are not described by a page_t. It can
* only be supported if vac_colors=1, because there is no page_t
* and corresponding kpm_page_t to track VAC conflicts. Currently,
* this may not be used on pfn's backed by page_t's, because the
* kpm state may not be consistent in hat_kpm_fault if the page is
* mapped using both this routine and hat_kpm_mapin. KPM should be
* cleaned up on sun4u/vac_colors=1 to be minimal as on sun4v.
* The caller must only pass pfn's for valid physical addresses; violation
* of this rule will cause panic.
*/
{
return (vaddr);
}
/*ARGSUSED*/
void
{
/* empty */
}
/*
* Return the kpm virtual address for the page at pp.
* If checkswap is non zero and the page is backed by a
* swap vnode the physical address is used rather than
* p_offset to determine the kpm region.
* Note: The function has to be used w/ extreme care. The
* stability of the page identity is in the responsibility
* of the caller.
*/
/*ARGSUSED*/
{
else
}
}
/*
* Return the page for the kpm virtual address vaddr.
* Caller is responsible for the kpm mapping and lock
* state of the page.
*/
page_t *
{
return (page_numtopp_nolock(pfn));
}
/* page to kpm_page */
\
}
/* page to kpm_spage */
\
}
/*
* hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred
* which could not be resolved by the trap level tsbmiss handler for the
* following reasons:
* . The vaddr is in VAC alias range (always PAGESIZE mapping size).
* . The kpm (s)page range of vaddr is in a VAC alias prevention state.
* . tsbmiss handling at trap level is not desired (DEBUG kernel only,
* kpm_tsbmtl == 0).
*/
int
{
int error;
if (kpm_enable == 0) {
return (ENOTSUP);
}
}
/*
* hat_kpm_mapin_pfn may add a kpm translation for memory that falls
* outside of memsegs. Check for this case and provide the translation
* here.
*/
error = 0;
else if (kpm_smallpages == 0)
else
return (error);
}
/*
* memseg_hash[] was cleared, need to clear memseg_phash[] too.
*/
void
{
pgcnt_t i;
if (kpm_enable == 0)
return;
for (i = 0; i < nentries; i++)
memseg_phash[i] = MSEG_NULLPTR_PA;
}
/*
* Update memseg_phash[inx] when memseg_hash[inx] was changed.
*/
void
{
if (kpm_enable == 0)
return;
}
/*
* Update kpm memseg members from basic memseg info.
*/
void
{
if (kpm_enable == 0)
return;
}
/*
* Setup nextpa when a memseg is inserted.
* Assumes that the memsegslock is already held.
*/
void
{
if (kpm_enable == 0)
return;
}
/*
* Setup memsegspa when a memseg is (head) inserted.
* Called before memsegs is updated to complete a
* memseg insert operation.
* Assumes that the memsegslock is already held.
*/
void
{
if (kpm_enable == 0)
return;
}
/*
* Return end of metadata for an already setup memseg.
*
* Note: kpm_pages and kpm_spages are aliases and the underlying
* member of struct memseg is a union, therefore they always have
* the same address within a memseg. They must be differentiated
* when pointer arithmetic is used with them.
*/
{
if (kpm_smallpages == 0)
else
return (end);
}
/*
* Update memsegspa (when first memseg in list
* is deleted) or nextpa when a memseg deleted.
* Assumes that the memsegslock is already held.
*/
void
{
if (kpm_enable == 0)
return;
} else {
}
}
/*
* Update kpm members for all memseg's involved in a split operation
* and do the atomic update of the physical memseg chain.
*
* Note: kpm_pages and kpm_spages are aliases and the underlying member
* of struct memseg is a union, therefore they always have the same
* address within a memseg. With that the direct assignments and
* va_to_pa conversions below don't have to be distinguished wrt. to
* kpm_smallpages. They must be differentiated when pointer arithmetic
* is used with them.
*
* Assumes that the memsegslock is already held.
*/
void
{
if (kpm_enable == 0)
return;
if (lo) {
/* align end to kpm page size granularity */
}
/* mid */
/* align end to kpm page size granularity */
if (kpm_smallpages == 0) {
} else {
}
if (hi) {
/* align end to kpm page size granularity */
if (kpm_smallpages == 0) {
} else {
}
}
/*
* Atomic update of the physical memseg chain
*/
} else {
}
}
/*
* Walk the memsegs chain, applying func to each memseg span and vcolor.
*/
void
{
int vcolor;
void *base;
}
}
}
/* -- sfmmu_kpm internal section -- */
/*
* Return the page frame number if a valid segkpm mapping exists
* for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
* Should only be used by other sfmmu routines.
*/
{
return (pfn);
else
return ((pfn_t)PFN_INVALID);
}
/*
* Lookup a kpme in the p_kpmelist.
*/
static int
{
struct kpme *p;
if (p == kpme)
return (1);
}
return (0);
}
/*
* Insert a kpme into the p_kpmelist and increment
* the per page kpm reference count.
*/
static void
{
/* head insert */
if (pp->p_kpmelist)
}
/*
* Remove a kpme from the p_kpmelist and decrement
* the per page kpm reference count.
*/
static void
{
} else {
}
}
}
/*
* Mapin a single page, it is called every time a page changes its state
* from kpm-unmapped to kpm-mapped. It may not be called, when only a new
* kpm instance does a mapin and wants to share the mapping.
* Assumes that the mlist mutex is already grabbed.
*/
static caddr_t
{
kpm_page_t *kp;
int kpm_vac_range;
int uncached;
int oldval;
if (kpm_smallpages)
goto smallpages_mapin;
if (uncached) {
/* ASSERT(pp->p_share); XXX use hat_page_getshare */
if (kpm_vac_range == 0) {
if (kp->kp_refcnts == 0) {
/*
* Must remove large page mapping if it exists.
* Pages in uncached state can only be mapped
* small (PAGESIZE) within the regular kpm
* range.
*/
/* remove go indication */
}
}
kp->kp_refcntc++;
}
PP_SETKPMC(pp);
}
/*
* Have to do a small (PAGESIZE) mapin within this kpm_page
* range since it is marked to be in VAC conflict mode or
* when there are still other small mappings around.
*/
/* tte assembly */
if (uncached == 0)
else
/* tsb dropin */
PP_SETKPMS(pp);
kp->kp_refcnts++;
goto exit;
}
if (kpm_vac_range == 0) {
/*
* Fast path / regular case, no VAC conflict handling
* in progress within this kpm_page range.
*/
/* tte assembly */
/* tsb dropin */
/* Set go flag for TL tsbmiss handler */
if (kp->kp_refcntc == 0)
}
} else {
/*
* The page is not setup according to the common VAC
* prevention rules for the regular and kpm mapping layer
* E.g. the page layer was not able to deliver a right
* vcolor'ed page for a given vaddr corresponding to
* the wanted p_offset. It has to be mapped in small in
* within the corresponding kpm vac range in order to
* prevent VAC alias conflicts.
*/
/* tte assembly */
if (uncached == 0) {
} else {
}
/* tsb dropin */
kp->kp_refcnta++;
/* remove go indication */
}
}
exit:
return (vaddr);
if (uncached == 0) {
/* tte assembly */
} else {
/*
* Just in case this same page was mapped cacheable prior to
* this and the old tte remains in tlb.
*/
/* ASSERT(pp->p_share); XXX use hat_page_getshare */
PP_SETKPMC(pp);
/* tte assembly */
}
/* tsb dropin */
(KPM_MAPPED_GO | KPM_MAPPEDS));
if (oldval != 0)
panic("sfmmu_kpm_mapin: stale smallpages mapping");
return (vaddr);
}
/*
* Mapout a single page, it is called every time a page changes its state
* from kpm-mapped to kpm-unmapped. It may not be called, when only a kpm
* instance calls mapout and there are still other instances mapping the
* page. Assumes that the mlist mutex is already grabbed.
*
* Note: In normal mode (no VAC conflict prevention pending) TLB's are
* not flushed. This is the core segkpm behavior to avoid xcalls. It is
* no problem because a translation from a segkpm virtual address to a
* physical address is always the same. The only downside is a slightly
* increased window of vulnerability for misbehaving _kernel_ modules.
*/
static void
{
kpm_page_t *kp;
int alias_range;
int oldval;
if (kpm_smallpages)
goto smallpages_mapout;
if (alias_range) {
if (kp->kp_refcnta <= 0) {
panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
(void *)kp);
}
/*
* Uncached kpm mappings must always have
* forced "small page" mode.
*/
panic("sfmmu_kpm_mapout: uncached page not "
"kpm marked");
}
PP_CLRKPMC(pp);
/*
* Check if we can resume cached mode. This might
* be the case if the kpm mapping was the only
* mapping in conflict with other non rule
* compliant mappings. The page is no more marked
* as kpm mapped, so the conv_tnc path will not
* change kpm state.
*/
/* remove TSB entry only */
} else {
/* already demapped */
PP_CLRKPMC(pp);
}
kp->kp_refcnta--;
goto exit;
}
/*
* Fast path / regular case.
*/
/* remove go indication */
}
/* remove TSB entry */
#ifdef DEBUG
if (kpm_tlb_flush)
#endif
}
} else {
/*
* The VAC alias path.
* We come here if the kpm vaddr is not in any alias_range
* and we are unmapping a page within a regular kpm_page range that
* is in "small page" mode. If the page is not marked
* P_KPMS it couldn't have a valid PAGESIZE sized TSB
* entry. Dcache flushing is done lazy and follows the
* rules of the regular virtual page coloring scheme.
*
* Per page states and required actions:
* P_KPMC: remove a kpm mapping that is conflicting.
* P_KPMS: remove a small kpm mapping within a kpm_page.
* P_TNC: check if we can re-cache the page.
* P_PNC: we cannot re-cache, sorry.
* Per kpm_page:
* kp_refcntc > 0: page is part of a kpm_page with conflicts.
* kp_refcnts > 0: rm a small mapped page within a kpm_page.
*/
panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
(void *)kp);
}
/*
* Check if we can resume cached mode. This might
* be the case if the kpm mapping was the only
* mapping in conflict with other non rule
* compliant mappings. The page is no more marked
* as kpm mapped, so the conv_tnc path will not
* change kpm state.
*/
/*
* Uncached kpm mappings must always
* have forced "small page" mode.
*/
panic("sfmmu_kpm_mapout: uncached "
"page not kpm marked");
}
}
kp->kp_refcnts--;
PP_CLRKPMS(pp);
}
panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
(void *)kp);
}
PP_CLRKPMC(pp);
kp->kp_refcntc--;
}
}
exit:
return;
if (oldval != KPM_MAPPEDS) {
/*
* When we're called after sfmmu_kpm_hme_unload,
* KPM_MAPPEDSC is valid too.
*/
if (oldval != KPM_MAPPEDSC)
panic("sfmmu_kpm_mapout: incorrect mapping");
}
/* remove TSB entry */
#ifdef DEBUG
if (kpm_tlb_flush)
#endif
panic("sfmmu_kpm_mapout: inconsistent TNC mapping");
PP_CLRKPMC(pp);
/*
* Check if we can resume cached mode. This might be
* the case if the kpm mapping was the only mapping
* in conflict with other non rule compliant mappings.
* The page is no more marked as kpm mapped, so the
* conv_tnc path will not change the kpm state.
*/
} else {
if (oldval != KPM_MAPPEDSC)
panic("sfmmu_kpm_mapout: inconsistent mapping");
PP_CLRKPMC(pp);
}
}
#define abs(x) ((x) < 0 ? -(x) : (x))
/*
* conflicts. Page mapping list and its vcolor parts must be protected.
*/
static caddr_t
{
} else {
}
*kpm_vac_rangep = 0;
}
return (vaddr);
if (PP_NEWPAGE(pp)) {
return (vaddr);
}
return (vaddr);
return (vaddr);
}
/*
* VAC conflict state bit values.
* The following defines are used to make the handling of the
* various input states more concise. For that the kpm states
* per kpm_page and per page are combined in a summary state.
* Each single state has a corresponding bit value in the
* summary state. These defines only apply for kpm large page
* mappings. Within comments the abbreviations "kc, c, ks, s"
* are used as short form of the actual state, e.g. "kc" for
* "kp_refcntc > 0", etc.
*/
/*
* Summary states used in sfmmu_kpm_fault (KPM_TSBM_*).
* See also more detailed comments within the sfmmu_kpm_fault switch.
* Abbreviations used:
* CONFL: VAC conflict(s) within a kpm_page.
* MAPS: Mapped small: Page mapped in using a regular page size kpm mapping.
* RASM: Re-assembling of a large page mapping possible.
* RPLS: Replace: TSB miss due to TSB replacement only.
* BRKO: Breakup Other: A large kpm mapping has to be broken because another
* page within the kpm_page is already involved in a VAC conflict.
* BRKT: Breakup This: A large kpm mapping has to be broken, this page
* is involved in a VAC conflict.
*/
#define KPM_TSBM_CONFL_GONE (0)
#define KPM_TSBM_MAPS_RASM (KPM_KS)
#define KPM_TSBM_MAPS_BRKO (KPM_KC)
/*
* kpm fault handler for mappings with large page size.
*/
int
{
int error;
kpm_page_t *kp;
int alias_range;
int uncached = 0;
int badstate;
}
if (!PP_ISMAPPED_KPM(pp)) {
return (EFAULT);
}
if (alias_range) {
if (kp->kp_refcnta > 0) {
PP_CLRKPMC(pp);
}
/*
* Check for vcolor conflicts. Return here
* w/ either no conflict (fast path), removed hme
* mapping chains (unload conflict) or uncached
* (uncache conflict). VACaches are cleaned and
* p_vcolor and PP_TNC are set accordingly for the
* conflict cases. Drop kpmp for uncache conflict
* cases since it will be grabbed within
* sfmmu_kpm_page_cache in case of an uncache
* conflict.
*/
uncached = 1;
PP_SETKPMC(pp);
}
goto smallexit;
} else {
/*
* We got a tsbmiss on a not active kpm_page range.
* Let segkpm_fault decide how to panic.
*/
}
goto exit;
}
/*
* We should come here only if trap level tsb miss
* handler is disabled.
*/
if (badstate == 0)
goto largeexit;
}
goto badstate_exit;
/*
* Combine the per kpm_page and per page kpm VAC states to
* a summary state in order to make the kpm fault handling
* more concise.
*/
switch (tsbmcase) {
case KPM_TSBM_CONFL_GONE: /* - - - - */
/*
* That's fine, we either have no more vac conflict in
* this kpm page or someone raced in and has solved the
* vac conflict for us -- call sfmmu_kpm_vac_conflict
* to take care for correcting the vcolor and flushing
* the dcache if required.
*/
panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
"state, pp=%p", (void *)pp);
}
goto largeexit;
case KPM_TSBM_MAPS_RASM: /* - - ks - */
/*
* All conflicts in this kpm page are gone but there are
* already small mappings around, so we also map this
* page small. This could be the trigger case for a
* small mapping reaper, if this is really needed.
* For now fall thru to the KPM_TSBM_MAPS handling.
*/
case KPM_TSBM_MAPS: /* kc - ks - */
/*
* Large page mapping is already broken, this page is not
* conflicting, so map it small. Call sfmmu_kpm_vac_conflict
* to take care for correcting the vcolor and flushing
* the dcache if required.
*/
panic("sfmmu_kpm_fault: inconsistent MAPS state, "
"pp=%p", (void *)pp);
}
kp->kp_refcnts++;
PP_SETKPMS(pp);
goto smallexit;
case KPM_TSBM_RPLS_RASM: /* - - ks s */
/*
* All conflicts in this kpm page are gone but this page
* is mapped small. This could be the trigger case for a
* small mapping reaper, if this is really needed.
* For now we drop it in small again. Fall thru to the
* KPM_TSBM_RPLS handling.
*/
case KPM_TSBM_RPLS: /* kc - ks s */
/*
* Large page mapping is already broken, this page is not
* conflicting but already mapped small, so drop it in
* small again.
*/
panic("sfmmu_kpm_fault: inconsistent RPLS state, "
"pp=%p", (void *)pp);
}
goto smallexit;
case KPM_TSBM_MAPS_BRKO: /* kc - - - */
/*
* The kpm page where we live in is marked conflicting
* but this page is not conflicting. So we have to map it
* in small. Call sfmmu_kpm_vac_conflict to take care for
* correcting the vcolor and flushing the dcache if required.
*/
panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, "
"pp=%p", (void *)pp);
}
kp->kp_refcnts++;
PP_SETKPMS(pp);
goto smallexit;
case KPM_TSBM_MAPS_BRKT: /* kc c - - */
case KPM_TSBM_MAPS_CONFL: /* kc c ks - */
if (!PP_ISMAPPED(pp)) {
/*
* We got a tsbmiss on kpm large page range that is
* marked to contain vac conflicting pages introduced
* by hme mappings. The hme mappings are all gone and
* must have bypassed the kpm alias prevention logic.
*/
panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
(void *)pp);
}
/*
* Check for vcolor conflicts. Return here w/ either no
* conflict (fast path), removed hme mapping chains
* (unload conflict) or uncached (uncache conflict).
* Dcache is cleaned and p_vcolor and P_TNC are set
* accordingly. Drop kpmp for uncache conflict cases
* since it will be grabbed within sfmmu_kpm_page_cache
* in case of an uncache conflict.
*/
uncached = 1;
} else {
/*
* When an unload conflict is solved and there are
* no other small mappings around, we can resume
* largepage mode. Otherwise we have to map or drop
* in small. This could be a trigger for a small
* mapping reaper when this was the last conflict
* within the kpm page and when there are only
* other small mappings around.
*/
kp->kp_refcntc--;
PP_CLRKPMC(pp);
goto largeexit;
}
kp->kp_refcnts++;
PP_SETKPMS(pp);
goto smallexit;
case KPM_TSBM_RPLS_CONFL: /* kc c ks s */
if (!PP_ISMAPPED(pp)) {
/*
* We got a tsbmiss on kpm large page range that is
* marked to contain vac conflicting pages introduced
* by hme mappings. They are all gone and must have
* somehow bypassed the kpm alias prevention logic.
*/
panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
(void *)pp);
}
/*
* This state is only possible for an uncached mapping.
*/
panic("sfmmu_kpm_fault: page not uncached, pp=%p",
(void *)pp);
}
uncached = 1;
goto smallexit;
default:
panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
}
/* tte assembly */
if (uncached == 0)
else
/* tsb dropin */
error = 0;
goto exit;
/* tte assembly */
/* tsb dropin */
if (kp->kp_refcntc == 0) {
/* Set "go" flag for TL tsbmiss handler */
}
error = 0;
} else
exit:
return (error);
}
/*
* kpm fault handler for mappings with small page size.
*/
int
{
int error = 0;
int oldval;
if (!PP_ISMAPPED_KPM(pp)) {
return (EFAULT);
}
/*
* kp_mapped lookup protected by mlist mutex
*/
/*
* Fast path tsbmiss
*/
/* tte assembly */
/* tsb dropin */
/*
* Recheck for vcolor conflicts. Return here w/ either
* no conflict, removed hme mapping chain (unload
* conflict) or uncached (uncache conflict). VACaches
* are cleaned and p_vcolor and PP_TNC are set accordingly
* for the conflict cases.
*/
/* ASSERT(pp->p_share); XXX use hat_page_getshare */
/* tte assembly */
/* tsb dropin */
if (oldval != KPM_MAPPEDSC)
panic("sfmmu_kpm_fault_small: "
"stale smallpages mapping");
} else {
PP_CLRKPMC(pp);
}
/* tte assembly */
/* tsb dropin */
if (oldval != KPM_MAPPEDSC)
panic("sfmmu_kpm_fault_small: "
"stale smallpages mapping");
}
} else {
/*
* We got a tsbmiss on a not active kpm_page range.
* Let segkpm_fault decide how to panic.
*/
}
return (error);
}
/*
*/
static void
{
int vcolor;
return;
return;
/*
* There could be no vcolor conflict between a large cached
* hme page and a non alias range kpm page (neither large nor
* small mapped). So if a hme conflict already exists between
* a constituent page of a large hme mapping and a shared small
* conflicting hme mapping, both mappings must be already
* uncached at this point.
*/
if (!PP_ISMAPPED(pp)) {
/*
* Previous hme user of page had a different color
* but since there are no current users
* we just flush the cache and change the color.
*/
return;
}
/*
* If we get here we have a vac conflict with a current hme
* mapping. This must have been established by forcing a wrong
* colored mapping, e.g. by using mmap(2) with MAP_FIXED.
*/
/*
* Check if any mapping is in same as or if it is locked
* since in that case we need to uncache.
*/
continue;
/*
* We have an uncache conflict
*/
return;
}
}
/*
* We have an unload conflict
*/
continue;
}
/*
* Unloads only does tlb flushes so we need to flush the
* dcache vcolor here.
*/
}
/*
* Remove all kpm mappings using kpme's for pp and check that
* all kpm mappings (w/ and w/o kpme's) are gone.
*/
void
{
panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
/* Add instance callback here here if needed later */
}
/*
* segkpm clients have unlocked the page and forgot to mapout
* we panic here.
*/
}
/*
* Remove a large kpm mapping from kernel TSB and all TLB's.
*/
static void
{
}
/*
* Remove a small kpm mapping from kernel TSB and all TLB's.
*/
static void
{
}
/*
* Demap a kpm mapping in all TLB's.
*/
static void
{
}
/*
* Summary states used in sfmmu_kpm_vac_unload (KPM_VUL__*).
* See also more detailed comments within the sfmmu_kpm_vac_unload switch.
* Abbreviations used:
* BIG: Large page kpm mapping in use.
* CONFL: VAC conflict(s) within a kpm_page.
* INCR: Count of conflicts within a kpm_page is going to be incremented.
* DECR: Count of conflicts within a kpm_page is going to be decremented.
* UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped.
* TNC: Temporary non cached: a kpm mapped page is mapped in TNC state.
*/
#define KPM_VUL_BIG (0)
#define KPM_VUL_CONFL_INCR1 (KPM_KS)
#define KPM_VUL_CONFL_INCR2 (KPM_KC)
/*
* Handle VAC unload conflicts introduced by hme mappings or vice
* versa when a hme conflict mapping is replaced by a non conflict
* one. Perform actions and state transitions according to the
* various page and kpm_page entry states. VACache flushes are in
* the responsibility of the caller. We still hold the mlist lock.
*/
void
{
kpm_page_t *kp;
int newcolor;
int badstate = 0;
if (kpm_smallpages)
goto smallpages_vac_unload;
if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
(void *)kp);
}
if (newcolor == 0)
goto exit;
PP_SETKPMC(pp);
} else if (newcolor == 0) {
PP_CLRKPMC(pp);
} else {
badstate++;
}
goto exit;
}
/*
* We should come here only if trap level tsb miss
* handler is disabled.
*/
} else {
}
if (badstate)
goto exit;
goto exit;
}
/*
* Combine the per kpm_page and per page kpm VAC states
* to a summary state in order to make the vac unload
* handling more concise.
*/
switch (vacunlcase) {
case KPM_VUL_BIG: /* - - - - */
/*
* Have to breakup the large page mapping to be
* able to handle the conflicting hme vaddr.
*/
/* remove go indication */
}
kp->kp_refcntc++;
PP_SETKPMC(pp);
break;
case KPM_VUL_UNMAP_SMALL1: /* - - ks s */
case KPM_VUL_UNMAP_SMALL2: /* kc - ks s */
/*
* New conflict w/ an active kpm page that is actually
* mapped small; update states.
*/
kp->kp_refcnts--;
kp->kp_refcntc++;
PP_CLRKPMS(pp);
PP_SETKPMC(pp);
break;
case KPM_VUL_CONFL_INCR1: /* - - ks - */
case KPM_VUL_CONFL_INCR2: /* kc - - - */
case KPM_VUL_CONFL_INCR3: /* kc - ks - */
/*
* New conflict on an active kpm mapped page not yet in
* count.
*/
kp->kp_refcntc++;
PP_SETKPMC(pp);
break;
case KPM_VUL_CONFL_DECR1: /* kc c - - */
case KPM_VUL_CONFL_DECR2: /* kc c ks - */
/*
* A conflicting hme mapping is removed for an active
* kpm mapped page; decrement the kpm_page conflict count.
*/
kp->kp_refcntc--;
PP_CLRKPMC(pp);
break;
case KPM_VUL_TNC: /* kc c ks s */
"page not in NC state");
/* FALLTHRU */
default:
badstate++;
}
exit:
if (badstate) {
panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
"kpmvaddr=%p kp=%p pp=%p",
}
return;
if (newcolor == 0)
return;
/*
* Stop TL tsbmiss handling
*/
panic("sfmmu_kpm_vac_unload: inconsistent mapping");
}
PP_SETKPMC(pp);
} else {
panic("sfmmu_kpm_vac_unload: inconsistent mapping");
}
}
/*
* Page is marked to be in VAC conflict to an existing kpm mapping
* or is kpm mapped using only the regular pagesize. Called from
* sfmmu_hblk_unload when a mlist is completely removed.
*/
void
{
/* tte assembly */
kpm_page_t *kp;
if (kpm_smallpages)
goto smallpages_hme_unload;
panic("sfmmu_kpm_hme_unload: page should be uncached");
/*
* Page mapped small but not involved in VAC conflict
*/
return;
}
if (IS_KPM_ALIAS_RANGE(vaddr)) {
panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
(void *)kp);
}
} else {
panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
(void *)kp);
}
kp->kp_refcntc--;
}
PP_CLRKPMC(pp);
return;
panic("sfmmu_kpm_hme_unload: page should be uncached");
panic("sfmmu_kpm_hme_unload: inconsistent mapping");
/*
* Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
* prevents TL tsbmiss handling and force a hat_kpm_fault.
* There we can start over again.
*/
PP_CLRKPMC(pp);
}
/*
* Special hooks for sfmmu_page_cache_array() when changing the
* cacheability of a page. It is used to obey the hat_kpm lock
* ordering (mlist -> kpmp -> spl, and back).
*/
{
kpm_page_t *kp;
return (NULL);
return (kpmp);
}
void
{
return;
}
/*
* Summary states used in sfmmu_kpm_page_cache (KPM_*).
* See also more detailed comments within the sfmmu_kpm_page_cache switch.
* Abbreviations used:
* UNC: Input state for an uncache request.
* BIG: Large page kpm mapping in use.
* SMALL: Page has a small kpm mapping within a kpm_page range.
* NODEMAP: No demap needed.
* NOP: No operation needed on this input state.
* CACHE: Input state for a re-cache request.
* MAPS: Page is in TNC and kpm VAC conflict state and kpm mapped small.
* NOMAP: Page is in TNC and kpm VAC conflict state, but not small kpm
* mapped.
* NOMAPO: Page is in TNC and kpm VAC conflict state, but not small kpm
* mapped. There are also other small kpm mappings within this
* kpm_page.
*/
#define KPM_UNC_BIG (0)
#define KPM_UNC_NODEMAP1 (KPM_KS)
#define KPM_UNC_NODEMAP2 (KPM_KC)
/*
* This function is called when the virtual cacheability of a page
* is changed and the page has an active kpm mapping. The mlist mutex,
* the spl hash lock and the kpmp mutex (if needed) are already grabbed.
*/
/*ARGSUSED2*/
void
{
kpm_page_t *kp;
int badstate = 0;
int oldval;
panic("sfmmu_kpm_page_cache: bad flags");
/* Flush vcolor in DCache */
}
if (kpm_smallpages)
goto smallpages_page_cache;
if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
panic("sfmmu_kpm_page_cache: bad refcnta "
"kpm_page=%p\n", (void *)kp);
}
PP_SETKPMC(pp);
} else {
PP_CLRKPMC(pp);
}
goto exit;
}
/*
* We should come here only if trap level tsb miss
* handler is disabled.
*/
} else {
}
if (badstate)
goto exit;
/*
* Combine the per kpm_page and per page kpm VAC states to
* a summary state in order to make the page cache handling
* more concise.
*/
switch (pgcacase) {
case KPM_CACHE_MAPS: /* kc c ks s */
panic("sfmmu_kpm_page_cache: bad refcnts "
"kpm_page=%p\n", (void *)kp);
}
kp->kp_refcnts--;
PP_CLRKPMS(pp);
/* FALLTHRU */
case KPM_CACHE_NOMAP: /* kc c - - */
case KPM_CACHE_NOMAPO: /* kc c ks - */
kp->kp_refcntc--;
PP_CLRKPMC(pp);
break;
default:
badstate++;
}
goto exit;
}
switch (pgcacase) {
case KPM_UNC_BIG: /* - - - - */
panic("sfmmu_kpm_page_cache: bad refcnt "
"kpm_page=%p\n", (void *)kp);
}
/*
* Have to breakup the large page mapping in preparation
* to the upcoming TNC mode handled by small mappings.
* The demap can already be done due to another conflict
* within the kpm_page.
*/
/* remove go indication */
}
kp->kp_refcntc++;
PP_SETKPMC(pp);
break;
case KPM_UNC_SMALL1: /* - - ks s */
case KPM_UNC_SMALL2: /* kc - ks s */
/*
* Have to demap an already small kpm mapping in preparation
* to the upcoming TNC mode. The demap can already be done
* due to another conflict within the kpm_page.
*/
kp->kp_refcntc++;
kp->kp_refcnts--;
PP_CLRKPMS(pp);
PP_SETKPMC(pp);
break;
case KPM_UNC_NODEMAP1: /* - - ks - */
/* fallthru */
case KPM_UNC_NODEMAP2: /* kc - - - */
case KPM_UNC_NODEMAP3: /* kc - ks - */
kp->kp_refcntc++;
PP_SETKPMC(pp);
break;
case KPM_UNC_NOP1: /* kc c - - */
case KPM_UNC_NOP2: /* kc c ks - */
break;
default:
badstate++;
}
exit:
if (badstate) {
panic("sfmmu_kpm_page_cache: inconsistent VAC state "
"kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
}
return;
/*
* marked as nogo; we will fault in and resolve it
* through sfmmu_kpm_fault_small
*/
panic("smallpages_page_cache: inconsistent mapping");
PP_SETKPMC(pp);
} else {
PP_CLRKPMC(pp);
}
/*
* Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
* prevents TL tsbmiss handling and force a hat_kpm_fault.
* There we can start over again.
*/
}