/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
#include <sys/mem_cage.h>
/*
 * A quick way to generate a cache consistent address to map in a page.
 *
 * The ppmapin/ppmapout routines below provide such a cache consistent
 * address by reserving a given amount of kernel address space.
 * The base is PPMAPBASE and its size is PPMAPSIZE. This memory is divided
 * into x number of sets, where x is the number of colors for the virtual
 * cache. The number of colors is how many times a page can be mapped
 * simultaneously in the cache. For direct map caches this translates to
 * the number of pages in the cache.
 * Each set is assigned a group of virtual pages from the reserved memory,
 * depending on its virtual color.
 * When trying to assign a virtual address we first find the color of the
 * physical page in question (if applicable), then look for an available
 * virtual page in the set of the appropriate color.
 */
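/*
 * Illustrative sketch only (not part of the original code): one way to derive
 * the cache color of an existing mapping and pick a reserved virtual page of
 * that same color, in the spirit of the scheme described above.  The ex_
 * names, the interleaved layout and the 8K page size are assumptions made for
 * this example; it is kept under #if 0 so it is never compiled.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define	EX_PAGESHIFT	13			/* assumed 8K MMU pages */
#define	EX_PAGESIZE	(1UL << EX_PAGESHIFT)

static uintptr_t ex_base;		/* start of the reserved VA range */
static unsigned ex_ncolors;		/* virtual cache colors, power of two */

/* Cache color of a virtual address: its virtual page number modulo ncolors. */
static unsigned
ex_color_of(uintptr_t va)
{
	return ((unsigned)((va >> EX_PAGESHIFT) & (ex_ncolors - 1)));
}

/*
 * Pick the slot-th reserved virtual page of a given color.  Every VA returned
 * for one color indexes the virtual cache the same way, so any of them maps a
 * page consistently with other mappings of that color.
 */
static uintptr_t
ex_pick_va(unsigned color, unsigned slot)
{
	return (ex_base + ((uintptr_t)slot * ex_ncolors + color) * EX_PAGESIZE);
}
#endif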
/* tuned by cpu module, default is "safe" */
#ifdef PPDEBUG
#endif /* PPDEBUG */
/*
* There are only 64 TLB entries on spitfire, 16 on cheetah
* (fully-associative TLB), so we allow the cpu module to tune the
* number to use here via pp_slots.
*/
static struct ppmap_va {
void
ppmapinit(void)
{
int a;
a = ppmap_pages;
while (a >>= 1)
ppmap_shift++;
} else {
/*
* If we do not have a virtually indexed cache we simply
* have only one set containing all pages.
*/
ppmap_pages = 1;
}
}
va += MMU_PAGESIZE;
}
}
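/*
 * Illustrative sketch only: the log2 loop in ppmapinit() above computes a
 * shift so that an offset into the reserved range can be turned into a group
 * index with a single shift instead of a divide.  That the shift starts at
 * the page shift, and the ex_ names, are assumptions for this example, not
 * taken from the original code.
 */
#if 0
#include <stdint.h>

#define	EX_PAGESHIFT	13		/* assumed 8K MMU pages */

static unsigned
ex_compute_shift(unsigned ncolors)	/* ncolors must be a power of two */
{
	unsigned shift = EX_PAGESHIFT;

	/* Same idiom as ppmapinit(): add log2(ncolors) to the page shift. */
	while (ncolors >>= 1)
		shift++;
	return (shift);
}

/* With that shift, each group of ncolors pages collapses to one index. */
static unsigned
ex_group_index(uintptr_t va, uintptr_t base, unsigned shift)
{
	return ((unsigned)((va - base) >> shift));
}
#endif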
/*
* Allocate a cache consistent virtual address to map a page, pp,
* with protection, vprot; and map it in the MMU, using the most
* efficient means possible. The argument avoid is a virtual address
* hint which when masked yields an offset into a virtual cache
* that should be avoided when allocating an address to map in a
* page. An avoid arg of -1 means you don't care, for instance pagezero.
*
* machine dependent, depends on virtual address space layout,
* understands that all kernel addresses have bit 31 set.
*
* NOTE: For sun4 platforms the meaning of the hint argument is opposite from
* that found in other architectures. In other architectures the hint
* (called avoid) was used to ask ppmapin to NOT use the specified cache color.
* This was used to avoid virtual cache thrashing in the bcopy. Unfortunately
* in the case of a COW, this later on caused a cache aliasing conflict. On
* sun4, the bcopy routine uses block load/store instructions, so we don't need
* to worry about virtual cache thrashing. Actually, by using the hint to choose
* the right color we can almost guarantee a cache conflict will not occur.
*/
{
#ifdef PPDEBUG
pp_allocs++;
#endif /* PPDEBUG */
if (color == -1) {
} else {
}
}
} else {
/*
* For physically indexed caches, we can pick any address we want.
*/
color = 0;
}
do {
#ifdef PPDEBUG
align_hits[color]++;
#endif /* PPDEBUG */
vprot | HAT_NOSYNC,
return (va);
}
}
}
/*
* first pick didn't succeed, try another
*/
if (++color == ppmap_pages)
color = 0;
#ifdef PPDEBUG
#endif /* PPDEBUG */
/*
* No free slots; get a random one from the kernel heap area.
*/
return (va);
}
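/*
 * Illustrative sketch only: the search order used by ppmapin() above, written
 * out for a simple array of per-color free slots.  The ex_ names and sizes
 * are assumptions, the real code claims slots atomically, and the kernel heap
 * fallback is reduced to a stub.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define	EX_NCOLORS	4
#define	EX_PER_COLOR	8

/* ex_free[c][i] holds a free VA of color c, or 0 if that slot is taken. */
static uintptr_t ex_free[EX_NCOLORS][EX_PER_COLOR];

/* Stand-in for the "random address from the kernel heap area" fallback. */
static uintptr_t
ex_heap_fallback(void)
{
	return (0);
}

static uintptr_t
ex_pick(int hint_color)
{
	int color = (hint_color == -1) ? 0 : hint_color;
	int tries, i;

	/* Start with the preferred color, then rotate through the others. */
	for (tries = 0; tries < EX_NCOLORS; tries++) {
		for (i = 0; i < EX_PER_COLOR; i++) {
			if (ex_free[color][i] != 0) {
				uintptr_t va = ex_free[color][i];

				ex_free[color][i] = 0;	/* claim the slot */
				return (va);
			}
		}
		/* First pick didn't succeed, try another color. */
		if (++color == EX_NCOLORS)
			color = 0;
	}
	/* No free slots of any color; fall back to the kernel heap area. */
	return (ex_heap_fallback());
}
#endif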
void
{
/*
* Space came from kernelmap, flush the page and
* return the space.
*/
} else {
/*
* Space came from ppmap_vaddrs[], give it back.
*/
}
}
#ifdef DEBUG
#else
#endif /* DEBUG */
/*
* Find a slot in the per CPU page copy area. Load up a locked TLB entry in the
* running cpu. We don't call the hat layer to load up the tte since the
* mapping is only temporary. If the thread migrates it'll get a TLB
* miss trap, and the TLB/TSB miss handler will panic since there is no
* official hat record of this mapping.
*/
static caddr_t
{
int vcolor;
if (prot & TTE_HWWR_INT) {
} else {
}
/*
* If consistent handling is required then keep the current
* vcolor of the page. Furthermore, if loads or stores can
* pollute the VAC then using a "new" page (unassigned vcolor)
* won't work and we have to return a failure.
*/
if (pp_consistent_coloring & flags) {
if ((vcolor == -1) &&
return (NULL);
/* else keep the current vcolor of the page */
} else {
vcolor = -1;
}
if (vcolor != -1) {
} else {
start = 0;
stride = 1;
}
break;
}
}
if (i >= pp_slots) {
return (NULL);
}
/*
* Now that we have a slot we can use, make the tte.
*/
return (va);
}
static void
{
}
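/*
 * Illustrative sketch only: claiming a slot in a per CPU copy area the way
 * pp_load_tlb() above does, with the start/stride of the scan chosen from the
 * page's virtual color.  Standalone C11; the ex_ names and fixed sizes are
 * assumptions, and the actual locked TLB load is left to the caller.
 */
#if 0
#include <stdatomic.h>

#define	EX_SLOTS	8		/* slots in this CPU's copy area */
#define	EX_NCOLORS	4		/* virtual cache colors */

/* One flag per slot on this CPU: 0 = free, 1 = in use. */
static atomic_int ex_slot_busy[EX_SLOTS];

static int
ex_claim_slot(int vcolor)
{
	int start, stride, i, expected;

	if (vcolor != -1) {
		/* Color must be kept: only every EX_NCOLORS-th slot fits. */
		start = vcolor;
		stride = EX_NCOLORS;
	} else {
		/* Color does not matter: any slot will do. */
		start = 0;
		stride = 1;
	}

	for (i = start; i < EX_SLOTS; i += stride) {
		expected = 0;
		if (atomic_compare_exchange_strong(&ex_slot_busy[i],
		    &expected, 1))
			return (i);	/* caller loads a locked TLB entry */
	}
	return (-1);			/* no slot free; caller falls back */
}
#endif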
/*
* Common copy routine which attempts to use hwblkpagecopy. If this routine
* can't be used, failure (0) will be returned. Otherwise, a PAGESIZE page
* will be copied and success (1) will be returned.
*/
int
{
/*
* If we can't use VIS block loads and stores, we can't use
* pp_load_tlb/pp_unload_tlb due to the possibility of
* d$ aliasing.
*/
return (0);
return (0);
}
return (0);
}
ret = 0;
goto faulted;
}
no_fault();
return (ret);
}
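/*
 * Illustrative sketch only: the on_fault()/no_fault() protection visible in
 * the fragment above.  A fault taken during the copy unwinds to the label and
 * the routine reports failure instead of panicking.  The helper name and the
 * argument order of hwblkpagecopy() are assumptions.
 */
#if 0
static int
ex_blkcopy(caddr_t fm_va, caddr_t to_va)
{
	label_t ljb;
	int ret = 1;

	if (on_fault(&ljb)) {
		/* A fault occurred during the copy; report failure. */
		ret = 0;
		goto faulted;
	}
	hwblkpagecopy(fm_va, to_va);	/* VIS block copy of one page */
faulted:
	no_fault();
	return (ret);
}
#endif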
/*
* Routine to copy kernel pages during relocation. It will copy one
* PAGESIZE page to another PAGESIZE page. This function may be called
* above LOCK_LEVEL so it should not grab any locks.
*/
void
{
}
/*
* Copy the data from the physical page represented by "frompp" to
* that represented by "topp".
*
* Try to use per cpu mapping first, if that fails then call pp_mapin
* to load it.
*
* Returns one on success or zero on some sort of fault while doing the copy.
*/
int
{
/* Try the fast path first */
return (1);
/*
* Try to map using KPM if enabled and we are the cageout thread.
*/
if (kpm_enable) {
if (curthread == kcage_cageout_thread)
}
if (use_kpm) {
}
}
/* do the slow path */
ret = 0;
goto faulted;
}
}
no_fault();
/* unmap */
} else {
}
return (ret);
}
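/*
 * Illustrative sketch only: the slow-path mapping choices described above for
 * ppcopy().  kpm mappings are used when the caller may do so (e.g. the
 * cageout thread with kpm_enable set); otherwise ppmapin()/ppmapout() provide
 * temporary mappings, with the source VA passed as the color hint for the
 * destination.  ex_ppcopy_slow is an assumed name and error handling is
 * reduced to the bare pattern.
 */
#if 0
static int
ex_ppcopy_slow(page_t *fm_pp, page_t *to_pp, int use_kpm)
{
	caddr_t fm_va, to_va;
	label_t ljb;
	int ret = 1;

	if (use_kpm) {
		fm_va = hat_kpm_mapin(fm_pp, NULL);
		to_va = hat_kpm_mapin(to_pp, NULL);
	} else {
		fm_va = ppmapin(fm_pp, PROT_READ, (caddr_t)-1);
		to_va = ppmapin(to_pp, PROT_READ | PROT_WRITE, fm_va);
	}

	if (on_fault(&ljb)) {
		ret = 0;		/* fault while copying */
		goto faulted;
	}
	bcopy(fm_va, to_va, PAGESIZE);
faulted:
	no_fault();

	/* unmap */
	if (use_kpm) {
		hat_kpm_mapout(fm_pp, NULL, fm_va);
		hat_kpm_mapout(to_pp, NULL, to_va);
	} else {
		ppmapout(fm_va);
		ppmapout(to_va);
	}
	return (ret);
}
#endif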
/*
* Zero the physical page from off to off + len given by `pp'
* without changing the reference and modified bits of page.
*
* Again, we'll try per cpu mapping first.
*/
void
{
extern int use_hw_bzero;
/*
* Since the fast path doesn't do anything about
* VAC coloring, we make sure bcopy h/w will be used.
*/
fast = 0;
}
if (fast) {
}
/*
* We get here when either length != MMU_PAGESIZE, pp_load_tlb()
* returned NULL, or use_hw_bzero is disabled.
*/
fast = 0;
}
/*
* We may not have used the block commit ASI,
* so flush the I-$ manually.
*/
} else {
/*
* We have used block commit and flushed the I-$. However, we
* may still have an instruction in the pipeline; only a flush
* instruction will invalidate that.
*/
}
if (fast) {
} else {
}
}
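/*
 * Illustrative sketch only: the fast/slow decision pagezero() makes above.
 * The fast path needs a full page zero with hardware block clear allowed and
 * a per CPU mapping already set up; anything else falls back to a ppmapin()
 * mapping and bzero().  Tying hwblkclr()'s return value to the "may not have
 * used block commit asi" case is an assumption made for this example, the
 * I-$ flush and per CPU TLB slot handling are omitted, and ex_pagezero is an
 * assumed name.
 */
#if 0
static void
ex_pagezero(page_t *pp, uint_t off, uint_t len, caddr_t fast_va)
{
	int fast = use_hw_bzero && off == 0 && len == MMU_PAGESIZE &&
	    fast_va != NULL;
	caddr_t va;

	if (fast) {
		if (hwblkclr(fast_va, MMU_PAGESIZE) != 0) {
			/* Block commit ASI not used: the real routine */
			/* flushes the I-$ by hand here (omitted). */
		}
	} else {
		/* Partial page or no fast mapping: use a ppmap address. */
		va = ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
		bzero(va + off, len);
		ppmapout(va);
	}
}
#endif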