/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
/*
* A quick way to generate a cache consistent address to map in a page.
*
* consistent address by reserving a given amount of kernel address space.
* The base is PPMAPBASE and its size is PPMAPSIZE. This memory is divided
* into x number of sets, where x is the number of colors for the virtual
* cache. The number of colors is how many times a page can be mapped
* simultaneously in the cache. For direct map caches this translates to
* the number of pages in the cache.
* Each set will be assigned a group of virtual pages from the reserved memory
* depending on its virtual color.
* When trying to assign a virtual address we will find out the color for the
* physical page in question (if applicable). Then we will try to find an
* available virtual page from the set of the appropriate color.
*/
/* tuned by cpu module, default is "safe" */
/*
 * NOTE(review): this region appears truncated.  The PPDEBUG section
 * presumably declared additional statistics (pp_allocs is incremented
 * later under PPDEBUG but is never declared here), and the
 * struct ppmap_va declaration below has lost its member list and
 * closing brace.  Recover the missing lines before compiling.
 */
#ifdef PPDEBUG
static int align_hits;
#endif /* PPDEBUG */
/*
 * There are only 64 TLB entries on spitfire, 16 on cheetah
 * (fully-associative TLB) so we allow the cpu module to tune the
 * number to use here via pp_slots.
 */
/* NOTE(review): member list and closing brace missing -- TODO recover */
static struct ppmap_va {
/* prevent compilation with VAC defined */
#ifdef VAC
#error "sun4v ppmapin and ppmapout do not support VAC"
#endif
/*
 * ppmapinit() - initialize the reserved ppmap virtual address range.
 *
 * NOTE(review): body appears truncated -- `nset' is read while
 * uninitialized, the ppmap_vaddrs[] assignment has no right-hand side,
 * and there is an unbalanced extra closing brace.  Presumably the
 * original iterated over the sets, assigning each slot a page-aligned
 * VA carved from the reserved range -- TODO recover before use.
 */
void
ppmapinit(void)
{
int nset;
/*
 * sun4v does not have a virtual indexed cache and simply
 * has only one set containing all pages.
 */
ppmap_vaddrs[nset] =
}
}
/*
* Allocate a cache consistent virtual address to map a page, pp,
* with protection, vprot; and map it in the MMU, using the most
* efficient means possible. The argument avoid is a virtual address
* hint which when masked yields an offset into a virtual cache
* that should be avoided when allocating an address to map in a
* page. An avoid arg of -1 means you don't care, for instance pagezero.
*
* machine dependent, depends on virtual address space layout,
* understands that all kernel addresses have bit 31 set.
*
* NOTE: For sun4 platforms the meaning of the hint argument is opposite from
* that found in other architectures. In other architectures the hint
* (called avoid) was used to ask ppmapin to NOT use the specified cache color.
* This was used to avoid virtual cache thrashing in the bcopy. Unfortunately
* in the case of a COW, this later on caused a cache aliasing conflict. In
* to worry about virtual cache thrashing. Actually, by using the hint to choose
* the right color we can almost guarantee a cache conflict will not occur.
*/
/*ARGSUSED2*/
/*
 * NOTE(review): the function signature line is missing from this chunk.
 * The ARGSUSED2 lint tag, the header comment above, and the uses of
 * `vprot' and the returned `va' below suggest this is ppmapin(),
 * returning a kernel virtual address that maps the given page.
 * Several interior lines are also lost: a condition reduced to a
 * dangling `va) {', and an hat_memload-style argument list fragment
 * ending in `vprot | HAT_NOSYNC,'.  TODO recover before compiling.
 */
{
int nset;
#ifdef PPDEBUG
pp_allocs++;
#endif /* PPDEBUG */
/*
 * For sun4v caches are physical caches, we can pick any address
 * we want.
 */
#ifdef PPDEBUG
align_hits++;
#endif /* PPDEBUG */
va) {
vprot | HAT_NOSYNC,
return (va);
}
}
}
#ifdef PPDEBUG
#endif /* PPDEBUG */
/*
 * No free slots; get a random one from the kernel heap area.
 */
return (va);
}
/*
 * NOTE(review): the signature line is missing -- presumably
 * ppmapout(caddr_t va): release an address handed out by ppmapin(),
 * either returning kernelmap/heap space or putting the slot back into
 * ppmap_vaddrs[].  The statements inside both branches (the flush,
 * the free, and the slot restore) have been lost -- TODO recover.
 */
void
{
int nset;
/*
 * Space came from kernelmap, flush the page and
 * return the space.
 */
} else {
/*
 * Space came from ppmap_vaddrs[], give it back.
 */
}
}
#ifdef DEBUG
#else
#endif /* DEBUG */
/*
 * NOTE(review): the name, parameter list and body of this static helper
 * are missing, as is the content of the DEBUG conditional above.  The
 * surviving `static void' and empty brace pair give no clue to its
 * purpose -- TODO recover from the original source.
 */
static void
{
}
/*
 * Routine to copy kernel pages during relocation. It will copy one
 * PAGESIZE page to another PAGESIZE page. This function may be called
 * above LOCK_LEVEL so it should not grab any locks.
 */
/*
 * NOTE(review): only `void' and an empty brace pair survive -- the
 * function name, its parameters (presumably the from/to addresses)
 * and the entire copy body have been lost.  TODO recover.
 */
void
{
}
/*
 * Copy the data from the physical page represented by "frompp" to
 * that represented by "topp".
 *
 * Try to use per cpu mapping first, if that fails then call pp_mapin
 * to load it.
 * Returns one on success or zero on some sort of fault while doing the copy.
 */
/*
 * NOTE(review): the signature line is missing (presumably
 * ppcopy(page_t *frompp, page_t *topp)) and much of the body is gone:
 * the KPM mapping attempt, the on_fault() setup pairing with the
 * surviving no_fault() call, the `faulted:' label targeted by the
 * goto, the actual copy, and the fast/slow unmap paths are all
 * fragments.  TODO recover before use.
 */
int
{
/*
 * Try to map using KPM if enabled. If it fails, fall
 */
if ((kpm_enable == 0) ||
} else
ret = 0;
goto faulted;
}
no_fault();
/* Unmap */
if (fast) {
} else {
}
return (ret);
}
/*
 * Zero the physical page from off to off + len given by `pp'
 * without changing the reference and modified bits of page.
 *
 * Again, we'll try per cpu mapping first.
 */
/*
 * NOTE(review): the signature line is missing (presumably
 * pagezero(page_t *pp, uint_t off, uint_t len)) and the body is
 * fragmentary: the KPM branch bodies, the actual zeroing call, the
 * I-cache flush statements, and the fast/slow unmap paths have been
 * lost.  There is also a stray `}' right after the use_hw_bzero
 * declaration.  TODO recover before use.
 */
void
{
extern int use_hw_bzero;
}
/*
 * Try to use KPM if enabled. If that fails, fall back to
 */
if (kpm_enable != 0) {
} else
}
if (!use_hw_bzero) {
/*
 * We may not have used block commit asi.
 * So flush the I-$ manually
 */
} else {
/*
 * We have used blk commit, and flushed the I-$.
 * However we still may have an instruction in the
 * pipeline. Only a flush will invalidate that.
 */
}
if (fast) {
} else {
}
}