/* ppage.c revision 1e2e7a75ddb1eedcefa449ce98fd5862749b72ee */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
/*
* A quick way to generate a cache consistent address to map in a page.
*
* consistent address by reserving a given amount of kernel address space.
* The base is PPMAPBASE and its size is PPMAPSIZE. This memory is divided
* into x number of sets, where x is the number of colors for the virtual
* cache. The number of colors is how many times a page can be mapped
* simultaneously in the cache. For direct map caches this translates to
* the number of pages in the cache.
* Each set will be assigned a group of virtual pages from the reserved memory
* depending on its virtual color.
* When trying to assign a virtual address we will find out the color for the
* physical page in question (if applicable). Then we will try to find an
* available virtual page from the set of the appropriate color.
*/
/* tuned by cpu module, default is "safe" */
static int nsets; /* number of sets */
static int ppmap_pages; /* generate align mask */
static int ppmap_shift; /* set selector */
#ifdef PPDEBUG
static int ppalloc_noslot = 0; /* # of allocations from kernelmap */
static int align_hits[MAXCOLORS]; /* per-color allocation attempts (see ppmapin) */
static int pp_allocs; /* # of ppmapin requests */
#endif /* PPDEBUG */
/*
* There are only 64 TLB entries on spitfire, 16 on cheetah
* (fully-associative TLB) so we allow the cpu module to tune the
* number to use here via pp_slots.
*/
/*
 * NOTE(review): this region appears truncated in this view — the body of
 * struct ppmap_va, the leading portion of ppmapinit() (including the `if`
 * that matches the `else` below), and the loop enclosing the
 * `va += MMU_PAGESIZE` trailer are missing.  Only comments are added here;
 * the code is untouched.
 */
static struct ppmap_va {
void
ppmapinit(void)
{
int a;
/* ppmap_shift = floor(log2(ppmap_pages)); used later as the set selector */
a = ppmap_pages;
while (a >>= 1)
ppmap_shift++;
} else {
/*
 * If we do not have a virtual indexed cache we simply
 * have only one set containing all pages.
 */
ppmap_pages = 1;
}
}
va += MMU_PAGESIZE;
}
}
/*
 * Allocate a cache consistent virtual address to map a page, pp,
 * with protection, vprot; and map it in the MMU, using the most
 * efficient means possible. The argument avoid is a virtual address
 * hint which when masked yields an offset into a virtual cache
 * that should be avoided when allocating an address to map in a
 * page. An avoid arg of -1 means you don't care, for instance pagezero.
 *
 * machine dependent, depends on virtual address space layout,
 * understands that all kernel addresses have bit 31 set.
 *
 * NOTE: For sun4 platforms the meaning of the hint argument is opposite from
 * that found in other architectures. In other architectures the hint
 * (called avoid) was used to ask ppmapin to NOT use the specified cache color.
 * This was used to avoid virtual cache thrashing in the bcopy. Unfortunately
 * in the case of a COW, this later on caused a cache aliasing conflict. In
 * to worry about virtual cache thrashing. Actually, by using the hint to choose
 * the right color we can almost guarantee a cache conflict will not occur.
 */
/*
 * NOTE(review): this function appears truncated in this view — the
 * signature line, the color computation for virtually indexed caches,
 * the slot-claiming loop condition, and the hat_memload() call that the
 * dangling `vprot | HAT_NOSYNC,` belongs to are all missing.  Only
 * comments are added below; the code is untouched.
 */
{
#ifdef PPDEBUG
pp_allocs++;	/* count every ppmapin request */
#endif /* PPDEBUG */
if (color == -1) {
} else {
}
}
} else {
/*
 * For physical caches, we can pick any address we want.
 */
color = 0;
}
do {
#ifdef PPDEBUG
align_hits[color]++;	/* attempts recorded per color tried */
#endif /* PPDEBUG */
vprot | HAT_NOSYNC,
return (va);
}
}
}
/*
 * first pick didn't succeed, try another
 */
if (++color == ppmap_pages)
color = 0;
#ifdef PPDEBUG
#endif /* PPDEBUG */
/*
 * No free slots; get a random one from the kernel heap area.
 */
return (va);
}
void
/*
 * NOTE(review): truncated — the function name (presumably ppmapout, the
 * release half of ppmapin — TODO confirm), its parameters, and the
 * condition distinguishing the two branches below are missing from this
 * view.  Only comments are added; the code is untouched.
 */
{
/*
 * Space came from kernelmap, flush the page and
 * return the space.
 */
} else {
/*
 * Space came from ppmap_vaddrs[], give it back.
 */
}
}
#ifdef DEBUG
/*
 * NOTE(review): the DEBUG-build definition of PP_STAT_ADD appears to be
 * missing from this view.
 */
#else
/* Statistics are compiled out of non-DEBUG builds. */
#define PP_STAT_ADD(stat)
#endif /* DEBUG */
static void
/*
 * NOTE(review): truncated — this static helper's name, parameters and
 * entire body are missing from this view.
 */
{
}
/*
 * Routine to copy kernel pages during relocation. It will copy one
 * PAGESIZE page to another PAGESIZE page. This function may be called
 * above LOCK_LEVEL so it should not grab any locks.
 */
void
/*
 * NOTE(review): truncated — the function name, parameters and entire
 * body are missing from this view; only the header comment survives.
 */
{
}
/*
 * Copy the data from the physical page represented by "frompp" to
 * that represented by "topp".
 *
 * Try to use per cpu mapping first, if that fails then call pp_mapin
 * to load it.
 */
void
/*
 * NOTE(review): truncated — the signature, the KPM mapping attempt, the
 * actual copy, and the bodies of the fast/slow unmap branches are
 * missing from this view.  Only comments are added; code is untouched.
 */
{
/*
 * Try to map using KPM. If it fails, fall back to
 */
} else
/* Unmap */
if (fast) {
} else {
}
}
/*
 * Zero the physical page from off to off + len given by `pp'
 * without changing the reference and modified bits of page.
 *
 * Again, we'll try per cpu mapping first.
 */
void
/*
 * NOTE(review): truncated — the signature, the mapping setup, the zeroing
 * itself, and the bodies of every branch below are missing from this
 * view.  Only comments are added; the code is untouched.
 */
{
extern int use_hw_bzero;	/* set when a HW block-store bzero is available */
}
/*
 * Try to use KPM. If that fails, fall back to
 */
}
if (!use_hw_bzero) {
/*
 * We may not have used block commit asi.
 * So flush the I-$ manually
 */
} else {
/*
 * We have used blk commit, and flushed the I-$.
 * However we still may have an instruction in the
 * pipeline. Only a flush will invalidate that.
 */
}
if (fast) {
} else {
}
}