/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <amd64/amd64_page.h>
/*
* NOTE: ALL these routines assume that page tables are identity mapped (1:1,
* VA == PA). If they cannot access a given physical address by
* dereferencing the equivalent virtual address, they will fail, since
* the x86 cannot easily reach an arbitrary physical address without
* explicitly mapping a page for that purpose.
*
* Since boot already maps the 32-bit page tables this way and we control
* creation of the 64-bit page tables, we can ensure that this remains
* the case unless a callback is made from 64-bit mode that references a
* kernel-mapped page; doing THAT will likely cause a crash as these
* routines try to dereference an unmapped or wrong address.
*/
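/*
 * Example (not part of the original source): a minimal sketch of what the
 * identity-mapping assumption above buys us.  Because VA == PA for the page
 * tables, an entry can be read by treating the table's physical address as
 * an ordinary pointer.  The name ex_read_pte() is hypothetical and exists
 * only for this illustration.
 */
#if 0	/* illustrative only */
static uint64_t
ex_read_pte(uint64_t table_pa, uint_t index)
{
	/* Safe only because the page tables are identity mapped (VA == PA). */
	uint64_t *table = (uint64_t *)(uintptr_t)table_pa;

	return (table[index]);
}
#endif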
/*
* Routines to manage legacy 32-bit and long mode 64-bit page mappings
*/
void
{
/*
* If passed VA is not page aligned, make it so
* and add the offset to the length we need to map,
* so a map of va 0x3100, len 0x1000 becomes a map
* of va 0x3000, len 0x1100.
*/
}
while (len) {
long_ptbase)) {
/*
* We can skip the next map_pagesize bytes since
* they've already been mapped.
*/
#ifdef DEBUG
AMD64_MODE_LEGACY))) {
printf("WARNING: 64-bit va 0x%llx already "
"mapped, pa 0x%llx, len 0x%x!\n",
pagesize);
printf(" Will not remap address to pa "
"0x%llx as requested.\n",
}
#endif /* DEBUG */
#ifdef lint
#endif /* lint */
entry = 0;
}
if (entry) {
/*
* Valid page mapping for va found, so either add
* it to the current page range or map what we have
* and start a new range.
*/
if ((map_pagesize == AMD64_PAGESIZE4M) &&
}
if (!maplen) {
}
} else {
/*
* Range of mapped entries ends,
* so map what we've got and start
* a new range.
*/
/*
* Use current mapping as start of
* new range.
*/
if (!(AMD64_PAGEALIGNED(va,
pagesize))) {
}
} else {
/*
* Just increment mapping range
* by mapped pagesize.
*/
}
}
} else if (maplen) {
/*
* Found a bad map entry, so end the mapping range and
* translate the address range we have.
*/
maplen = 0;
}
}
/*
* If we ended with an outstanding range left to map, be sure to map it
* now.
*/
if (maplen)
}
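/*
 * Example (not part of the original source): a minimal sketch of the
 * alignment adjustment described at the top of the routine above.  Rounding
 * the VA down to a page boundary and adding the dropped offset to the length
 * turns a map of va 0x3100/len 0x1000 into va 0x3000/len 0x1100.  The EX_*
 * names and ex_align_range() are hypothetical.
 */
#if 0	/* illustrative only */
#define	EX_PAGESIZE	0x1000ULL
#define	EX_PAGEOFFSET	(EX_PAGESIZE - 1)
#define	EX_PAGEMASK	(~EX_PAGEOFFSET)

static void
ex_align_range(uint64_t *vap, uint64_t *lenp)
{
	uint64_t off = *vap & EX_PAGEOFFSET;

	*vap &= EX_PAGEMASK;	/* round down to a page boundary */
	*lenp += off;		/* grow length by the offset we dropped */
}
#endif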
void
{
/*
* If passed VA is not page aligned, make it so
* and add the offset to the length we need to map,
* so a map of va 0x3100, len 0x1000 becomes a map
* of va 0x3000, len 0x1100.
*/
}
while (len) {
&pagesize, legacy_ptbase)) {
/*
* We can skip the next map_pagesize bytes since
* they've already been mapped.
*/
#ifdef DEBUG
AMD64_MODE_LONG64))) {
printf("WARNING: 32-bit va 0x%llx already "
"mapped, pa 0x%llx, len 0x%x!\n",
printf(" Will not remap address to "
"pa 0x%llx as requested.\n",
}
#endif /* DEBUG */
#ifdef lint
#endif /* lint */
entry = 0;
}
: map_pagesize;
if (entry) {
/*
* Valid page mapping for va found, so either add
* it to the current page range or map what we have
* and start a new range.
*/
if ((map_pagesize == AMD64_PAGESIZE2M) &&
}
if (!maplen) {
}
} else {
/*
* Range of mapped entries ends,
* so map what we've got and start
* a new range.
*/
/*
* Use current mapping as start of
* new range.
*/
if (!(AMD64_PAGEALIGNED(va,
pagesize))) {
}
} else {
/*
* Just increment mapping range
* by mapped pagesize.
*/
}
}
} else if (maplen) {
/*
* Found a bad map entry, so end the mapping range and
* translate the address range we have.
*/
maplen = 0;
}
}
/*
* If we ended with an outstanding range left to map, be sure to map it
* now.
*/
if (maplen)
}
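/*
 * Example (not part of the original source): a minimal sketch of the
 * run-coalescing pattern both translation routines above follow.  Walk the
 * range, extend the current run while successive lookups return physically
 * contiguous mappings, flush the run whenever contiguity breaks or an entry
 * is missing, and flush any outstanding run at the end.  ex_lookup() and
 * ex_map_range() are hypothetical stand-ins for the real page-table helpers.
 */
#if 0	/* illustrative only */
extern int ex_lookup(uint64_t va, uint64_t *pap, uint64_t *pagesizep);
extern void ex_map_range(uint64_t va, uint64_t pa, uint64_t len);

static void
ex_translate(uint64_t va, uint64_t len)
{
	uint64_t mapva = 0, mappa = 0, maplen = 0;

	while (len) {
		uint64_t pa, pagesize = 0x1000;	/* assume 4K base pages */

		if (ex_lookup(va, &pa, &pagesize)) {
			if (maplen == 0) {
				mapva = va;	/* start a new run */
				mappa = pa;
				maplen = pagesize;
			} else if (pa == mappa + maplen) {
				maplen += pagesize;	/* extend the run */
			} else {
				/* discontiguous: flush and restart */
				ex_map_range(mapva, mappa, maplen);
				mapva = va;
				mappa = pa;
				maplen = pagesize;
			}
		} else if (maplen) {
			/* hole in the source tables: flush what we have */
			ex_map_range(mapva, mappa, maplen);
			maplen = 0;
		}
		va += pagesize;
		len = (pagesize >= len) ? 0 : len - pagesize;
	}
	if (maplen)
		ex_map_range(mapva, mappa, maplen);	/* final run */
}
#endif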
void
{
extern uint_t magic_phys;
/*
* The initial 64-bit page tables are set up with 0x200000:0x600000
* already identity mapped, so we can skip that range.
*/
/*
* Copy first 2M of boot's pages but SKIP PAGE ZERO.
*/
/*
* Now copy balance of boot's pages.
*
* The initial 64-bit page tables are set up with 0x200000:0x400000
* already identity mapped, so we can skip that range...
*/
}
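/*
 * Example (not part of the original source): a minimal sketch of the copy
 * logic described above: walk boot's identity-mapped pages, never copy page
 * zero, and skip the window the initial 64-bit tables already cover.
 * ex_copy_mapping() and the window bounds passed in are hypothetical.
 */
#if 0	/* illustrative only */
extern void ex_copy_mapping(uint64_t va);

static void
ex_copy_boot_mappings(uint64_t skip_start, uint64_t skip_end, uint64_t limit)
{
	uint64_t va;

	for (va = 0x1000; va < limit; va += 0x1000) {	/* skip page zero */
		if (va >= skip_start && va < skip_end)
			continue;	/* already identity mapped */
		ex_copy_mapping(va);
	}
}
#endif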