/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Copyright 2013 Joyent, Inc. All rights reserved.
*/
#include <sys/machparam.h>
#include <sys/x86_archext.h>
#include <sys/mach_mmu.h>
#include <sys/multiboot.h>
#include <util/strtolctype.h>
#include <sys/sha1.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#else /* !__xpv */
extern multiboot_header_t mb_header;
extern int have_cpuid(void);
#endif /* !__xpv */
#include <sys/inttypes.h>
#include <sys/bootinfo.h>
#include <sys/boot_console.h>
#include "dboot_asm.h"
#include "dboot_printf.h"
#include "dboot_xboot.h"
#include "dboot_elfload.h"
/*
* This file contains code that runs to transition us from either a multiboot
* compliant loader (32 bit non-paging) or a XPV domain loader to
 * regular kernel execution. Its task is to set up the kernel memory image
* and page tables.
*
* The code executes as:
* - 32 bits under GRUB (for 32 or 64 bit Solaris)
* - a 32 bit program for the 32-bit PV hypervisor
* - a 64 bit program for the 64-bit PV hypervisor (at least for now)
*
* Under the PV hypervisor, we must create mappings for any memory beyond the
* initial start of day allocation (such as the kernel itself).
*
 * When on the metal, the mapping between maddr_t and paddr_t is 1:1.
 * Since we run in protected mode with paging disabled, all such memory
 * is directly accessible.
*/
/*
* Standard bits used in PTE (page level) and PTP (internal levels)
*/
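/*
 * Minimal sketch of the elided declarations, assuming the PT_* flag
 * names from sys/mach_mmu.h.
 */
x86pte_t ptp_bits = PT_VALID | PT_REF | PT_USER | PT_WRITABLE;
x86pte_t pte_bits = PT_VALID | PT_REF | PT_WRITABLE | PT_MOD;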
/*
 * This is the target address (physical) where the kernel text and data
 * nucleus pages will be unpacked. On the hypervisor this is actually a
 * virtual address.
*/
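/*
 * Sketch of the declarations this comment describes; the 8Meg nucleus
 * size and the FOUR_MEG constant (dboot_xboot.h) are assumptions.
 */
paddr_t ktext_phys;
uint32_t ksize = 2 * FOUR_MEG;	/* kernel nucleus is 8Meg */

static uint64_t target_kernel_text;	/* value to use for KERNEL_TEXT */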
/*
 * The stack is set up in assembler before entering startup_kernel()
*/
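/* minimal sketch; STACK_SIZE is assumed to come from dboot_xboot.h */
char stack_space[STACK_SIZE];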
/*
* Used to track physical memory allocation
*/
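/* sketch: the first address not yet handed out by the allocator */
uint64_t next_avail_addr = 0;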
#if defined(__xpv)
/*
* Additional information needed for hypervisor memory allocation.
* Only memory up to scratch_end is mapped by page tables.
* mfn_base is the start of the hypervisor virtual image. It's ONE_GIG, so
* to derive a pfn from a pointer, you subtract mfn_base.
*/
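/* sketch of the hypervisor allocation state described above */
uint64_t scratch_end = 0;	/* no scratch memory mapped past this */
uint64_t mfn_base;		/* start of the guest's pseudo-phys memory */
extern start_info_t *xen_info;	/* assumed: set up by the asm entry code */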
#else /* __xpv */
/*
* If on the metal, then we have a multiboot loader.
*/
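multiboot_info_t *mb_info;

/*
 * Boot modules parsed from the loader info; sketch assuming the
 * struct boot_modules type and MAX_BOOT_MODULES from sys/bootinfo.h.
 */
#define	SHA1_ASCII_LENGTH	(SHA1_DIGEST_LENGTH * 2)
static struct boot_modules modules[MAX_BOOT_MODULES];
static uint_t modules_used;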
#endif /* __xpv */
/*
* This contains information passed to the kernel
*/
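/* sketch: the xboot_info handed to the kernel (struct from sys/bootinfo.h) */
struct xboot_info *bi;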
/*
* Page table and memory stuff.
*/
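/*
 * Sketch of the MMU globals the code below relies on; the exact original
 * declarations may differ.
 */
paddr_t max_mem;		/* maximum memory address */
paddr_t top_page_table;		/* physical address of the top level table */
uint_t top_level;		/* top level of the paging hierarchy */
uint_t ptes_per_table;		/* # of entries per page table */
uint_t pte_size;		/* size of a PTE in bytes */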
/*
* Information about processor MMU
*/
int amd64_support = 0;
int largepage_support = 0;
int pae_support = 0;
int pge_support = 0;
int NX_support = 0;
/*
* Low 32 bits of kernel entry address passed back to assembler.
* When running a 64 bit kernel, the high 32 bits are 0xffffffff.
*/
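/* sketch of the declaration this comment describes */
uint32_t entry_addr_low;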
/*
* Memlists for the kernel. We shouldn't need a lot of these.
*/
/*
* This should match what's in the bootloader. It's arbitrary, but GRUB
* in particular has limitations on how much space it can use before it
* stops working properly. This should be enough.
*/
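/* the value of 50 is an assumption consistent with the comment above */
#define	MAX_MEMLIST (50)

struct boot_memlist memlists[MAX_MEMLIST];
uint_t memlists_used;
struct boot_memlist pcimemlists[MAX_MEMLIST];
uint_t pcimemlists_used;
struct boot_memlist rsvdmemlists[MAX_MEMLIST];
uint_t rsvdmemlists_used;

/* assumed bounds for the PCI address probe range */
uint64_t pci_lo_limit = 0x00100000ul;
uint64_t pci_hi_limit = 0xfff00000ul;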
/*
* Debugging macros
*/
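/* sketch: both flags are parsed from the boot command line */
uint_t prom_debug = 0;
uint_t map_debug = 0;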
/*
 * Either hypervisor-specific or grub-specific code builds the initial
 * memlists. This code does the sort/merge/link for final use.
 */
static void
sort_physinstall(void)
{
	int i;
#if !defined(__xpv)
	int j;
	struct boot_memlist tmp;

	/*
	 * Now sort the memlists, in case they weren't in order.
	 * Yeah, this is a bubble sort; small, simple and easy to get right.
	 */
	DBG_MSG("Sorting phys-installed list\n");
	for (j = memlists_used - 1; j > 0; --j) {
		for (i = 0; i < j; ++i) {
			if (memlists[i].addr < memlists[i + 1].addr)
				continue;
			tmp = memlists[i];
			memlists[i] = memlists[i + 1];
			memlists[i + 1] = tmp;
		}
	}

	/*
	 * Merge any memlists that don't have holes between them.
	 */
	for (i = 0; i < memlists_used - 1; ++i) {
		if (memlists[i].addr + memlists[i].size !=
		    memlists[i + 1].addr)
			continue;

		if (prom_debug)
			dboot_printf(
			    "merging mem segs %" PRIx64 "...%" PRIx64
			    " w/ %" PRIx64 "...%" PRIx64 "\n",
			    memlists[i].addr,
			    memlists[i].addr + memlists[i].size,
			    memlists[i + 1].addr,
			    memlists[i + 1].addr + memlists[i + 1].size);

		memlists[i].size += memlists[i + 1].size;
		for (j = i + 1; j < memlists_used - 1; ++j)
			memlists[j] = memlists[j + 1];
		--memlists_used;
		--i;	/* after merging we need to reexamine, so do this */
	}
#endif	/* __xpv */

	if (prom_debug) {
		dboot_printf("\nFinal memlists:\n");
		for (i = 0; i < memlists_used; ++i) {
			dboot_printf("\t%d: addr=%" PRIx64 " size=%"
			    PRIx64 "\n", i, memlists[i].addr,
			    memlists[i].size);
		}
	}

	/*
	 * link together the memlists with native size pointers
	 */
	memlists[0].next = 0;
	memlists[0].prev = 0;
	for (i = 1; i < memlists_used; ++i) {
		memlists[i].prev = (native_ptr_t)(uintptr_t)(memlists + i - 1);
		memlists[i].next = 0;
		memlists[i - 1].next = (native_ptr_t)(uintptr_t)(memlists + i);
	}
	bi->bi_phys_install = (native_ptr_t)(uintptr_t)memlists;
	DBG(bi->bi_phys_install);
}
/*
* build bios reserved memlists
*/
static void
build_rsvdmemlists(void)
{
	int i;

	rsvdmemlists[0].next = 0;
	rsvdmemlists[0].prev = 0;
	for (i = 1; i < rsvdmemlists_used; ++i) {
		rsvdmemlists[i].prev =
		    (native_ptr_t)(uintptr_t)(rsvdmemlists + i - 1);
		rsvdmemlists[i].next = 0;
		rsvdmemlists[i - 1].next =
		    (native_ptr_t)(uintptr_t)(rsvdmemlists + i);
	}
	bi->bi_rsvdmem = (native_ptr_t)(uintptr_t)rsvdmemlists;
	DBG(bi->bi_rsvdmem);
}
#if defined(__xpv)
/*
* halt on the hypervisor after a delay to drain console output
*/
void
dboot_halt(void)
{
uint_t i = 10000;
while (--i)
(void) HYPERVISOR_yield();
(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
/*
 * From a machine address, find the corresponding pseudo-physical address.
 * Pseudo-physical addresses are contiguous and run from mfn_base in each VM.
 * Machine addresses are the real underlying hardware addresses.
 * These are needed for page table entries. Note that this routine is
 * poorly protected. A bad value of "ma" will cause a page fault.
 */
paddr_t
ma_to_pa(maddr_t ma)
{
	ulong_t pgoff = ma & MMU_PAGEOFFSET;
	/* sketch: mfn_to_pfn_mapping is assumed from sys/hypervisor.h */
	ulong_t pfn = mfn_to_pfn_mapping[ma >> MMU_PAGESHIFT];
	paddr_t pa;

	if (pfn >= xen_info->nr_pages)
		return (-(paddr_t)1);
	pa = ((paddr_t)pfn << MMU_PAGESHIFT) + pgoff;
#ifdef DEBUG
	if (prom_debug)
		dboot_printf("ma_to_pa(%" PRIx64 ") = %" PRIx64 "\n",
		    (uint64_t)ma, pa);
#endif
	return (pa);
}
/*
 * From a pseudo-physical address, find the corresponding machine address.
 */
maddr_t
pa_to_ma(paddr_t pa)
{
	ulong_t pfn = pa >> MMU_PAGESHIFT;
	/* minimal sketch; pfn_to_mfn() is assumed from the Xen headers */
	maddr_t ma = ((maddr_t)pfn_to_mfn(pfn) << MMU_PAGESHIFT) +
	    (pa & MMU_PAGEOFFSET);
#ifdef DEBUG
	if (prom_debug)
		dboot_printf("pa_to_ma(pfn=%lx) got %lx ma_to_pa() says %lx\n",
		    pfn, (ulong_t)ma, (ulong_t)ma_to_pa(ma));
#endif
	return (ma);
}
#endif /* __xpv */

x86pte_t
get_pteval(paddr_t table, uint_t index)
{
	if (pae_support)
		return (((x86pte_t *)(uintptr_t)table)[index]);
	return (((x86pte32_t *)(uintptr_t)table)[index]);
}
/*ARGSUSED*/
void
set_pteval(paddr_t table, uint_t index, uint_t level, x86pte_t pteval)
{
#ifdef __xpv
	mmu_update_t t;
	maddr_t mtable = pa_to_ma(table);
	int retcnt;

	t.ptr = (mtable + index * pte_size) | MMU_NORMAL_PT_UPDATE;
	t.val = pteval;
	if (HYPERVISOR_mmu_update(&t, 1, &retcnt, DOMID_SELF) || retcnt != 1)
		dboot_panic("HYPERVISOR_mmu_update() failed");
#else /* __xpv */
	uintptr_t tab_addr = (uintptr_t)table;

	if (pae_support)
		((x86pte_t *)tab_addr)[index] = pteval;
	else
		((x86pte32_t *)tab_addr)[index] = (x86pte32_t)pteval;
	if (level == top_level && level == 2)
		reload_cr3();
#endif /* __xpv */
}
paddr_t
make_ptable(x86pte_t *pteval, uint_t level)
{
	paddr_t new_table = (paddr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);

	if (level == top_level && level == 2)
		*pteval = pa_to_ma(new_table) | PT_VALID;
	else
		*pteval = pa_to_ma(new_table) | ptp_bits;
#ifdef __xpv
	/* Remove write permission to the new page table. */
	if (HYPERVISOR_update_va_mapping(new_table,
	    *pteval & ~((x86pte_t)PT_WRITABLE), UVMF_INVLPG | UVMF_LOCAL))
		dboot_panic("HYP_update_va_mapping error");
#endif
	if (map_debug)
		dboot_printf("new page table lvl=%d paddr=0x%lx ptp=0x%"
		    PRIx64 "\n", level, (ulong_t)new_table, *pteval);
	return (new_table);
}
x86pte_t *
map_pte(paddr_t table, uint_t index)
{
	return ((x86pte_t *)(uintptr_t)(table + index * pte_size));
}
/*
* dump out the contents of page tables...
*/
static void
dump_tables(void)
{
	uint_t save_index[4];	/* for recursion */
	char *save_table[4];	/* for recursion */
	uint_t l;
	uint64_t va;
	uint64_t pgsize;
	int index;
	int i;
	x86pte_t pteval;
	char *table;
	static char *tablist = "\t\t\t";
	char *tabs = tablist + 3 - top_level;
	paddr_t pa, pa1;
#if !defined(__xpv)
#define	maddr_t paddr_t
#endif /* !__xpv */

	dboot_printf("Finished pagetables:\n");
	table = (char *)(uintptr_t)top_page_table;
	l = top_level;
	va = 0;
	for (index = 0; index < ptes_per_table; ++index) {
		/* size mapped by one entry at this level (sketch) */
		pgsize = 1ull << (MMU_PAGESHIFT +
		    l * (pae_support ? 9 : 10));
		if (pae_support)
			pteval = ((x86pte_t *)table)[index];
		else
			pteval = ((x86pte32_t *)table)[index];
		if (pteval == 0)
			goto next_entry;

		dboot_printf("%s %p[0x%x] = %" PRIx64 ", va=%" PRIx64,
		    tabs + l, (void *)table, index, (uint64_t)pteval, va);
		pa = ma_to_pa(pteval & MMU_PAGEMASK);
		dboot_printf(" physaddr=%" PRIx64 "\n", pa);

		/*
		 * Don't try to walk hypervisor private pagetables
		 */
		if (l > 1 || (l == 1 && (pteval & PT_PAGESIZE) == 0)) {
			save_table[l] = table;
			save_index[l] = index;
			--l;
			index = -1;
			table = (char *)(uintptr_t)
			    ma_to_pa(pteval & MMU_PAGEMASK);
			goto recursion;
		}

		/*
		 * shorten dump for consecutive mappings
		 */
		for (i = 1; index + i < ptes_per_table; ++i) {
			if (pae_support)
				pteval = ((x86pte_t *)table)[index + i];
			else
				pteval = ((x86pte32_t *)table)[index + i];
			if (pteval == 0)
				break;
			pa1 = ma_to_pa(pteval & MMU_PAGEMASK);
			if (pa1 != pa + i * pgsize)
				break;
		}
		if (i > 2) {
			dboot_printf("%s...\n", tabs + l);
			va += pgsize * (i - 2);
			index += i - 2;
		}
next_entry:
		va += pgsize;
		if (l == 3 && index == 256)	/* VA hole */
			va = 0xffff800000000000ull;
recursion:
		;
	}
	if (l < top_level) {
		++l;
		index = save_index[l];
		table = save_table[l];
		goto recursion;
	}
}
/*
* Add a mapping for the machine page at the given virtual address.
*/
static void
map_ma_at_va(maddr_t ma, native_ptr_t va, uint_t level)
{
	x86pte_t *ptep;
	x86pte_t pteval;

	pteval = ma | pte_bits;
	if (level > 0)
		pteval |= PT_PAGESIZE;
	if (va >= target_kernel_text && pge_support)
		pteval |= PT_GLOBAL;

#if defined(__xpv)
	/*
	 * see if we can avoid find_pte() on the hypervisor
	 */
	if (HYPERVISOR_update_va_mapping(va, pteval,
	    UVMF_INVLPG | UVMF_LOCAL) == 0)
		return;
#endif

	/*
	 * Find the pte that will map this address. This creates any
	 * missing intermediate level page tables
	 */
	ptep = find_pte(va, NULL, level, 0);	/* assumed helper signature */

	/*
	 * When paravirtualized, we must use hypervisor calls to modify the
	 * PTE, since paging is active. On real hardware we just write to
	 * the pagetables which aren't in use yet.
	 */
#if defined(__xpv)
	(void) ptep;	/* the PTE is set via the hypervisor call below */
	if (HYPERVISOR_update_va_mapping(va, pteval, UVMF_INVLPG | UVMF_LOCAL))
		dboot_panic("HYPERVISOR_update_va_mapping() failed");
#else
	if (pae_support)
		*ptep = pteval;
	else
		*((x86pte32_t *)ptep) = (x86pte32_t)pteval;
#endif
}
/*
* Add a mapping for the physical page at the given virtual address.
*/
static void
map_pa_at_va(paddr_t pa, native_ptr_t va, uint_t level)
{
	map_ma_at_va(pa_to_ma(pa), va, level);
}
/*
* This is called to remove start..end from the
* possible range of PCI addresses.
*/
static void
exclude_from_pci(uint64_t start, uint64_t end)
{
	int i;
	int j;
	struct boot_memlist *ml;

	for (i = 0; i < pcimemlists_used; ++i) {
		ml = &pcimemlists[i];

		/* delete the entire range? */
		if (start <= ml->addr && ml->addr + ml->size <= end) {
			--pcimemlists_used;
			for (j = i; j < pcimemlists_used; ++j)
				pcimemlists[j] = pcimemlists[j + 1];
			--i;	/* to revisit the new one at this index */
		}

		/* split a range? */
		else if (ml->addr < start && end < ml->addr + ml->size) {
			++pcimemlists_used;
			if (pcimemlists_used > MAX_MEMLIST)
				dboot_panic("too many pcimemlists");
			for (j = pcimemlists_used - 1; j > i; --j)
				pcimemlists[j] = pcimemlists[j - 1];
			ml->size = start - ml->addr;
			++ml;
			ml->size = (ml->addr + ml->size) - end;
			ml->addr = end;
			++i;	/* skip on to next one */
		}

		/* cut memory off the start? */
		else if (ml->addr < end && end < ml->addr + ml->size) {
			ml->size -= end - ml->addr;
			ml->addr = end;
		}

		/* cut memory off the end? */
		else if (ml->addr <= start && start < ml->addr + ml->size) {
			ml->size = start - ml->addr;
		}
	}
}
/*
* Xen strips the size field out of the mb_memory_map_t, see struct e820entry
* definition in Xen source.
*/
#ifdef __xpv
typedef struct {
	uint32_t base_addr_low, base_addr_high;
	uint32_t length_low, length_high;
	uint32_t type;
} mmap_t;
#else
typedef mb_memory_map_t mmap_t;
#endif
static void
build_pcimemlists(void)
{
	uint64_t page_offset = MMU_PAGEOFFSET;	/* needs to be 64 bits */
	uint64_t start;
	uint64_t end;
	int i;

	/*
	 * initialize
	 */
	pcimemlists[0].addr = pci_lo_limit;
	pcimemlists[0].size = pci_hi_limit - pci_lo_limit;
	pcimemlists_used = 1;

	/*
	 * Fill in PCI memlists.
	 */
	for (i = 0; i < memlists_used; ++i) {
		start = memlists[i].addr;
		end = start + memlists[i].size;
		if (prom_debug)
			dboot_printf("\tExcluding mem range 0x%" PRIx64
			    "..0x%" PRIx64 "\n", start, end);

		/*
		 * page align start and end
		 */
		start = (start + page_offset) & ~page_offset;
		end &= ~page_offset;
		if (start >= end)
			continue;

		exclude_from_pci(start, end);
	}

	/*
	 * Finish off the pcimemlist
	 */
	if (prom_debug) {
		for (i = 0; i < pcimemlists_used; ++i) {
			dboot_printf("pcimemlist entry 0x%" PRIx64
			    "..0x%" PRIx64 "\n", pcimemlists[i].addr,
			    pcimemlists[i].addr + pcimemlists[i].size);
		}
	}
	pcimemlists[0].next = 0;
	pcimemlists[0].prev = 0;
	for (i = 1; i < pcimemlists_used; ++i) {
		pcimemlists[i].prev =
		    (native_ptr_t)(uintptr_t)(pcimemlists + i - 1);
		pcimemlists[i].next = 0;
		pcimemlists[i - 1].next =
		    (native_ptr_t)(uintptr_t)(pcimemlists + i);
	}
	bi->bi_pcimem = (native_ptr_t)(uintptr_t)pcimemlists;
	DBG(bi->bi_pcimem);
}
#if defined(__xpv)
/*
* Initialize memory allocator stuff from hypervisor-supplied start info.
*
* There is 512KB of scratch area after the boot stack page.
* We'll use that for everything except the kernel nucleus pages which are too
* big to fit there and are allocated last anyway.
*/
static void
init_mem_alloc(void)
{
DBG_MSG("Entered init_mem_alloc()\n");
	/*
	 * Free memory follows the stack. There's at least 512KB of scratch
	 * space, rounded up to at least 2Mb alignment. That should be enough
	 * for the page tables we'll need to build. The nucleus memory is
	 * allocated last and will be outside the addressable range. We'll
	 * switch to new page tables before we unpack the kernel.
	 */
/*
* For paranoia, leave some space between hypervisor data and ours.
* Use 500 instead of 512.
*/
	/*
	 * The domain builder gives us at most 1 module
	 */
	if (xen_info->mod_len != 0) {
		/* sketch of the elided module bookkeeping */
		DBG(xen_info->mod_start);
		DBG(xen_info->mod_len);
		bi->bi_modules = (native_ptr_t)xen_info->mod_start;
		bi->bi_module_cnt = 1;
	} else {
		bi->bi_module_cnt = 0;
	}
	/*
	 * Using pseudo-physical addresses, so only 1 memlist element
	 */
	memlists[0].addr = mfn_base;	/* sketch; size assumed from nr_pages */
	memlists[0].size = (uint64_t)xen_info->nr_pages << MMU_PAGESHIFT;
	memlists_used = 1;
	/*
	 * finish building physinstall list
	 */
	sort_physinstall();

	/*
	 * build bios reserved memlists
	 */
	build_rsvdmemlists();
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		/*
		 * build PCI Memory list (sketch; the buffer size and the
		 * xen_memory_map_t usage are assumptions)
		 */
		static mmap_t map_buffer[MAX_MEMLIST];
		xen_memory_map_t map;

		map.nr_entries = MAX_MEMLIST;
		/*LINTED: constant in conditional context*/
		set_xen_guest_handle(map.buffer, map_buffer);
		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &map) != 0)
			dboot_panic("getting XENMEM_machine_memory_map failed");
	}
}
#else /* !__xpv */
static uint8_t
dboot_a2h(char v)
{
if (v >= 'a')
return (v - 'a' + 0xa);
else if (v >= 'A')
return (v - 'A' + 0xa);
else if (v >= '0')
return (v - '0');
else
dboot_panic("bad ASCII hex character %c\n", v);
return (0);
}
static void
digest_a2h(const char *ascii, uint8_t *digest)
{
	unsigned int i;

	for (i = 0; i < SHA1_DIGEST_LENGTH; i++) {
		digest[i] = dboot_a2h(ascii[i * 2]) << 4;
		digest[i] |= dboot_a2h(ascii[i * 2 + 1]);
	}
}
/*
 * Generate a SHA-1 hash of the first len bytes of image, and compare it with
 * the ASCII-format hash found in the 40-byte buffer at ascii. If they
 * match, return 0, otherwise -1. This works only for images smaller than
 * 4 GB, which should not be a problem.
 */
static int
check_image_hash(uint_t midx)
{
	const char *ascii = (const char *)(uintptr_t)modules[midx].bm_hash;
	const void *image = (const void *)(uintptr_t)modules[midx].bm_addr;
	size_t len = (size_t)modules[midx].bm_size;
	SHA1_CTX ctx;
	uint8_t digest[SHA1_DIGEST_LENGTH];
	uint8_t baseline[SHA1_DIGEST_LENGTH];
	unsigned int i;

	digest_a2h(ascii, baseline);
	SHA1Init(&ctx);
	SHA1Update(&ctx, image, len);
	SHA1Final(digest, &ctx);
	for (i = 0; i < SHA1_DIGEST_LENGTH; i++) {
		if (digest[i] != baseline[i])
			return (-1);
	}
	return (0);
}
static const char *
module_type_to_name(boot_module_type_t type)
{
switch (type) {
case BMT_ROOTFS:
return ("rootfs");
case BMT_FILE:
return ("file");
case BMT_HASH:
return ("hash");
default:
return ("unknown");
}
}
static void
check_images(void)
{
	uint_t i;
	char displayhash[SHA1_ASCII_LENGTH + 1];

	for (i = 0; i < modules_used; i++) {
		if (prom_debug) {
			dboot_printf("module #%d: name %s type %s "
			    "addr %lx size %lx\n",
			    i, (char *)(uintptr_t)modules[i].bm_name,
			    module_type_to_name(modules[i].bm_type),
			    (ulong_t)modules[i].bm_addr,
			    (ulong_t)modules[i].bm_size);
		}

		if (modules[i].bm_type == BMT_HASH ||
		    modules[i].bm_hash == (native_ptr_t)(uintptr_t)NULL) {
			DBG_MSG("module has no hash; skipping check\n");
			continue;
		}
		(void) memcpy(displayhash,
		    (void *)(uintptr_t)modules[i].bm_hash,
		    SHA1_ASCII_LENGTH);
		displayhash[SHA1_ASCII_LENGTH] = '\0';
		if (prom_debug) {
			dboot_printf("checking expected hash [%s]: ",
			    displayhash);
		}

		if (check_image_hash(i) != 0)
			dboot_panic("hash mismatch!\n");
		else
			DBG_MSG("OK\n");
	}
}
/*
* Determine the module's starting address, size, name, and type, and fill the
* boot_modules structure. This structure is used by the bop code, except for
* hashes which are checked prior to transferring control to the kernel.
*/
static void
process_module(int midx)
{
	/* sketch of the elided module lookup from the multiboot info */
	uint32_t mod_start = mb_info->mods_addr +
	    midx * sizeof (mb_module_t);
	mb_module_t *mod = (mb_module_t *)(uintptr_t)mod_start;
	char *cmdline = (char *)(uintptr_t)mod->mod_name;
	char *p, *q;

	if (prom_debug) {
		dboot_printf("\tmodule #%d: '%s' at 0x%lx, end 0x%lx\n",
		    midx, cmdline, (ulong_t)mod->mod_start,
		    (ulong_t)mod->mod_end);
	}
	if (mod->mod_start > mod->mod_end) {
		dboot_panic("module #%d: module start address 0x%lx greater "
		    "than end address 0x%lx", midx,
		    (ulong_t)mod->mod_start, (ulong_t)mod->mod_end);
	}
/*
* A brief note on lengths and sizes: GRUB, for reasons unknown, passes
* the address of the last valid byte in a module plus 1 as mod_end.
* This is of course a bug; the multiboot specification simply states
* that mod_start and mod_end "contain the start and end addresses of
* the boot module itself" which is pretty obviously not what GRUB is
* doing. However, fixing it requires that not only this code be
* changed but also that other code consuming this value and values
* derived from it be fixed, and that the kernel and GRUB must either
* both have the bug or neither. While there are a lot of combinations
* that will work, there are also some that won't, so for simplicity
* we'll just cope with the bug. That means we won't actually hash the
* byte at mod_end, and we will expect that mod_end for the hash file
* itself is one greater than some multiple of 41 (40 bytes of ASCII
* hash plus a newline for each module). We set bm_size to the true
* correct number of bytes in each module, achieving exactly this.
*/
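	/* hedged sketch of the size fixup the comment above describes */
	modules[midx].bm_size = mod->mod_end - mod->mod_start;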
return;
}
while (p != NULL) {
q = strsep(&p, " \t\f\n\r");
}
continue;
}
continue;
q += 5;
if (strcmp(q, "rootfs") == 0) {
} else if (strcmp(q, "hash") == 0) {
} else if (strcmp(q, "file") != 0) {
dboot_printf("\tmodule #%d: unknown module "
"type '%s'; defaulting to 'file'",
midx, q);
}
continue;
}
}
continue;
}
dboot_printf("ignoring unknown option '%s'\n", q);
}
}
/*
* Backward compatibility: if there are exactly one or two modules, both
* of type 'file' and neither with an embedded hash value, we have been
* given the legacy style modules. In this case we need to treat the first
* module as a rootfs and the second as a hash referencing that module.
* Otherwise, even if the configuration is invalid, we assume that the
* operator knows what he's doing or at least isn't being bitten by this
* interface change.
*/
static void
fixup_modules(void)
{
	if (modules_used == 0 || modules_used > 2)
		return;
	if (modules[0].bm_type != BMT_FILE ||
	    (modules_used > 1 && modules[1].bm_type != BMT_FILE)) {
		return;
	}
	if (modules[0].bm_hash != (native_ptr_t)(uintptr_t)NULL ||
	    (modules_used > 1 &&
	    modules[1].bm_hash != (native_ptr_t)(uintptr_t)NULL)) {
		return;
	}

	modules[0].bm_type = BMT_ROOTFS;
	if (modules_used > 1) {
		modules[1].bm_type = BMT_HASH;
		modules[1].bm_name = modules[0].bm_name;
	}
}
/*
* For modules that do not have assigned hashes but have a separate hash module,
* find the assigned hash module and set the primary module's bm_hash to point
* to the hash data from that module. We will then ignore modules of type
* BMT_HASH from this point forward.
*/
static void
assign_module_hashes(void)
{
	uint_t i, j;

	for (i = 0; i < modules_used; i++) {
		if (modules[i].bm_type == BMT_HASH ||
		    modules[i].bm_hash != (native_ptr_t)(uintptr_t)NULL) {
			continue;
		}

		for (j = 0; j < modules_used; j++) {
			if (modules[j].bm_type != BMT_HASH ||
			    strcmp((char *)(uintptr_t)modules[j].bm_name,
			    (char *)(uintptr_t)modules[i].bm_name) != 0) {
				continue;
			}

			if (modules[j].bm_size < SHA1_ASCII_LENGTH) {
				dboot_printf("Short hash module of length "
				    "0x%lx bytes; ignoring\n",
				    (ulong_t)modules[j].bm_size);
			} else {
				modules[i].bm_hash = modules[j].bm_addr;
			}
			break;
		}
	}
}
/*
* During memory allocation, find the highest address not used yet.
*/
static void
check_higher(paddr_t a)
{
	if (a < next_avail_addr)
		return;
	next_avail_addr = RNDUP(a + 1, MMU_PAGESIZE);
	DBG(next_avail_addr);
}
/*
* Walk through the module information finding the last used address.
* The first available address will become the top level page table.
*
* We then build the phys_install memlist from the multiboot information.
*/
static void
init_mem_alloc(void)
{
	mb_memory_map_t *mmap;
	mb_module_t *mod;
	uint64_t start;
	uint64_t end;
	uint64_t page_offset = MMU_PAGEOFFSET;	/* needs to be 64 bits */
	extern char _end[];
	int i;

	DBG_MSG("Entered init_mem_alloc()\n");
	if (mb_info->mods_count > MAX_BOOT_MODULES) {
		dboot_panic("Too many modules (%d) -- the maximum is %d.",
		    mb_info->mods_count, MAX_BOOT_MODULES);
	}
	/*
	 * search the modules to find the last used address
	 * we'll build the module list while we're walking through here
	 */
	DBG_MSG("\nFinding Modules\n");
	check_higher((paddr_t)(uintptr_t)&_end);
	for (mod = (mb_module_t *)(uintptr_t)mb_info->mods_addr, i = 0;
	    i < mb_info->mods_count;
	    ++mod, ++i) {
		process_module(i);
		check_higher(mod->mod_end);
	}
	modules_used = mb_info->mods_count;
	bi->bi_modules = (native_ptr_t)(uintptr_t)modules;
	bi->bi_module_cnt = mb_info->mods_count;
	fixup_modules();
	assign_module_hashes();
check_images();
/*
* Walk through the memory map from multiboot and build our memlist
* structures. Note these will have native format pointers.
*/
DBG_MSG("\nFinding Memory Map\n");
max_mem = 0;
int cnt = 0;
++cnt;
if (prom_debug)
/*
* page align start and end
*/
end &= ~page_offset;
continue;
/*
* only type 1 is usable RAM
*/
case 1:
if (memlists_used > MAX_MEMLIST)
dboot_panic("too many memlists");
break;
case 2:
if (rsvdmemlists_used > MAX_MEMLIST)
dboot_panic("too many rsvdmemlists");
break;
default:
continue;
}
}
/*
* Old platform - assume I/O space at the end of memory.
*/
pcimemlists[0].addr =
pcimemlists[0].next = 0;
pcimemlists[0].prev = 0;
} else {
dboot_panic("No memory info from boot loader!!!");
}
	/*
	 * finish processing the physinstall list
	 */
	sort_physinstall();

	/*
	 * build bios reserved mem lists
	 */
	build_rsvdmemlists();
}
#endif /* !__xpv */
/*
* Simple memory allocator, allocates aligned physical memory.
* Note that startup_kernel() only allocates memory, never frees.
* Memory usage just grows in an upward direction.
*/
static void *
do_mem_alloc(uint32_t size, uint32_t align)
{
	uint_t i;
	uint64_t best_addr;
	uint64_t start;
	uint64_t end;
/*
* make sure size is a multiple of pagesize
*/
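	/* sketch of the rounding described above; RNDUP is assumed */
	size = RNDUP(size, MMU_PAGESIZE);
	if (align < MMU_PAGESIZE)
		align = MMU_PAGESIZE;
	next_avail_addr = RNDUP(next_avail_addr, align);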
	/*
	 * XXPV fixme joe
	 *
	 * a really large bootarchive that causes you to run out of memory
	 * may cause this to blow up
	 */
	/* LINTED E_UNEXPECTED_UINT_PROMOTION */
	best_addr = -(uint64_t)size;
	for (i = 0; i < memlists_used; ++i) {
		start = memlists[i].addr;
		end = start + memlists[i].size;
#if defined(__xpv)
		/* only the scratch area is mapped this early (sketch) */
		if (end > scratch_end)
			end = scratch_end;
#endif
		/*
		 * did we find the desired address?
		 */
		if (start <= next_avail_addr &&
		    next_avail_addr + size <= end) {
			best_addr = next_avail_addr;
			goto done;
		}

		/*
		 * if not is this address the best so far?
		 */
		if (start > next_avail_addr && start < best_addr &&
		    RNDUP(start, align) + size <= end)
			best_addr = RNDUP(start, align);
	}

	/*
	 * We didn't find exactly the address we wanted, due to going off the
	 * end of a memory region. Return the best found memory address.
	 */
done:
	next_avail_addr = best_addr + size;
#if defined(__xpv)
	if (next_avail_addr > scratch_end)
		dboot_panic("Out of mem next_avail: 0x%lx, scratch_end: "
		    "0x%lx", (ulong_t)next_avail_addr,
		    (ulong_t)scratch_end);
#endif
	(void) memset((void *)(uintptr_t)best_addr, 0, size);
	return ((void *)(uintptr_t)best_addr);
}
void *
mem_alloc(uint32_t size)
{
	return (do_mem_alloc(size, MMU_PAGESIZE));
}
/*
* Build page tables to map all of memory used so far as well as the kernel.
*/
static void
build_page_tables(void)
{
	uint32_t pgsize;
	uint_t level;
	uint32_t off;
	uint64_t start;
#if !defined(__xpv)
	uint32_t i;
	uint64_t end;
#endif /* __xpv */

	/*
	 * If we're on metal, we need to create the top level pagetable.
	 */
#if defined(__xpv)
	top_page_table = (paddr_t)(uintptr_t)xen_info->pt_base;
#else /* __xpv */
	top_page_table = (paddr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);
#endif /* __xpv */
	DBG((uintptr_t)top_page_table);
	/*
	 * Determine if we'll use large mappings for kernel, then map it.
	 */
	if (largepage_support) {
		/* sketch: 2MB large pages with PAE, 4MB without */
		pgsize = pae_support ? TWO_MEG : FOUR_MEG;
		level = 1;
	} else {
		pgsize = MMU_PAGESIZE;
		level = 0;
	}

	DBG_MSG("Mapping kernel\n");
	for (off = 0; off < ksize; off += pgsize)
		map_pa_at_va(ktext_phys + off, target_kernel_text + off,
		    level);
/*
* The kernel will need a 1 page window to work with page tables
*/
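	/*
	 * Hedged sketch: allocate the window page and remember its PTE;
	 * the find_pte() signature is an assumption.
	 */
	bi->bi_pt_window = (native_ptr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);
	bi->bi_pte_to_pt_window =
	    (native_ptr_t)(uintptr_t)find_pte(bi->bi_pt_window, NULL, 0, 0);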
#if defined(__xpv)
if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
/* If this is a domU we're done. */
DBG_MSG("\nPage tables constructed\n");
return;
}
#endif /* __xpv */
/*
* We need 1:1 mappings for the lower 1M of memory to access
* BIOS tables used by a couple of drivers during boot.
*
* The following code works because our simple memory allocator
* only grows usage in an upwards direction.
*
* Note that by this point in boot some mappings for low memory
	 * may already exist because we've already accessed devices in low
* memory. (Specifically the video frame buffer and keyboard
* status ports.) If we're booting on raw hardware then GRUB
* created these mappings for us. If we're booting under a
* hypervisor then we went ahead and remapped these devices into
* memory allocated within dboot itself.
*/
	if (map_debug)
		dboot_printf("1:1 map pa=0..1Meg\n");
	for (start = 0; start < 1024 * 1024; start += MMU_PAGESIZE) {
#if defined(__xpv)
		map_ma_at_va(start, start, 0);
#else /* __xpv */
		map_pa_at_va(start, start, 0);
#endif /* __xpv */
	}
#if !defined(__xpv)
	for (i = 0; i < memlists_used; ++i) {
		start = memlists[i].addr;
		end = start + memlists[i].size;
		if (map_debug)
			dboot_printf("1:1 map pa=%" PRIx64 "..%" PRIx64 "\n",
			    start, end);
		while (start < end && start < next_avail_addr) {
			map_pa_at_va(start, start, 0);
			start += MMU_PAGESIZE;
		}
	}
#endif /* !__xpv */
DBG_MSG("\nPage tables constructed\n");
}
#define NO_MULTIBOOT \
"multiboot is no longer used to boot the Solaris Operating System.\n\
The grub entry should be changed to:\n\
module$ /platform/i86pc/$ISADIR/boot_archive\n\
See http://illumos.org/msg/SUNOS-8000-AK for details.\n"
/*
* startup_kernel has a pretty simple job. It builds pagetables which reflect
* 1:1 mappings for all memory in use. It then also adds mappings for
* the kernel nucleus at virtual address of target_kernel_text using large page
* mappings. The page table pages are also accessible at 1:1 mapped
* virtual addresses.
*/
/*ARGSUSED*/
void
startup_kernel(void)
{
char *cmdline;
#if defined(__xpv)
	physdev_set_iopl_t set_iopl;
#endif /* __xpv */
	/*
	 * At this point we are executing in 32-bit protected mode with
	 * paging disabled (or as a 64-bit program under the PV hypervisor).
	 */
#if defined(__xpv)
	cmdline = (char *)xen_info->cmd_line;
#else /* __xpv */
	cmdline = (char *)(uintptr_t)mb_info->cmdline;
#endif /* __xpv */
	prom_debug = (strstr(cmdline, "prom_debug") != NULL);
	map_debug = (strstr(cmdline, "map_debug") != NULL);
#if defined(__xpv)
/*
* For dom0, before we initialize the console subsystem we'll
	 * need to enable io operations, so set I/O privilege level to 1.
*/
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		set_iopl.iopl = 1;
		(void) HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	}
#endif /* __xpv */
DBG_MSG("\n\nSolaris prekernel set: ");
DBG_MSG("\n");
}
/*
* boot info must be 16 byte aligned for 64 bit kernel ABI
*/
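	/* hedged sketch: boot_info is the static buffer from dboot_xboot.h */
	bi = (struct xboot_info *)(((uintptr_t)boot_info + 0xf) & ~0xful);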
/*
* Need correct target_kernel_text value
*/
#if defined(_BOOT_TARGET_amd64)
	target_kernel_text = KERNEL_TEXT_amd64;
#else
	target_kernel_text = KERNEL_TEXT_i386;
#endif
	DBG(target_kernel_text);
#if defined(__xpv)
/*
* XXPV Derive this stuff from CPUID / what the hypervisor has enabled
*/
#if defined(_BOOT_TARGET_amd64)
/*
* 64-bit hypervisor.
*/
amd64_support = 1;
pae_support = 1;
#else /* _BOOT_TARGET_amd64 */
/*
* See if we are running on a PAE Hypervisor
*/
	{
		xen_capabilities_info_t caps;

		if (HYPERVISOR_xen_version(XENVER_capabilities, &caps) != 0)
			dboot_panic("HYPERVISOR_xen_version(caps) failed");
		if (prom_debug)
			dboot_printf("xen capabilities: %s\n", caps);
		if (strstr(caps, "x86_32p") != NULL)
			pae_support = 1;
	}
#endif /* _BOOT_TARGET_amd64 */
	{
		xen_platform_parameters_t p;

		if (HYPERVISOR_xen_version(XENVER_platform_parameters, &p) != 0)
			dboot_panic("HYPERVISOR_xen_version(parms) failed");
		DBG(p.virt_start);
	}
/*
* The hypervisor loads stuff starting at 1Gig
*/
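	/* sketch; ONE_GIG assumed from dboot_xboot.h (see mfn_base above) */
	mfn_base = ONE_GIG;
	DBG(mfn_base);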
	/*
	 * enable writable page table mode for the hypervisor
	 */
	if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
	    VMASST_TYPE_writable_pagetables) < 0)
		dboot_panic("HYPERVISOR_vm_assist(writable_pagetables) failed");
	/*
	 * check for NX support
	 */
	if (pae_support) {
		uint32_t eax, edx;

		eax = 0x80000000;
		edx = get_cpuid_edx(&eax);
		if (eax >= 0x80000001) {
			eax = 0x80000001;
			edx = get_cpuid_edx(&eax);
			if (edx & CPUID_AMD_EDX_NX)
				NX_support = 1;
		}
	}
#if !defined(_BOOT_TARGET_amd64)
/*
* The 32-bit hypervisor uses segmentation to protect itself from
* guests. This means when a guest attempts to install a flat 4GB
* code or data descriptor the 32-bit hypervisor will protect itself
* by silently shrinking the segment such that if the guest attempts
* any access where the hypervisor lives a #gp fault is generated.
* The problem is that some applications expect a full 4GB flat
* segment for their current thread pointer and will use negative
* offset segment wrap around to access data. TLS support in linux
* brand is one example of this.
*
* The 32-bit hypervisor can catch the #gp fault in these cases
* and emulate the access without passing the #gp fault to the guest
* but only if VMASST_TYPE_4gb_segments is explicitly turned on.
* Seems like this should have been the default.
	 * Either way, we want the hypervisor -- and not Solaris -- to deal
	 * with emulating these accesses.
	 */
	if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
	    VMASST_TYPE_4gb_segments) < 0)
		dboot_panic("HYPERVISOR_vm_assist(4gb_segments) failed");
#endif /* !_BOOT_TARGET_amd64 */
#else /* __xpv */
	/*
	 * use cpuid to enable MMU features
	 */
	if (have_cpuid()) {
		uint32_t eax, edx;

		eax = 1;
		edx = get_cpuid_edx(&eax);
		if (edx & CPUID_INTC_EDX_PSE)
			largepage_support = 1;
		if (edx & CPUID_INTC_EDX_PGE)
			pge_support = 1;
		if (edx & CPUID_INTC_EDX_PAE)
			pae_support = 1;

		eax = 0x80000000;
		edx = get_cpuid_edx(&eax);
		if (eax >= 0x80000001) {
			eax = 0x80000001;
			edx = get_cpuid_edx(&eax);
			if (edx & CPUID_AMD_EDX_LM)
				amd64_support = 1;
			if (edx & CPUID_AMD_EDX_NX)
				NX_support = 1;
		}
	} else {
		dboot_printf("cpuid not supported\n");
	}
#endif /* __xpv */
#if defined(_BOOT_TARGET_amd64)
if (amd64_support == 0)
dboot_panic("long mode not supported, rebooting");
else if (pae_support == 0)
dboot_panic("long mode, but no PAE; rebooting");
#else
	/*
	 * Allow the command line to override use of PAE for 32 bit.
	 */
	if (strstr(cmdline, "disablePAE=true") != NULL) {
		pae_support = 0;
		NX_support = 0;
		amd64_support = 0;
	}
#endif
/*
* initialize the simple memory allocator
*/
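	init_mem_alloc();	/* defined above for both metal and __xpv */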
#if !defined(__xpv) && !defined(_BOOT_TARGET_amd64)
	/*
	 * disable PAE on 32 bit h/w w/o NX and < 4Gig of memory
	 */
	if (max_mem < FOUR_GIG && NX_support == 0)
		pae_support = 0;
#endif
/*
* configure mmu information
*/
if (pae_support) {
ptes_per_table = 512;
pte_size = 8;
#if defined(_BOOT_TARGET_amd64)
top_level = 3;
#else
top_level = 2;
#endif
} else {
pae_support = 0;
NX_support = 0;
ptes_per_table = 1024;
pte_size = 4;
top_level = 1;
}
#if defined(__xpv)
	ktext_phys = ONE_GIG;		/* from UNIX in 1:1 mapping */
#else
	ktext_phys = FOUR_MEG;		/* from UNIX in 1:1 mapping */
#endif
	DBG(ktext_phys);
#if !defined(__xpv) && defined(_BOOT_TARGET_amd64)
/*
* For grub, copy kernel bits from the ELF64 file to final place.
*/
DBG_MSG("\nAllocating nucleus pages.\n");
	ktext_phys = (uintptr_t)do_mem_alloc(ksize, FOUR_MEG);
	if (ktext_phys == 0)
		dboot_panic("failed to allocate aligned kernel memory");
	/* sketch: dboot_elfload64() assumed from dboot_elfload.h */
	if (dboot_elfload64(mb_header.load_addr) != 0)
		dboot_panic("failed to parse kernel ELF image, rebooting");
#endif
/*
* Allocate page tables.
*/
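	DBG_MSG("Building page tables.\n");
	build_page_tables();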
/*
* return to assembly code to switch to running kernel
*/
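	/* hedged sketch: hand the kernel entry point back to the asm code */
	entry_addr_low = (uint32_t)target_kernel_text;
	DBG(entry_addr_low);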
#if defined(__xpv)
/*
* unmap unused pages in start area to make them available for DMA
*/
	while (next_avail_addr < scratch_end) {
		(void) HYPERVISOR_update_va_mapping(next_avail_addr,
		    0, UVMF_INVLPG | UVMF_LOCAL);
		next_avail_addr += MMU_PAGESIZE;
	}
#else /* __xpv */
#endif /* __xpv */
#ifndef __xpv
if (map_debug)
dump_tables();
#endif
DBG_MSG("\n\n*** DBOOT DONE -- back to asm to jump to kernel\n\n");
}