startup.c revision 90e485ac5483c6f365541900489a833d23c151a3
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/autoconf.h>
#include <sys/privregs.h>
#include <sys/bootconf.h>
#include <sys/ndi_impldefs.h>
#include <sys/ddidmareq.h>
#include <vm/seg_kmem.h>
#include <sys/vm_machparam.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/smp_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/segments.h>
#include <sys/kobj_lex.h>
#include <sys/cpc_impl.h>
#include <sys/cpu_module.h>
extern void progressbar_init(void);
extern void progressbar_start(void);
extern void brand_init(void);
/*
* XXX make declaration below "static" when drivers no longer use this
* interface.
*/
/*
* segkp
*/
extern int segkp_fromheap;
static void kvm_init(void);
static void startup_init(void);
static void startup_memlist(void);
static void startup_modules(void);
static void startup_bop_gone(void);
static void startup_vm(void);
static void startup_end(void);
/*
* Declare these as initialized data so we can patch them.
*/
#ifdef __i386
/*
 * Due to virtual address space limitations when running in 32 bit mode,
 * restrict the amount of physical memory configured to a max of PHYSMEM32
 * pages (16g).
 *
 * If the physical max memory size of 64g were allowed to be configured, the
 * size of user virtual address space would be less than 1g. A limited user
 * address space greatly reduces the range of applications that can run.
 *
 * If more physical memory than PHYSMEM32 is required, users should preferably
 * run in 64 bit mode, which has no virtual address space limitation issues.
 *
 * If 64 bit mode is not available, or if more physical memory
 * than PHYSMEM32 is required in 32 bit mode, physmem can be set to the desired
 * value or to 0 (to configure all available memory) via eeprom(1M). kernelbase
 * should also be carefully tuned to balance the needs of user
 * applications while minimizing the risk of kernel heap exhaustion due to
 * kernelbase being set too high.
*/
#define PHYSMEM32 0x400000
#else
#endif
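/*
 * Quick sanity check on the limit above (illustrative arithmetic, not part
 * of the original code): PHYSMEM32 is 0x400000 pages and MMU_PAGESIZE on
 * x86 is 4KB, so 0x400000 * 0x1000 bytes = 0x4.00000000 = 16GB, matching
 * the 16g figure quoted in the comment.
 */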
char *kobj_file_buf;
int kobj_file_bufsize; /* set in /etc/system */
/* Global variables for MP support. Used in mp_startup */
int auto_lpg_disable = 1;
/*
* Some CPUs have holes in the middle of the 64-bit virtual address range.
*/
/*
* kpm mapping window
*/
static int kpm_desired = 0; /* Do we want to try to use segkpm? */
/*
* VA range that must be preserved for boot until we release all of its
* mappings.
*/
#if defined(__amd64)
static void *kmem_setaside;
#endif
/*
* Configuration parameters set at boot time.
*/
char bootblock_fstype[16];
char kern_bootargs[OBP_MAXPATHLEN];
/*
* ZFS zio segment. This allows us to exclude large portions of ZFS data that
* gets cached in kmem caches on the heap. If this is set to zero, we allocate
* zio buffers from their own segment, otherwise they are allocated from the
* heap. The optimization of allocating zio buffers from their own segment is
* only valid on 64-bit kernels.
*/
#if defined(__amd64)
int segzio_fromheap = 0;
#else
int segzio_fromheap = 1;
#endif
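/*
 * Illustration only (not original code): since segzio_fromheap is an
 * ordinary kernel global, a 64-bit system could in principle force zio
 * buffers back onto the kernel heap from /etc/system with a line such as
 *
 *	set segzio_fromheap=1
 *
 * effectively disabling the separate zio segment optimization.
 */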
/*
 * New memory fragmentations are possible in startup() due to BOP_ALLOCs. This
 * depends on the number of BOP_ALLOC calls made and their requested sizes, the
 * memory size combination, and whether boot.bin memory needs to be freed.
*/
#define POSS_NEW_FRAGMENTS 12
/*
* VM data structures
*/
long page_hashsz; /* Size of page hash table (power of two) */
#if defined(__amd64)
#else
#endif
#if defined(__amd64)
#else
#endif
struct memseg *memseg_base;
struct vnode unused_pages_vp;
#define FOURGB 0x100000000LL
static void memlist_add(uint64_t, uint64_t, struct memlist *,
    struct memlist **);
/*
* kphysm_init returns the number of pages that were processed
*/
/*
* a couple useful roundup macros
*/
#define ROUND_UP_PAGE(x) \
	((uintptr_t)P2ROUNDUP((uintptr_t)(x), (uintptr_t)MMU_PAGESIZE))
#define ROUND_UP_LPAGE(x) \
	((uintptr_t)P2ROUNDUP((uintptr_t)(x), mmu.level_size[1]))
#define ROUND_UP_4MEG(x) \
	((uintptr_t)P2ROUNDUP((uintptr_t)(x), (uintptr_t)(4 * 1024 * 1024)))
#define ROUND_UP_TOPLEVEL(x) \
	((uintptr_t)P2ROUNDUP((uintptr_t)(x), mmu.level_size[mmu.max_level]))
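/*
 * Worked example of the macros above (illustrative, assuming the usual 4KB
 * MMU_PAGESIZE): P2ROUNDUP rounds up to the next multiple of a power-of-two
 * alignment, so ROUND_UP_PAGE(0x1234) == 0x2000, while an already aligned
 * value such as 0x2000 is returned unchanged.
 */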
/*
* 32-bit Kernel's Virtual memory layout.
* +-----------------------+
* | psm 1-1 map |
* | exec args area |
* 0xFFC00000 -|-----------------------|- ARGSBASE
* | debugger |
* 0xFF800000 -|-----------------------|- SEGDEBUGBASE
* | Kernel Data |
* 0xFEC00000 -|-----------------------|
* | Kernel Text |
* 0xFE800000 -|-----------------------|- KERNEL_TEXT
* | LUFS sinkhole |
* 0xFE000000 -|-----------------------|- lufs_addr
* --- -|-----------------------|- valloc_base + valloc_sz
* | early pp structures |
* | memsegs, memlists, |
* | page hash, etc. |
* --- -|-----------------------|- valloc_base (floating)
* | ptable_va |
* 0xFDFFE000 -|-----------------------|- ekernelheap, ptable_va
* | | (segkp is an arena under the heap)
* | |
* | kvseg |
* | |
* | |
* --- -|-----------------------|- kernelheap (floating)
* | Segkmap |
* 0xC3002000 -|-----------------------|- segkmap_start (floating)
* | Red Zone |
* 0xC3000000 -|-----------------------|- kernelbase / userlimit (floating)
* | | ||
* | Shared objects | \/
* | |
* : :
* | user data |
* |-----------------------|
* | user text |
* 0x08048000 -|-----------------------|
* | user stack |
* : :
* | invalid |
* 0x00000000 +-----------------------+
*
*
* 64-bit Kernel's Virtual memory layout. (assuming 64 bit app)
* +-----------------------+
* | psm 1-1 map |
* | exec args area |
* 0xFFFFFFFF.FFC00000 |-----------------------|- ARGSBASE
* | debugger (?) |
* 0xFFFFFFFF.FF800000 |-----------------------|- SEGDEBUGBASE
* | unused |
* +-----------------------+
* | Kernel Data |
* 0xFFFFFFFF.FBC00000 |-----------------------|
* | Kernel Text |
* 0xFFFFFFFF.FB800000 |-----------------------|- KERNEL_TEXT
* | LUFS sinkhole |
* 0xFFFFFFFF.FB000000 -|-----------------------|- lufs_addr
* --- |-----------------------|- valloc_base + valloc_sz
* | early pp structures |
* | memsegs, memlists, |
* | page hash, etc. |
* --- |-----------------------|- valloc_base
* | ptable_va |
* --- |-----------------------|- ptable_va
* | Core heap | (used for loadable modules)
* 0xFFFFFFFF.C0000000 |-----------------------|- core_base / ekernelheap
* | Kernel |
* | heap |
* 0xFFFFFXXX.XXX00000 |-----------------------|- kernelheap (floating)
* | segkmap |
* 0xFFFFFXXX.XXX00000 |-----------------------|- segkmap_start (floating)
* | device mappings |
* 0xFFFFFXXX.XXX00000 |-----------------------|- toxic_addr (floating)
* | segzio |
* 0xFFFFFXXX.XXX00000 |-----------------------|- segzio_base (floating)
* | segkp |
* --- |-----------------------|- segkp_base
* | segkpm |
* 0xFFFFFE00.00000000 |-----------------------|
* | Red Zone |
* 0xFFFFFD80.00000000 |-----------------------|- KERNELBASE
* | User stack |- User space memory
* | |
* | shared objects, etc | (grows downwards)
* : :
* | |
* 0xFFFF8000.00000000 |-----------------------|
* | |
* | VA Hole / unused |
* | |
* 0x00008000.00000000 |-----------------------|
* | |
* | |
* : :
* | user heap | (grows upwards)
* | |
* | user data |
* |-----------------------|
* | user text |
* 0x00000000.04000000 |-----------------------|
* | invalid |
* 0x00000000.00000000 +-----------------------+
*
* A 32 bit app on the 64 bit kernel sees the same layout as on the 32 bit
* kernel, except that userlimit is raised to 0xfe000000
*
* Floating values:
*
* valloc_base: start of the kernel's memory management/tracking data
* structures. This region contains page_t structures for the lowest 4GB
* of physical memory, memsegs, memlists, and the page hash.
*
 * core_base: start of the kernel's "core" heap area on 64-bit systems.
 * This area is intended to be used for global data as well as for module
 * text/data that do not fit into the nucleus pages. The core heap is
 * restricted to a 2GB range, allowing every address within it to be
 * accessed using rip-relative addressing.
*
* ekernelheap: end of kernelheap and start of segmap.
*
* kernelheap: start of kernel heap. On 32-bit systems, this starts right
* above a red zone that separates the user's address space from the
* kernel's. On 64-bit systems, it sits above segkp and segkpm.
*
 * segkmap_start: start of segmap. The length of segmap can be modified
 * by tuning segmapsize (via eeprom(1M) or /etc/system).
 * The default length is 16MB on 32-bit systems and 64MB on 64-bit systems.
*
* kernelbase: On a 32-bit kernel the default value of 0xd4000000 will be
* decreased by 2X the size required for page_t. This allows the kernel
* heap to grow in size with physical memory. With sizeof(page_t) == 80
* bytes, the following shows the values of kernelbase and kernel heap
* sizes for different memory configurations (assuming default segmap and
* segkp sizes).
*
* mem size for kernelbase kernel heap
* size page_t's size
* ---- --------- ---------- -----------
* 1gb 0x01400000 0xd1800000 684MB
* 2gb 0x02800000 0xcf000000 704MB
* 4gb 0x05000000 0xca000000 744MB
* 6gb 0x07800000 0xc5000000 784MB
* 8gb 0x0a000000 0xc0000000 824MB
* 16gb 0x14000000 0xac000000 984MB
* 32gb 0x28000000 0x84000000 1304MB
* 64gb 0x50000000 0x34000000 1944MB (*)
*
* kernelbase is less than the abi minimum of 0xc0000000 for memory
* configurations above 8gb.
*
* (*) support for memory configurations above 32gb will require manual tuning
* of kernelbase to balance out the need of user applications.
*/
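/*
 * To make the kernelbase table above concrete (illustrative arithmetic, not
 * original code): for the 4gb row, 4GB / 4KB = 0x100000 pages, and at 80
 * bytes per page_t that is 0x100000 * 80 = 0x05000000 (80MB) of page_t
 * space. kernelbase is then 0xd4000000 - 2 * 0x05000000 = 0xca000000,
 * which matches the table entry.
 */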
/* real-time-clock initialization parameters */
long gmt_lag; /* offset in seconds of gmt to local time */
extern long process_rtc_config_file(void);
char *final_kernelheap;
char *boot_kernelheap;
static uintptr_t segmap_reserved;
int segmapfreelists;
/*
* List of bootstrap pages. We mark these as allocated in startup.
* release_bootstrap() will free them when we're completely done with
* the bootstrap.
*/
struct system_hardware system_hardware;
/*
* Enable some debugging messages concerning memory usage...
*
* XX64 There should only be one print routine once memlist usage between
* vmx and the kernel is cleaned up and there is a single memlist structure
* shared between kernel and boot.
*/
static void
{
}
}
static void
{
}
}
/*
* XX64 need a comment here.. are these just default values, surely
* we read the "cpuid" type information to figure this out.
*/
int l2cache_sz = 0x80000;
int l2cache_linesz = 0x40;
int l2cache_assoc = 1;
/*
 * On 64 bit we use a predefined VA range for mapping devices in the kernel.
 * On 32 bit the mappings are intermixed in the heap, so we use a bit map.
*/
#ifdef __amd64
#else /* __i386 */
#endif /* __i386 */
/*
* Simple boot time debug facilities
*/
static char *prm_dbg_str[] = {
"%s:%d: '%s' is 0x%x\n",
"%s:%d: '%s' is 0x%llx\n"
};
int prom_debug;
#define PRM_DEBUG(q) if (prom_debug) \
	prom_printf(prm_dbg_str[sizeof (q) >> 3], "startup.c", __LINE__, #q, q);
#define PRM_POINT(q) if (prom_debug) \
	prom_printf("%s:%d: %s\n", "startup.c", __LINE__, q);
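/*
 * Usage note (illustrative): prm_dbg_str[] holds a 32-bit and a 64-bit
 * format string, so PRM_DEBUG(physmax) prints a line of the form
 * "startup.c:<line>: 'physmax' is 0x..." with the width chosen from the
 * size of the expression, while PRM_POINT("...") just prints the message.
 */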
/*
 * This structure is used to keep track of the initial allocations
* done in startup_memlist(). The value of NUM_ALLOCATIONS needs to
* be >= the number of ADD_TO_ALLOCATIONS() executed in the code.
*/
#define NUM_ALLOCATIONS 7
int num_allocations = 0;
struct {
	void **al_ptr;
	size_t al_size;
} allocations[NUM_ALLOCATIONS];
uintptr_t valloc_base;
size_t valloc_sz = 0;
#define ADD_TO_ALLOCATIONS(ptr, size) { \
	if (num_allocations == NUM_ALLOCATIONS) \
		panic("too many ADD_TO_ALLOCATIONS()"); \
	allocations[num_allocations].al_ptr = (void **)&ptr; \
	allocations[num_allocations].al_size = (size); \
	valloc_sz += (size); \
	++num_allocations; \
}
static void
perform_allocations(void)
{
	int i;
	caddr_t mem;

	mem = BOP_ALLOC(bootops, (caddr_t)valloc_base, valloc_sz, BO_NO_ALIGN);
	if (mem != (caddr_t)valloc_base)
		panic("BOP_ALLOC() failed");
	bzero(mem, valloc_sz);
	for (i = 0; i < num_allocations; ++i) {
		*allocations[i].al_ptr = (void *)mem;
		mem += allocations[i].al_size;
	}
}
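/*
 * A minimal usage sketch of the mechanism above (illustrative only; the
 * variable names below are placeholders, not necessarily the ones used in
 * startup_memlist()):
 */
#if 0
	ADD_TO_ALLOCATIONS(memlist, (memblocks + POSS_NEW_FRAGMENTS) *
	    sizeof (struct memlist));
	ADD_TO_ALLOCATIONS(page_hash, page_hashsz * sizeof (struct page *));
	/* ... register the remaining early allocations, then ... */
	perform_allocations();	/* one BOP_ALLOC() at valloc_base covers all */
#endif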
/*
* Our world looks like this at startup time.
*
* In a 32-bit OS, boot loads the kernel text at 0xfe800000 and kernel data
* at 0xfec00000. On a 64-bit OS, kernel text and data are loaded at
* 0xffffffff.fe800000 and 0xffffffff.fec00000 respectively. Those
* addresses are fixed in the binary at link time.
*
* On the text page:
*
* On the data page:
*/
/*
* Machine-dependent startup code
*/
void
startup(void)
{
extern void startup_bios_disk(void);
extern void startup_pci_bios(void);
	/*
	 * Make sure that nobody tries to use segkpm until we have
	 * initialized it properly.
	 */
#if defined(__amd64)
	kpm_desired = kpm_enable;
#endif
	kpm_enable = 0;

	progressbar_init();
	startup_init();
	startup_memlist();
	startup_pci_bios();
	startup_modules();
	startup_bios_disk();
	startup_bop_gone();
	startup_vm();
	startup_end();
	progressbar_start();
}
static void
startup_init()
{
PRM_POINT("startup_init() starting...");
/*
* Complete the extraction of cpuid data
*/
	/*
	 * Check for prom_debug in boot environment
	 */
	if (BOP_GETPROPLEN(bootops, "prom_debug") >= 0) {
		++prom_debug;
		PRM_POINT("prom_debug found in boot environment");
	}
/*
* Collect node, cpu and memory configuration information.
*/
	/*
	 * Halt if this is an unsupported processor.
	 */
	if (x86_type == X86_TYPE_486 || x86_type == X86_TYPE_CYRIX_486) {
		printf("\n486 processor (\"%s\") detected.\n",
		    CPU->cpu_brandstr);
		halt("This processor is not supported by this release "
		    "of Solaris.");
	}
PRM_POINT("startup_init() done");
}
/*
 * Filter the nucleus and boot-loader (i.e.
 * everything mapped above KERNEL_TEXT) pages from phys_avail. Note it
 * also filters out physical page zero. There is some reliance on the
 * boot loader allocating only a few contiguous physical memory chunks.
 */
static void
avail_filter(uint64_t *addr, uint64_t *size)
{
if (prom_debug)
/*
* page zero is required for BIOS.. never make it available
*/
if (*addr == 0) {
*addr += MMU_PAGESIZE;
*size -= MMU_PAGESIZE;
}
/*
 * First we trim from the front of the range. Since hat_boot_probe()
 * walks mappings in virtual address order, but addr/size are physical, we
 * keep rescanning the list until no changes are seen. This deals with the case
* where page "p" is mapped at v, page "p + PAGESIZE" is mapped at w
* but w < v.
*/
do {
change = 0;
for (va = KERNEL_TEXT;
change = 1;
*addr += MMU_PAGESIZE;
*size -= MMU_PAGESIZE;
len -= MMU_PAGESIZE;
}
}
}
if (change && prom_debug)
} while (change);
/*
* Trim pages from the end of the range.
*/
for (va = KERNEL_TEXT;
}
if (prom_debug)
}
static void
kpm_init()
{
struct segkpm_crargs b;
/*
* These variables were all designed for sfmmu in which segkpm is
* mapped using a single pagesize - either 8KB or 4MB. On x86, we
* might use 2+ page sizes on a single machine, so none of these
* variables have a single correct value. They are set up as if we
* always use a 4KB pagesize, which should do no harm. In the long
* run, we should get rid of KPM's assumption that only a single
* pagesize is used.
*/
kpmp2pshft = 0;
kpmpnpgs = 1;
PRM_POINT("about to create segkpm");
panic("cannot attach segkpm");
b.nvcolors = 1;
panic("segkpm_create segkpm");
/*
 * Map each of the memsegs into the kpm segment, coalescing adjacent
* memsegs to allow mapping with the largest possible pages.
*/
pmem = phys_install;
for (;;) {
break;
}
}
}
/*
* The purpose of startup memlist is to get the system to the
* point where it can use kmem_alloc()'s that operate correctly
* relying on BOP_ALLOC(). This includes allocating page_ts,
* page hash table, vmem initialized, etc.
*
* Boot's versions of physinstalled and physavail are insufficient for
* the kernel's purposes. Specifically we don't know which pages that
* are not in physavail can be reclaimed after boot is gone.
*
* This code solves the problem by dividing the address space
* into 3 regions as it takes over the MMU from the booter.
*
* 1) Any (non-nucleus) pages that are mapped at addresses above KERNEL_TEXT
* can not be used by the kernel.
*
* 2) Any free page that happens to be mapped below kernelbase
* is protected until the boot loader is released, but will then be reclaimed.
*
* 3) Boot shouldn't use any address in the remaining area between kernelbase
* and KERNEL_TEXT.
*
* In the case of multiple mappings to the same page, region 1 has precedence
* over region 2.
*/
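/*
 * Concrete example of the precedence rule above (illustrative): on a 32-bit
 * kernel with KERNEL_TEXT at 0xfe800000 and kernelbase near 0xd4000000, a
 * physical page that boot happened to map both at 0xfe900000 and at
 * 0x10000000 falls into region 1, so the kernel never reuses it even though
 * the low mapping alone would have made it reclaimable.
 */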
static void
startup_memlist(void)
{
int memblocks;
pgcnt_t orig_npages = 0;
extern void startup_build_mem_nodes(struct memlist *);
/* XX64 fix these - they should be in include files */
extern void page_coloring_setup(caddr_t);
PRM_POINT("startup_memlist() starting...");
/*
* Take the most current snapshot we can by calling mem-update.
* For this to work properly, we first have to ask boot for its
* end address.
*/
/*
* find if the kernel is mapped on a large page
*/
va = KERNEL_TEXT;
panic("Couldn't find kernel text boot mapping");
/*
*/
if (len > MMU_PAGESIZE) {
} else {
PRM_POINT("Kernel NOT loaded on Large Page!");
}
/*
* For MP machines cr4_value must be set or the non-boot
* CPUs will not be able to start.
*/
if (x86_feature & X86_LARGEPAGE)
/*
 * Examine the boot loader's physical memory map to find out:
 * - total memory in system - physinstalled
 * - the max physical address - physmax
 * - the number of segments the installed memory comes in
*/
if (prom_debug)
print_boot_memlist("boot physinstalled",
&physinstalled, &memblocks);
if (prom_debug)
print_boot_memlist("boot physavail",
/*
* Initialize hat's mmu parameters.
 * Check for enforce-prot-exec in boot environment. It's used to
 * enable/disable support for the page table entry NX bit.
 * The default is to enforce PROT_EXEC on processors that support NX.
* Boot seems to round up the "len", but 8 seems to be big enough.
*/
mmu_init();
#ifdef __i386
/*
* physmax is lowered if there is more memory than can be
*/
if (PFN_ABOVE64G(physmax)) {
}
} else {
if (PFN_ABOVE4G(physmax)) {
}
}
#endif
char value[8];
if (len < 8)
else
}
/*
* We will need page_t's for every page in the system, except for
 * memory mapped at or above the start of the kernel text segment.
*
* pages above e_modtext are attributed to kernel debugger (obp_pages)
*/
obp_pages = 0;
va = KERNEL_TEXT;
}
/*
* If physmem is patched to be non-zero, use it instead of
* the computed value unless it is larger than the real
* amount of memory on hand.
*/
}
/*
* We now compute the sizes of all the initial allocations for
* structures the kernel needs in order do kmem_alloc(). These
* include:
* memsegs
* memlists
* page hash table
* page_t's
* page coloring data structs
*/
/*
* Reserve space for phys_avail/phys_install memlists.
* There's no real good way to know exactly how much room we'll need,
* but this should be a good upper bound.
*/
(memblocks + POSS_NEW_FRAGMENTS));
/*
* The page structure hash table size is a power of 2
* such that the average hash chain length is PAGE_HASHAVELEN.
*/
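/*
 * For example (illustrative, assuming PAGE_HASHAVELEN is 4 as in this era):
 * with roughly 0x100000 pages of physical memory the hash table is sized to
 * the power of two near 0x100000 / 4 = 0x40000 entries, so an average hash
 * chain holds about four pages.
 */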
/*
* Set aside room for the page structures themselves. Note: on
* 64-bit systems we don't allocate page_t's for every page here.
* We just allocate enough to map the lowest 4GB of physical
* memory, minus those pages that are used for the "nucleus" kernel
* text and data. The remaining pages are allocated once we can
* map around boot.
*
* boot_npages is used to allocate an area big enough for our
* initial page_t's. kphym_init may use less than that.
*/
#if defined(__amd64)
#endif
/*
* determine l2 cache info and memory size for page coloring
*/
(void) getl2cacheinfo(CPU,
/*
* valloc_base will be below kernel text
* The extra pages are for the HAT and kmdb to map page tables.
*/
#if defined(__amd64)
"systems.");
#else /* __i386 */
/*
* We configure kernelbase based on:
*
 * 1. user specified kernelbase via eeprom command. Value cannot exceed
 * KERNELBASE_MAX. We large-page align eprom_kernelbase.
*
* 2. Default to KERNELBASE and adjust to 2X less the size for page_t.
* On large memory systems we must lower kernelbase to allow
* enough room for page_t's for all of memory.
*
* The value set here, might be changed a little later.
*/
if (eprom_kernelbase) {
if (kernelbase > KERNELBASE_MAX)
} else {
}
core_size = 0;
#endif
/*
* At this point, we can only use a portion of the kernelheap that
* will be available after we boot. Both 32-bit and 64-bit systems
* have this limitation, although the reasons are completely
* different.
*
* On 64-bit systems, the booter only supports allocations in the
* upper 4GB of memory, so we have to work with a reduced kernel
* heap until we take over all allocations. The booter also sits
* in the lower portion of that 4GB range, so we have to raise the
* bottom of the heap even further.
*
* On 32-bit systems we have to leave room to place segmap below
* the heap. We don't yet know how large segmap will be, so we
* have to be very conservative.
*/
#if defined(__amd64)
/*
* XX64: For now, we let boot have the lower 2GB of the top 4GB
* address range. In the long run, that should be fixed. It's
* insane for a booter to need 2 2GB address ranges.
*/
segmap_reserved = 0;
#else /* __i386 */
segkp_fromheap = 1;
#endif
ekernelheap = (char *)core_base;
/*
* If segmap is too large we can push the bottom of the kernel heap
* higher than the base. Or worse, it could exceed the top of the
* VA space entirely, causing it to wrap around.
*/
panic("too little memory available for kernelheap,"
" use a different kernelbase");
/*
* Now that we know the real value of kernelbase,
 * update variables that were initialized with a value of
 * KERNELBASE (in param.c).
 *
* XXX The problem with this sort of hackery is that the
* compiler just may feel like putting the const declarations
* (in param.c) into the .text section. Perhaps they should
* just be declared as variables there?
*/
#if defined(__amd64)
/*
* As one final sanity check, verify that the "red zone" between
* kernel and userspace is exactly the size we expected.
*/
#else
#endif
/*
* do all the initial allocations
*/
/*
* Initialize the kernel heap. Note 3rd argument must be > 1st.
*/
/*
* Build phys_install and phys_avail in kernel memspace.
* - phys_install should be all memory in the system.
* - phys_avail is phys_install minus any memory mapped before this
* point above KERNEL_TEXT.
*/
panic("physinstalled was too big!");
if (prom_debug)
PRM_POINT("Building phys_avail:\n");
panic("physavail was too big!");
if (prom_debug)
/*
* setup page coloring
*/
page_lock_init(); /* currently a no-op */
/*
* free page list counters
*/
(void) page_ctrs_alloc(page_ctrs_mem);
/*
* Initialize the page structures from the memory lists.
*/
PRM_POINT("Calling kphysm_init()...");
PRM_POINT("kphysm_init() done");
/*
* Now that page_t's have been initialized, remove all the
* initial allocation pages from the kernel free page lists.
*/
/*
* Initialize kernel memory allocator.
*/
kmem_init();
/*
* print this out early so that we know what's going on
*/
/*
* Initialize bp_mapin().
*/
/*
* orig_npages is non-zero if physmem has been configured for less
* than the available memory.
*/
if (orig_npages) {
#ifdef __i386
/*
* use npages for physmem in case it has been temporarily
* modified via /etc/system in kmem_init/mod_read_system_file.
*/
" address space limitations, limiting"
" physmem to 0x%lx of 0x%lx available pages",
} else {
}
#else
#endif
}
#if defined(__i386)
"System using 0x%lx",
#endif
#ifdef KERNELBASE_ABI_MIN
}
#endif
PRM_POINT("startup_memlist() done");
}
static void
startup_modules(void)
{
unsigned int i;
extern void prom_setup(void);
PRM_POINT("startup_modules() starting...");
/*
 * Initialize the ten-microsecond timer so that drivers will
 * not get short changed in their init phase. This was
 * not getting called until clkinit which, on fast CPUs,
 * caused the drv_usecwait to be way too short.
*/
microfind();
/*
* Read the GMT lag from /etc/rtc_config.
*/
/*
 * Calculate default settings of system parameters based upon
 * maxusers, yet allow them to be overridden via the /etc/system file.
*/
param_calc(0);
mod_setup();
/*
* Initialize system parameters.
*/
param_init();
/*
* Initialize the default brands
*/
brand_init();
/*
* maxmem is the amount of physical memory we're playing with.
*/
/*
* Initialize the hat layer.
*/
hat_init();
/*
* Initialize segment management stuff.
*/
seg_init();
halt("Can't load specfs");
halt("Can't load devfs");
halt("Can't load dev");
dispinit();
/*
* This is needed here to initialize hw_serial[] for cluster booting.
*/
(void) modunload(i);
else
/* Read cluster configuration data. */
clconf_init();
/*
* Create a kernel device tree. First, create rootnex and
* then invoke bus specific code to probe devices.
*/
setup_ddi();
/*
* Set up the CPU module subsystem. Modifies the device tree, so it
* must be done after setup_ddi().
*/
cmi_init();
/*
* Initialize the MCA handlers
*/
if (x86_feature & X86_MCA)
cmi_mca_init();
/*
 * Fake a prom tree such that /dev/openprom continues to work
 */
prom_setup();
/*
* Load all platform specific modules
*/
psm_modload();
PRM_POINT("startup_modules() done");
}
static void
startup_bop_gone(void)
{
PRM_POINT("startup_bop_gone() starting...");
/*
* Do final allocations of HAT data structures that need to
* be allocated before quiescing the boot loader.
*/
PRM_POINT("Calling hat_kern_alloc()...");
PRM_POINT("hat_kern_alloc() done");
/*
* Setup MTRR (Memory type range registers)
*/
setup_mtrr();
PRM_POINT("startup_bop_gone() done");
}
/*
* Walk through the pagetables looking for pages mapped in by boot. If the
* setaside flag is set the pages are expected to be returned to the
* kernel later in boot, so we add them to the bootpages list.
*/
static void
{
pgcnt_t boot_protect_cnt = 0;
panic("0x%lx byte mapping at 0x%p exceeds boot's "
while (len > 0) {
if (setaside == 0)
panic("Unexpected mapping by boot. "
"addr=%p pfn=%lx\n",
}
++pfn;
len -= MMU_PAGESIZE;
va += MMU_PAGESIZE;
}
}
}
static void
startup_vm(void)
{
struct segmap_crargs a;
extern void hat_kern_setup(void);
extern int use_brk_lpg, use_stk_lpg;
PRM_POINT("startup_vm() starting...");
/*
* The next two loops are done in distinct steps in order
* to be sure that any page that is doubly mapped (both above
* KERNEL_TEXT and below kernelbase) is dealt with correctly.
* Note this may never happen, but it might someday.
*/
PRM_POINT("Protecting boot pages");
/*
* Protect any pages mapped above KERNEL_TEXT that somehow have
 * page_t's. This can only happen if something weird allocated
 * them (like boot).
*/
/*
* Before we can take over memory allocation/mapping from the boot
* loader we must remove from our free page lists any boot pages that
* will stay mapped until release_bootstrap().
*/
#if defined(__amd64)
#endif
/*
* Copy in boot's page tables, set up extra page tables for the kernel,
* and switch to the kernel's context.
*/
PRM_POINT("Calling hat_kern_setup()...");
/*
* It is no longer safe to call BOP_ALLOC(), so make sure we don't.
*/
PRM_POINT("hat_kern_setup() done");
/*
* Before we call kvm_init(), we need to establish the final size
* of the kernel's heap. So, we need to figure out how much space
* to set aside for segkp, segkpm, and segmap.
*/
#if defined(__amd64)
if (kpm_desired) {
/*
* Segkpm appears at the bottom of the kernel's address
* range. To detect accidental overruns of the user
* address space, we leave a "red zone" of unmapped memory
* between kernelbase and the beginning of segkpm.
*/
}
if (!segkp_fromheap) {
/*
* determine size of segkp and adjust the bottom of the
* kernel's heap.
*/
sz = SEGKPDEFSIZE;
"segkpsize has been reset to %ld pages",
}
}
if (!segzio_fromheap) {
/* size is in bytes, segziosize is in pages */
if (segziosize == 0) {
} else {
}
/* max size is 3/4ths of physmem */
if (size < SEGZIOMINSIZE) {
}
}
/*
* put the range of VA for device mappings next
*/
#endif
/*
* If the variable is tuned through eeprom, there is no upper
 * bound on the size of segmap. If it is tuned through /etc/system,
 * it must be no larger than we've planned for in startup_memlist().
*/
#if defined(__i386)
if (segmapsize > segmap_reserved) {
}
/*
* 32-bit systems don't have segkpm or segkp, so segmap appears at
* the bottom of the kernel's address range. Set aside space for a
* red zone just below the start of segmap.
*/
#endif
/*
* Initialize VM system
*/
PRM_POINT("Calling kvm_init()...");
kvm_init();
PRM_POINT("kvm_init() done");
/*
* Tell kmdb that the VM system is now working
*/
/*
* Mangle the brand string etc.
*/
/*
* Now that we can use memory outside the top 4GB (on 64-bit
* systems) and we know the size of segmap, we can set the final
* size of the kernel's heap. Note: on 64-bit systems we still
* can't touch anything in the bottom half of the top 4GB range
* because boot still has pages mapped there.
*/
if (final_kernelheap < boot_kernelheap) {
#if defined(__amd64)
MMU_PAGESIZE, 0, 0, (void *)(BOOT_DOUBLEMAP_BASE),
(void *)(BOOT_DOUBLEMAP_BASE + BOOT_DOUBLEMAP_SIZE),
if (kmem_setaside == NULL)
panic("Could not protect boot's memory");
#endif
}
/*
* Now that the kernel heap may have grown significantly, we need
* to make all the remaining page_t's available to back that memory.
*
* XX64 this should probably wait till after release boot-strap too.
*/
if (pages_left > 0) {
}
#if defined(__amd64)
/*
 * Create the device arena for toxic (to dtrace/kmdb) mappings.
 */
#else /* __i386 */
/*
* allocate the bit map that tracks toxic pages
*/
#endif /* __i386 */
/*
* Now that we've got more VA, as well as the ability to allocate from
* it, tell the debugger.
*/
/*
* The following code installs a special page fault handler (#pf)
* to work around a pentium bug.
*/
#if !defined(__amd64)
if (x86_type == X86_TYPE_P5) {
panic("failed to install pentium_pftrap");
}
#endif /* !__amd64 */
/*
* Map page pfn=0 for drivers, such as kd, that need to pick up
* parameters left there by controllers/BIOS.
*/
PRM_POINT("setup up p0_va");
/*
* disable automatic large pages for small memory systems or
* when the disable flag is set.
*/
}
use_brk_lpg = 0;
use_stk_lpg = 0;
}
if (mmu.max_page_level > 0) {
}
PRM_POINT("Calling hat_init_finish()...");
PRM_POINT("hat_init_finish() done");
/*
* Initialize the segkp segment type.
*/
if (!segkp_fromheap) {
segkp) < 0) {
panic("startup: cannot attach segkp");
/*NOTREACHED*/
}
} else {
/*
* For 32 bit x86 systems, we will have segkp under the heap.
* There will not be a segkp segment. We do, however, need
* to fill in the seg structure.
*/
}
if (segkp_create(segkp) != 0) {
panic("startup: segkp_create failed");
/*NOTREACHED*/
}
/*
* kpm segment
*/
segmap_kpm = 0;
if (kpm_desired) {
kpm_init();
kpm_enable = 1;
vpm_enable = 1;
}
/*
* Now create segmap segment.
*/
panic("cannot attach segkmap");
/*NOTREACHED*/
}
/*
* The 64 bit HAT permanently maps only segmap's page tables.
* The 32 bit HAT maps the heap's page tables too.
*/
#if defined(__amd64)
#else /* __i386 */
#endif /* __i386 */
a.shmsize = 0;
a.nfreelist = segmapfreelists;
panic("segmap_create segkmap");
segdev_init();
pmem_init();
PRM_POINT("startup_vm() done");
}
static void
startup_end(void)
{
extern void setx86isalist(void);
PRM_POINT("startup_end() starting...");
/*
* Perform tasks that get done after most of the VM
* initialization has been done but before the clock
* and other devices get started.
*/
kern_setup1();
/*
* Perform CPC initialization for this CPU.
*/
#if defined(__amd64)
/*
* XX64 -- include SSE, SSE2, etc. here too?
*/
if ((x86_feature & X86_ASYSC) == 0) {
}
#endif
#if defined(OPTERON_WORKAROUND_6323525)
#endif
/*
* Configure the system.
*/
PRM_POINT("Calling configure()...");
configure(); /* set up devices */
PRM_POINT("configure() done");
/*
* Set the isa_list string to the defined instruction sets we
* support.
*/
psm_install();
/*
* We're done with bootops. We don't unmap the bootstrap yet because
* we're still using bootsvcs.
*/
PRM_POINT("zeroing out bootops");
PRM_POINT("Enabling interrupts");
(*picinitf)();
sti();
PRM_POINT("startup_end() done");
}
extern char hw_serial[];
void
post_startup(void)
{
/*
* Set the system wide, processor-specific flags to be passed
* to userland via the aux vector for performance hints and
* instruction set extensions.
*/
bind_hwcap();
/*
* Load the System Management BIOS into the global ksmbios handle,
* if an SMBIOS is present on this system.
*/
/*
* Startup memory scrubber.
*/
/*
* Complete CPU module initialization
*/
/*
 * Perform forceloading tasks for /etc/system.
 */
/*
* ON4.0: Force /proc module in until clock interrupt handle fixed
*/
#if defined(__i386)
/*
* Check for required functional Floating Point hardware,
* unless FP hardware explicitly disabled.
*/
halt("No working FP hardware found");
#endif
/*
* Perform the formal initialization of the boot chip,
* and associate the boot cpu with it.
 * This must be done after the cpu node for CPU has been
 * added to the device tree, by which time the necessary probing to
 * know the chip type and chip "id" has been performed.
*/
}
static int
{
}
void
release_bootstrap(void)
{
int root_is_ramdisk;
extern void kobj_boot_unmountroot(void);
/* unmount boot ramdisk and release kmem usage */
/*
* We're finished using the boot loader so free its pages.
*/
PRM_POINT("Unmapping lower boot pages");
#if defined(__amd64)
PRM_POINT("Unmapping upper boot pages");
#endif
/*
* If root isn't on ramdisk, destroy the hardcoded
* ramdisk node now and release the memory. Else,
* ramdisk memory is kept in rd_pages.
*/
if (!root_is_ramdisk) {
(void) ddi_remove_child(dip, 0);
}
PRM_POINT("Releasing boot pages");
while (bootpages) {
continue;
}
}
/*
* Find 1 page below 1 MB so that other processors can boot up.
* Make sure it has a kernel VA as well as a 1:1 mapping.
 * We should have just freed one up.
*/
if (use_mp) {
continue;
break;
}
panic("No page available for starting "
"other processors");
}
#if defined(__amd64)
PRM_POINT("Returning boot's VA space to kernel heap");
if (kmem_setaside != NULL)
#endif
}
/*
* Initialize the platform-specific parts of a page_t.
*/
void
{
}
/*
* kphysm_init() initializes physical memory.
*/
static pgcnt_t
{
struct memseg *cur_memseg;
pgcnt_t total_skipped = 0;
pgcnt_t pages_done = 0;
int dobreak = 0;
extern pfn_t ddiphysmin;
/*
 * In a 32 bit kernel we can't use higher memory if we're
* not booting in PAE mode. This check takes care of that.
*/
continue;
/*
* align addr and size - they may not be at page boundaries
*/
if ((addr & MMU_PAGEOFFSET) != 0) {
addr += MMU_PAGEOFFSET;
}
/* only process pages below or equal to physmax */
if (num == 0)
continue;
if (total_skipped < start) {
total_skipped += num;
continue;
}
}
if (num == 0)
continue;
pages_done += num;
/*
* If the caller didn't provide space for the page
* structures, carve them out of the memseg they will
* represent.
*/
if (num <= 1)
continue;
/*
* Compute how many of the pages we need to use for
* page_ts
*/
--pp_pgs;
"the system.", num);
continue;
}
}
if (prom_debug)
" pgs=0x%lx pfn 0x%lx-0x%lx\n",
/*
* drop pages below ddiphysmin to simplify ddi memory
* allocation with non-zero addr_lo requests.
*/
if (base_pfn < ddiphysmin) {
/* drop entire range below ddiphysmin */
continue;
}
/* adjust range to ddiphysmin */
}
/*
* Build the memsegs entry
*/
/*
* insert in memseg list in decreasing pfn range order.
* Low memory is typically more fragmented such that this
* ordering keeps the larger ranges at the front of the list
* for code that searches memseg.
*/
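/*
 * For example (illustrative): if the list currently holds a memseg for
 * pfns 0x100000-0x13ffff followed by one for 0x1000-0x9ffff, a new memseg
 * covering 0xa0000-0xfffff is inserted between the two; if its pfns and
 * page_t's turn out to be contiguous with a neighbor, the code below
 * merges it into that entry instead.
 */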
for (;;) {
/* empty memsegs */
break;
}
/* check for continuity with start of memsegpp */
/*
* contiguous pfn and page_t's. Merge
* cur_memseg into *memsegpp. Drop
* cur_memseg
*/
(*memsegpp)->pages_base =
/*
* check if contiguous with the end of
* the next memseg.
*/
((*memsegpp)->pages_base ==
cur_memseg = *memsegpp;
dobreak = 1;
} else {
break;
}
} else {
/*
 * contiguous pfn but not page_t's. Drop the
 * last pfn/page_t of cur_memseg to prevent
 * creation of large pages with noncontiguous
 * page_t's if not aligned to the largest page
 * boundary.
*/
page_num_pagesizes() - 1);
if (cur_memseg->pages_end &
(largepgcnt - 1)) {
num--;
cur_memseg->epages--;
cur_memseg->pages_end--;
}
}
}
/* check for continuity with end of memsegpp */
/*
* contiguous pfn and page_t's. Merge
* cur_memseg into *memsegpp. Drop
* cur_memseg.
*/
if (dobreak) {
/* merge previously done */
cur_memseg->pages =
(*memsegpp)->pages_base;
cur_memseg->next =
} else {
}
break;
}
/*
 * contiguous pfn but not page_t's. Drop the
 * first pfn/page_t of cur_memseg to prevent
 * creation of large pages with noncontiguous
 * page_t's if not aligned to the largest page
 * boundary.
*/
page_num_pagesizes() - 1);
num--;
base_pfn++;
cur_memseg->pages++;
cur_memseg->pages_base++;
}
if (dobreak)
break;
}
if (cur_memseg->pages_base >=
*memsegpp = cur_memseg;
break;
}
break;
}
}
/*
* add_physmem() initializes the PSM part of the page
* struct by calling the PSM back with add_physmem_cb().
* In addition it coalesces pages into larger pages as
* it initializes them.
*/
cur_memseg++;
availrmem_initial += num;
/*
* If the caller provided the page frames to us, then
* advance in that list. Otherwise, prepare to allocate
* our own page frames for the next memseg.
*/
}
return (pages_done);
}
/*
* Kernel VM initialization.
*/
static void
kvm_init(void)
{
#ifdef DEBUG
extern void _start();
#endif
/*
* Put the kernel segments in kernel address space.
*/
as_avlinit(&kas);
(void) segkmem_create(&ktextseg);
(void) segkmem_create(&kvalloc);
/*
* We're about to map out /boot. This is the beginning of the
* system resource management transition. We can no longer
* call into /boot for I/O or memory allocations.
*
* XX64 - Is this still correct with kernelheap_extend() being called
* later than this????
*/
(void) segkmem_create(&kvseg);
#if defined(__amd64)
(void) segkmem_create(&kvseg_core);
/* segzio optimization is only valid for 64-bit kernels */
if (!segzio_fromheap) {
&kzioseg);
(void) segkmem_zio_create(&kzioseg);
/* create zio area covering new segment */
}
#endif
&kdebugseg);
(void) segkmem_create(&kdebugseg);
/*
* Ensure that the red zone at kernelbase is never accessible.
*/
/*
* Make the text writable so that it can be hot patched by DTrace.
*/
/*
* Make data writable until end.
*/
}
/*
 * These are MTRR registers supported by P6
*/
/*
* Disable reprogramming of MTRRs by default.
*/
int enable_relaxed_mtrr = 0;
void
setup_mtrr(void)
{
int i, ecx;
int vcnt;
if (!(x86_feature & X86_MTRR))
return;
if (mtrrcap & MTRRCAP_FIX) {
}
vcnt = MAX_MTRRVAR;
}
}
if (x86_feature & X86_PAT) {
if (enable_relaxed_mtrr)
}
mtrr_sync();
}
/*
* Sync current cpu mtrr with the incore copy of mtrr.
 * This function has to be invoked with interrupts disabled.
 * Currently we do not capture other CPUs. This is invoked on cpu0;
 * on other CPUs it's invoked from mp_startup().
*/
void
mtrr_sync(void)
{
if (x86_feature & X86_PAT)
if (mtrrcap & MTRRCAP_FIX) {
}
vcnt = MAX_MTRRVAR;
}
}
/*
 * Resync MTRRs so that the BIOS is happy. Called from mdboot().
*/
void
mtrr_resync(void)
{
/*
* We could have changed the default mtrr definition.
* Put it back to uncached which is what it is at power on
*/
mtrr_sync();
}
}
void
get_system_configuration(void)
{
char prop[32];
} else {
}
else
} else {
}
segmapfreelists = 0; /* use segmap driver default */
} else {
segmapfreelists = (int)lvalue;
}
}
}
/*
* Add to a memory list.
* start = start of new memory segment
* len = length of new memory segment in bytes
* new = pointer to a new struct memlist
* memlistp = memory list to which to add segment.
*/
static void
{
while (cur) {
return;
}
return;
}
}
}
void
{
}
{
}
/*ARGSUSED*/
{
panic("unexpected call to kobj_texthole_alloc()");
/*NOTREACHED*/
return (0);
}
/*ARGSUSED*/
void
{
panic("unexpected call to kobj_texthole_free()");
}
/*
* This is called just after configure() in startup().
*
* The ISALIST concept is a bit hopeless on Intel, because
* there's no guarantee of an ever-more-capable processor
* given that various parts of the instruction set may appear
* and disappear between different implementations.
*
* While it would be possible to correct it and even enhance
* it somewhat, the explicit hardware capability bitmask allows
* more flexibility.
*
* So, we just leave this alone.
*/
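/*
 * For reference (illustrative, not guaranteed output): on a 64-bit capable
 * Intel or AMD CPU the resulting isa_list typically reads something like
 * "amd64 pentium_pro+mmx pentium_pro pentium+mmx pentium i486 i386 i86",
 * i.e. the most capable ISA names first.
 */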
void
setx86isalist(void)
{
char *tp;
extern char *isa_list;
#define TBUFSIZE 1024
*tp = '\0';
#if defined(__amd64)
#endif
switch (x86_vendor) {
case X86_VENDOR_Intel:
case X86_VENDOR_AMD:
case X86_VENDOR_TM:
if (x86_feature & X86_CMOV) {
/*
* Pentium Pro or later
*/
"+mmx pentium_pro " : " ");
}
/*FALLTHROUGH*/
case X86_VENDOR_Cyrix:
/*
* The Cyrix 6x86 does not have any Pentium features
* accessible while not at privilege level 0.
*/
if (x86_feature & X86_CPUID) {
"+mmx pentium " : " ");
}
break;
default:
break;
}
}
#ifdef __amd64
void *
{
}
void
{
}
#else
void *
{
uintptr_t v;
return (NULL);
ASSERT(v >= kernelbase);
++start;
}
return (vaddr);
}
void
{
ASSERT(v >= kernelbase);
++start;
}
}
/*
 * Returns the 1st address in the range that is in the device arena, or NULL.
 * If len is not NULL it returns the length of the toxic range.
*/
void *
{
/*
* if called very early by kmdb, just return NULL
*/
if (toxic_bit_map == NULL)
return (NULL);
/*
* First check if we're completely outside the bitmap range.
*/
return (NULL);
/*
* Trim ends of search to look at only what the bitmap covers.
*/
if (v < kernelbase)
v = kernelbase;
if (end >= toxic_bit_map_len)
return (NULL);
return ((void *)v);
}
#endif